1 //===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
10 // and generates target-independent LLVM-IR.
11 // The vectorizer uses the TargetTransformInfo analysis to estimate the costs
12 // of instructions in order to estimate the profitability of vectorization.
13 //
14 // The loop vectorizer combines consecutive loop iterations into a single
15 // 'wide' iteration. After this transformation the index is incremented
16 // by the SIMD vector width, and not by one.
17 //
18 // This pass has three parts:
19 // 1. The main loop pass that drives the different parts.
20 // 2. LoopVectorizationLegality - A unit that checks for the legality
21 //    of the vectorization.
22 // 3. InnerLoopVectorizer - A unit that performs the actual
23 //    widening of instructions.
24 // 4. LoopVectorizationCostModel - A unit that checks for the profitability
25 //    of vectorization. It decides on the optimal vector width, which
26 //    can be one, if vectorization is not profitable.
27 //
28 // There is a development effort going on to migrate loop vectorizer to the
29 // VPlan infrastructure and to introduce outer loop vectorization support (see
30 // docs/Proposal/VectorizationPlan.rst and
31 // http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
32 // purpose, we temporarily introduced the VPlan-native vectorization path: an
33 // alternative vectorization path that is natively implemented on top of the
34 // VPlan infrastructure. See EnableVPlanNativePath for enabling.
35 //
36 //===----------------------------------------------------------------------===//
37 //
38 // The reduction-variable vectorization is based on the paper:
39 //  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
40 //
41 // Variable uniformity checks are inspired by:
42 //  Karrenberg, R. and Hack, S. Whole Function Vectorization.
43 //
44 // The interleaved access vectorization is based on the paper:
45 //  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
46 //  Data for SIMD
47 //
48 // Other ideas/concepts are from:
49 //  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
50 //
51 //  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
52 //  Vectorizing Compilers.
53 //
54 //===----------------------------------------------------------------------===//
55 
56 #include "llvm/Transforms/Vectorize/LoopVectorize.h"
57 #include "LoopVectorizationPlanner.h"
58 #include "VPRecipeBuilder.h"
59 #include "VPlan.h"
60 #include "VPlanHCFGBuilder.h"
61 #include "VPlanPredicator.h"
62 #include "VPlanTransforms.h"
63 #include "llvm/ADT/APInt.h"
64 #include "llvm/ADT/ArrayRef.h"
65 #include "llvm/ADT/DenseMap.h"
66 #include "llvm/ADT/DenseMapInfo.h"
67 #include "llvm/ADT/Hashing.h"
68 #include "llvm/ADT/MapVector.h"
69 #include "llvm/ADT/None.h"
70 #include "llvm/ADT/Optional.h"
71 #include "llvm/ADT/STLExtras.h"
72 #include "llvm/ADT/SetVector.h"
73 #include "llvm/ADT/SmallPtrSet.h"
74 #include "llvm/ADT/SmallVector.h"
75 #include "llvm/ADT/Statistic.h"
76 #include "llvm/ADT/StringRef.h"
77 #include "llvm/ADT/Twine.h"
78 #include "llvm/ADT/iterator_range.h"
79 #include "llvm/Analysis/AssumptionCache.h"
80 #include "llvm/Analysis/BasicAliasAnalysis.h"
81 #include "llvm/Analysis/BlockFrequencyInfo.h"
82 #include "llvm/Analysis/CFG.h"
83 #include "llvm/Analysis/CodeMetrics.h"
84 #include "llvm/Analysis/DemandedBits.h"
85 #include "llvm/Analysis/GlobalsModRef.h"
86 #include "llvm/Analysis/LoopAccessAnalysis.h"
87 #include "llvm/Analysis/LoopAnalysisManager.h"
88 #include "llvm/Analysis/LoopInfo.h"
89 #include "llvm/Analysis/LoopIterator.h"
90 #include "llvm/Analysis/MemorySSA.h"
91 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
92 #include "llvm/Analysis/ProfileSummaryInfo.h"
93 #include "llvm/Analysis/ScalarEvolution.h"
94 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
95 #include "llvm/Analysis/TargetLibraryInfo.h"
96 #include "llvm/Analysis/TargetTransformInfo.h"
97 #include "llvm/Analysis/VectorUtils.h"
98 #include "llvm/IR/Attributes.h"
99 #include "llvm/IR/BasicBlock.h"
100 #include "llvm/IR/CFG.h"
101 #include "llvm/IR/Constant.h"
102 #include "llvm/IR/Constants.h"
103 #include "llvm/IR/DataLayout.h"
104 #include "llvm/IR/DebugInfoMetadata.h"
105 #include "llvm/IR/DebugLoc.h"
106 #include "llvm/IR/DerivedTypes.h"
107 #include "llvm/IR/DiagnosticInfo.h"
108 #include "llvm/IR/Dominators.h"
109 #include "llvm/IR/Function.h"
110 #include "llvm/IR/IRBuilder.h"
111 #include "llvm/IR/InstrTypes.h"
112 #include "llvm/IR/Instruction.h"
113 #include "llvm/IR/Instructions.h"
114 #include "llvm/IR/IntrinsicInst.h"
115 #include "llvm/IR/Intrinsics.h"
116 #include "llvm/IR/LLVMContext.h"
117 #include "llvm/IR/Metadata.h"
118 #include "llvm/IR/Module.h"
119 #include "llvm/IR/Operator.h"
120 #include "llvm/IR/Type.h"
121 #include "llvm/IR/Use.h"
122 #include "llvm/IR/User.h"
123 #include "llvm/IR/Value.h"
124 #include "llvm/IR/ValueHandle.h"
125 #include "llvm/IR/Verifier.h"
126 #include "llvm/InitializePasses.h"
127 #include "llvm/Pass.h"
128 #include "llvm/Support/Casting.h"
129 #include "llvm/Support/CommandLine.h"
130 #include "llvm/Support/Compiler.h"
131 #include "llvm/Support/Debug.h"
132 #include "llvm/Support/ErrorHandling.h"
133 #include "llvm/Support/InstructionCost.h"
134 #include "llvm/Support/MathExtras.h"
135 #include "llvm/Support/raw_ostream.h"
136 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
137 #include "llvm/Transforms/Utils/InjectTLIMappings.h"
138 #include "llvm/Transforms/Utils/LoopSimplify.h"
139 #include "llvm/Transforms/Utils/LoopUtils.h"
140 #include "llvm/Transforms/Utils/LoopVersioning.h"
141 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
142 #include "llvm/Transforms/Utils/SizeOpts.h"
143 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
144 #include <algorithm>
145 #include <cassert>
146 #include <cstdint>
147 #include <cstdlib>
148 #include <functional>
149 #include <iterator>
150 #include <limits>
151 #include <memory>
152 #include <string>
153 #include <tuple>
154 #include <utility>
155 
156 using namespace llvm;
157 
158 #define LV_NAME "loop-vectorize"
159 #define DEBUG_TYPE LV_NAME
160 
161 #ifndef NDEBUG
162 const char VerboseDebug[] = DEBUG_TYPE "-verbose";
163 #endif
164 
165 /// @{
166 /// Metadata attribute names
167 const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
168 const char LLVMLoopVectorizeFollowupVectorized[] =
169     "llvm.loop.vectorize.followup_vectorized";
170 const char LLVMLoopVectorizeFollowupEpilogue[] =
171     "llvm.loop.vectorize.followup_epilogue";
172 /// @}
173 
174 STATISTIC(LoopsVectorized, "Number of loops vectorized");
175 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
176 STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");
177 
178 static cl::opt<bool> EnableEpilogueVectorization(
179     "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
180     cl::desc("Enable vectorization of epilogue loops."));
181 
182 static cl::opt<unsigned> EpilogueVectorizationForceVF(
183     "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
184     cl::desc("When epilogue vectorization is enabled, and a value greater than "
185              "1 is specified, forces the given VF for all applicable epilogue "
186              "loops."));
187 
188 static cl::opt<unsigned> EpilogueVectorizationMinVF(
189     "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
190     cl::desc("Only loops with vectorization factor equal to or larger than "
191              "the specified value are considered for epilogue vectorization."));
192 
193 /// Loops with a known constant trip count below this number are vectorized only
194 /// if no scalar iteration overheads are incurred.
195 static cl::opt<unsigned> TinyTripCountVectorThreshold(
196     "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
197     cl::desc("Loops with a constant trip count that is smaller than this "
198              "value are vectorized only if no scalar iteration overheads "
199              "are incurred."));
200 
201 // Option prefer-predicate-over-epilogue indicates that an epilogue is undesired,
202 // that predication is preferred, and this lists all options. I.e., the
203 // vectorizer will try to fold the tail-loop (epilogue) into the vector body
204 // and predicate the instructions accordingly. If tail-folding fails, there are
205 // different fallback strategies depending on these values:
206 namespace PreferPredicateTy {
207   enum Option {
208     ScalarEpilogue = 0,
209     PredicateElseScalarEpilogue,
210     PredicateOrDontVectorize
211   };
212 } // namespace PreferPredicateTy
213 
214 static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
215     "prefer-predicate-over-epilogue",
216     cl::init(PreferPredicateTy::ScalarEpilogue),
217     cl::Hidden,
218     cl::desc("Tail-folding and predication preferences over creating a scalar "
219              "epilogue loop."),
220     cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
221                          "scalar-epilogue",
222                          "Don't tail-predicate loops, create scalar epilogue"),
223               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
224                          "predicate-else-scalar-epilogue",
225                          "prefer tail-folding, create scalar epilogue if tail "
226                          "folding fails."),
227               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
228                          "predicate-dont-vectorize",
229                          "prefers tail-folding, don't attempt vectorization if "
230                          "tail-folding fails.")));
231 
232 static cl::opt<bool> MaximizeBandwidth(
233     "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
234     cl::desc("Maximize bandwidth when selecting vectorization factor which "
235              "will be determined by the smallest type in loop."));
236 
237 static cl::opt<bool> EnableInterleavedMemAccesses(
238     "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
239     cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
240 
241 /// An interleave-group may need masking if it resides in a block that needs
242 /// predication, or in order to mask away gaps.
243 static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
244     "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
245     cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));
246 
247 static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
248     "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
249     cl::desc("We don't interleave loops with a estimated constant trip count "
250              "below this number"));
251 
252 static cl::opt<unsigned> ForceTargetNumScalarRegs(
253     "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
254     cl::desc("A flag that overrides the target's number of scalar registers."));
255 
256 static cl::opt<unsigned> ForceTargetNumVectorRegs(
257     "force-target-num-vector-regs", cl::init(0), cl::Hidden,
258     cl::desc("A flag that overrides the target's number of vector registers."));
259 
260 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
261     "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
262     cl::desc("A flag that overrides the target's max interleave factor for "
263              "scalar loops."));
264 
265 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
266     "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
267     cl::desc("A flag that overrides the target's max interleave factor for "
268              "vectorized loops."));
269 
270 static cl::opt<unsigned> ForceTargetInstructionCost(
271     "force-target-instruction-cost", cl::init(0), cl::Hidden,
272     cl::desc("A flag that overrides the target's expected cost for "
273              "an instruction to a single constant value. Mostly "
274              "useful for getting consistent testing."));
275 
276 static cl::opt<bool> ForceTargetSupportsScalableVectors(
277     "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
278     cl::desc(
279         "Pretend that scalable vectors are supported, even if the target does "
280         "not support them. This flag should only be used for testing."));
281 
282 static cl::opt<unsigned> SmallLoopCost(
283     "small-loop-cost", cl::init(20), cl::Hidden,
284     cl::desc(
285         "The cost of a loop that is considered 'small' by the interleaver."));
286 
287 static cl::opt<bool> LoopVectorizeWithBlockFrequency(
288     "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
289     cl::desc("Enable the use of the block frequency analysis to access PGO "
290              "heuristics minimizing code growth in cold regions and being more "
291              "aggressive in hot regions."));
292 
293 // Runtime interleave loops for load/store throughput.
294 static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
295     "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
296     cl::desc(
297         "Enable runtime interleaving until load/store ports are saturated"));
298 
299 /// Interleave small loops with scalar reductions.
300 static cl::opt<bool> InterleaveSmallLoopScalarReduction(
301     "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
302     cl::desc("Enable interleaving for loops with small iteration counts that "
303              "contain scalar reductions to expose ILP."));
304 
305 /// The number of stores in a loop that are allowed to need predication.
306 static cl::opt<unsigned> NumberOfStoresToPredicate(
307     "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
308     cl::desc("Max number of stores to be predicated behind an if."));
309 
310 static cl::opt<bool> EnableIndVarRegisterHeur(
311     "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
312     cl::desc("Count the induction variable only once when interleaving"));
313 
314 static cl::opt<bool> EnableCondStoresVectorization(
315     "enable-cond-stores-vec", cl::init(true), cl::Hidden,
316     cl::desc("Enable if predication of stores during vectorization."));
317 
318 static cl::opt<unsigned> MaxNestedScalarReductionIC(
319     "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
320     cl::desc("The maximum interleave count to use when interleaving a scalar "
321              "reduction in a nested loop."));
322 
323 static cl::opt<bool>
324     PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
325                            cl::Hidden,
326                            cl::desc("Prefer in-loop vector reductions, "
327                                     "overriding the targets preference."));
328 
329 static cl::opt<bool> PreferPredicatedReductionSelect(
330     "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
331     cl::desc(
332         "Prefer predicating a reduction operation over an after loop select."));
333 
334 cl::opt<bool> EnableVPlanNativePath(
335     "enable-vplan-native-path", cl::init(false), cl::Hidden,
336     cl::desc("Enable VPlan-native vectorization path with "
337              "support for outer loop vectorization."));
338 
339 // FIXME: Remove this switch once we have divergence analysis. Currently we
340 // assume divergent non-backedge branches when this switch is true.
341 cl::opt<bool> EnableVPlanPredication(
342     "enable-vplan-predication", cl::init(false), cl::Hidden,
343     cl::desc("Enable VPlan-native vectorization path predicator with "
344              "support for outer loop vectorization."));
345 
346 // This flag enables the stress testing of the VPlan H-CFG construction in the
347 // VPlan-native vectorization path. It must be used in conjuction with
348 // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
349 // verification of the H-CFGs built.
350 static cl::opt<bool> VPlanBuildStressTest(
351     "vplan-build-stress-test", cl::init(false), cl::Hidden,
352     cl::desc(
353         "Build VPlan for every supported loop nest in the function and bail "
354         "out right after the build (stress test the VPlan H-CFG construction "
355         "in the VPlan-native vectorization path)."));
356 
357 cl::opt<bool> llvm::EnableLoopInterleaving(
358     "interleave-loops", cl::init(true), cl::Hidden,
359     cl::desc("Enable loop interleaving in Loop vectorization passes"));
360 cl::opt<bool> llvm::EnableLoopVectorization(
361     "vectorize-loops", cl::init(true), cl::Hidden,
362     cl::desc("Run the Loop vectorization passes"));
363 
364 /// A helper function that returns the type of loaded or stored value.
365 static Type *getMemInstValueType(Value *I) {
366   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
367          "Expected Load or Store instruction");
368   if (auto *LI = dyn_cast<LoadInst>(I))
369     return LI->getType();
370   return cast<StoreInst>(I)->getValueOperand()->getType();
371 }
372 
373 /// A helper function that returns true if the given type is irregular. The
374 /// type is irregular if its allocated size doesn't equal the store size of an
375 /// element of the corresponding vector type at the given vectorization factor.
376 static bool hasIrregularType(Type *Ty, const DataLayout &DL, ElementCount VF) {
377   // Determine if an array of VF elements of type Ty is "bitcast compatible"
378   // with a <VF x Ty> vector.
379   if (VF.isVector()) {
380     auto *VectorTy = VectorType::get(Ty, VF);
381     return TypeSize::get(VF.getKnownMinValue() *
382                              DL.getTypeAllocSize(Ty).getFixedValue(),
383                          VF.isScalable()) != DL.getTypeStoreSize(VectorTy);
384   }
385 
386   // If the vectorization factor is one, we just check if an array of type Ty
387   // requires padding between elements.
388   return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
389 }
390 
391 /// A helper function that returns the reciprocal of the block probability of
392 /// predicated blocks. If we return X, we are assuming the predicated block
393 /// will execute once for every X iterations of the loop header.
394 ///
395 /// TODO: We should use actual block probability here, if available. Currently,
396 ///       we always assume predicated blocks have a 50% chance of executing.
397 static unsigned getReciprocalPredBlockProb() { return 2; }
398 
399 /// A helper function that adds a 'fast' flag to floating-point operations.
400 static Value *addFastMathFlag(Value *V) {
401   if (isa<FPMathOperator>(V))
402     cast<Instruction>(V)->setFastMathFlags(FastMathFlags::getFast());
403   return V;
404 }
405 
406 /// A helper function that returns an integer or floating-point constant with
407 /// value C.
408 static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
409   return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
410                            : ConstantFP::get(Ty, C);
411 }
412 
413 /// Returns "best known" trip count for the specified loop \p L as defined by
414 /// the following procedure:
415 ///   1) Returns exact trip count if it is known.
416 ///   2) Returns expected trip count according to profile data if any.
417 ///   3) Returns upper bound estimate if it is known.
418 ///   4) Returns None if all of the above failed.
419 static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
420   // Check if exact trip count is known.
421   if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
422     return ExpectedTC;
423 
424   // Check if there is an expected trip count available from profile data.
425   if (LoopVectorizeWithBlockFrequency)
426     if (auto EstimatedTC = getLoopEstimatedTripCount(L))
427       return EstimatedTC;
428 
429   // Check if upper bound estimate is known.
430   if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
431     return ExpectedTC;
432 
433   return None;
434 }
435 
436 namespace llvm {
437 
438 /// InnerLoopVectorizer vectorizes loops which contain only one basic
439 /// block to a specified vectorization factor (VF).
440 /// This class performs the widening of scalars into vectors, or multiple
441 /// scalars. This class also implements the following features:
442 /// * It inserts an epilogue loop for handling loops that don't have iteration
443 ///   counts that are known to be a multiple of the vectorization factor.
444 /// * It handles the code generation for reduction variables.
445 /// * Scalarization (implementation using scalars) of un-vectorizable
446 ///   instructions.
447 /// InnerLoopVectorizer does not perform any vectorization-legality
448 /// checks, and relies on the caller to check for the different legality
449 /// aspects. The InnerLoopVectorizer relies on the
450 /// LoopVectorizationLegality class to provide information about the induction
451 /// and reduction variables that were found to a given vectorization factor.
452 class InnerLoopVectorizer {
453 public:
454   InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
455                       LoopInfo *LI, DominatorTree *DT,
456                       const TargetLibraryInfo *TLI,
457                       const TargetTransformInfo *TTI, AssumptionCache *AC,
458                       OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
459                       unsigned UnrollFactor, LoopVectorizationLegality *LVL,
460                       LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
461                       ProfileSummaryInfo *PSI)
462       : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
463         AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
464         Builder(PSE.getSE()->getContext()),
465         VectorLoopValueMap(UnrollFactor, VecWidth), Legal(LVL), Cost(CM),
466         BFI(BFI), PSI(PSI) {
467     // Query this against the original loop and save it here because the profile
468     // of the original loop header may change as the transformation happens.
469     OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
470         OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
471   }
472 
473   virtual ~InnerLoopVectorizer() = default;
474 
475   /// Create a new empty loop that will contain vectorized instructions later
476   /// on, while the old loop will be used as the scalar remainder. Control flow
477   /// is generated around the vectorized (and scalar epilogue) loops consisting
478   /// of various checks and bypasses. Return the pre-header block of the new
479   /// loop.
480   /// In the case of epilogue vectorization, this function is overriden to
481   /// handle the more complex control flow around the loops.
482   virtual BasicBlock *createVectorizedLoopSkeleton();
483 
484   /// Widen a single instruction within the innermost loop.
485   void widenInstruction(Instruction &I, VPValue *Def, VPUser &Operands,
486                         VPTransformState &State);
487 
488   /// Widen a single call instruction within the innermost loop.
489   void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
490                             VPTransformState &State);
491 
492   /// Widen a single select instruction within the innermost loop.
493   void widenSelectInstruction(SelectInst &I, VPValue *VPDef, VPUser &Operands,
494                               bool InvariantCond, VPTransformState &State);
495 
496   /// Fix the vectorized code, taking care of header phi's, live-outs, and more.
497   void fixVectorizedLoop(VPTransformState &State);
498 
499   // Return true if any runtime check is added.
500   bool areSafetyChecksAdded() { return AddedSafetyChecks; }
501 
502   /// A type for vectorized values in the new loop. Each value from the
503   /// original loop, when vectorized, is represented by UF vector values in the
504   /// new unrolled loop, where UF is the unroll factor.
505   using VectorParts = SmallVector<Value *, 2>;
506 
507   /// Vectorize a single GetElementPtrInst based on information gathered and
508   /// decisions taken during planning.
509   void widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, VPUser &Indices,
510                 unsigned UF, ElementCount VF, bool IsPtrLoopInvariant,
511                 SmallBitVector &IsIndexLoopInvariant, VPTransformState &State);
512 
513   /// Vectorize a single PHINode in a block. This method handles the induction
514   /// variable canonicalization. It supports both VF = 1 for unrolled loops and
515   /// arbitrary length vectors.
516   void widenPHIInstruction(Instruction *PN, RecurrenceDescriptor *RdxDesc,
517                            Value *StartV, unsigned UF, ElementCount VF);
518 
519   /// A helper function to scalarize a single Instruction in the innermost loop.
520   /// Generates a sequence of scalar instances for each lane between \p MinLane
521   /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
522   /// inclusive. Uses the VPValue operands from \p Operands instead of \p
523   /// Instr's operands.
524   void scalarizeInstruction(Instruction *Instr, VPUser &Operands,
525                             const VPIteration &Instance, bool IfPredicateInstr,
526                             VPTransformState &State);
527 
528   /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
529   /// is provided, the integer induction variable will first be truncated to
530   /// the corresponding type.
531   void widenIntOrFpInduction(PHINode *IV, Value *Start, TruncInst *Trunc,
532                              VPValue *Def, VPValue *CastDef,
533                              VPTransformState &State);
534 
535   /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate a
536   /// vector or scalar value on-demand if one is not yet available. When
537   /// vectorizing a loop, we visit the definition of an instruction before its
538   /// uses. When visiting the definition, we either vectorize or scalarize the
539   /// instruction, creating an entry for it in the corresponding map. (In some
540   /// cases, such as induction variables, we will create both vector and scalar
541   /// entries.) Then, as we encounter uses of the definition, we derive values
542   /// for each scalar or vector use unless such a value is already available.
543   /// For example, if we scalarize a definition and one of its uses is vector,
544   /// we build the required vector on-demand with an insertelement sequence
545   /// when visiting the use. Otherwise, if the use is scalar, we can use the
546   /// existing scalar definition.
547   ///
548   /// Return a value in the new loop corresponding to \p V from the original
549   /// loop at unroll index \p Part. If the value has already been vectorized,
550   /// the corresponding vector entry in VectorLoopValueMap is returned. If,
551   /// however, the value has a scalar entry in VectorLoopValueMap, we construct
552   /// a new vector value on-demand by inserting the scalar values into a vector
553   /// with an insertelement sequence. If the value has been neither vectorized
554   /// nor scalarized, it must be loop invariant, so we simply broadcast the
555   /// value into a vector.
556   Value *getOrCreateVectorValue(Value *V, unsigned Part);
557 
558   void setVectorValue(Value *Scalar, unsigned Part, Value *Vector) {
559     VectorLoopValueMap.setVectorValue(Scalar, Part, Vector);
560   }
561 
562   void resetVectorValue(Value *Scalar, unsigned Part, Value *Vector) {
563     VectorLoopValueMap.resetVectorValue(Scalar, Part, Vector);
564   }
565 
566   void setScalarValue(Value *Scalar, const VPIteration &Instance, Value *V) {
567     VectorLoopValueMap.setScalarValue(Scalar, Instance, V);
568   }
569 
570   /// Return a value in the new loop corresponding to \p V from the original
571   /// loop at unroll and vector indices \p Instance. If the value has been
572   /// vectorized but not scalarized, the necessary extractelement instruction
573   /// will be generated.
574   Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance);
575 
576   /// Construct the vector value of a scalarized value \p V one lane at a time.
577   void packScalarIntoVectorValue(Value *V, const VPIteration &Instance);
578 
579   void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
580                                  VPTransformState &State);
581 
582   /// Try to vectorize interleaved access group \p Group with the base address
583   /// given in \p Addr, optionally masking the vector operations if \p
584   /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
585   /// values in the vectorized loop.
586   void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
587                                 ArrayRef<VPValue *> VPDefs,
588                                 VPTransformState &State, VPValue *Addr,
589                                 ArrayRef<VPValue *> StoredValues,
590                                 VPValue *BlockInMask = nullptr);
591 
592   /// Vectorize Load and Store instructions with the base address given in \p
593   /// Addr, optionally masking the vector operations if \p BlockInMask is
594   /// non-null. Use \p State to translate given VPValues to IR values in the
595   /// vectorized loop.
596   void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
597                                   VPValue *Def, VPValue *Addr,
598                                   VPValue *StoredValue, VPValue *BlockInMask);
599 
600   /// Set the debug location in the builder using the debug location in
601   /// the instruction.
602   void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);
603 
604   /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
605   void fixNonInductionPHIs(VPTransformState &State);
606 
607   /// Create a broadcast instruction. This method generates a broadcast
608   /// instruction (shuffle) for loop invariant values and for the induction
609   /// value. If this is the induction variable then we extend it to N, N+1, ...
610   /// this is needed because each iteration in the loop corresponds to a SIMD
611   /// element.
612   virtual Value *getBroadcastInstrs(Value *V);
613 
614 protected:
615   friend class LoopVectorizationPlanner;
616 
617   /// A small list of PHINodes.
618   using PhiVector = SmallVector<PHINode *, 4>;
619 
620   /// A type for scalarized values in the new loop. Each value from the
621   /// original loop, when scalarized, is represented by UF x VF scalar values
622   /// in the new unrolled loop, where UF is the unroll factor and VF is the
623   /// vectorization factor.
624   using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;
625 
626   /// Set up the values of the IVs correctly when exiting the vector loop.
627   void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
628                     Value *CountRoundDown, Value *EndValue,
629                     BasicBlock *MiddleBlock);
630 
631   /// Create a new induction variable inside L.
632   PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
633                                    Value *Step, Instruction *DL);
634 
635   /// Handle all cross-iteration phis in the header.
636   void fixCrossIterationPHIs(VPTransformState &State);
637 
638   /// Fix a first-order recurrence. This is the second phase of vectorizing
639   /// this phi node.
640   void fixFirstOrderRecurrence(PHINode *Phi, VPTransformState &State);
641 
642   /// Fix a reduction cross-iteration phi. This is the second phase of
643   /// vectorizing this phi node.
644   void fixReduction(PHINode *Phi, VPTransformState &State);
645 
646   /// Clear NSW/NUW flags from reduction instructions if necessary.
647   void clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc);
648 
649   /// Fixup the LCSSA phi nodes in the unique exit block.  This simply
650   /// means we need to add the appropriate incoming value from the middle
651   /// block as exiting edges from the scalar epilogue loop (if present) are
652   /// already in place, and we exit the vector loop exclusively to the middle
653   /// block.
654   void fixLCSSAPHIs(VPTransformState &State);
655 
656   /// Iteratively sink the scalarized operands of a predicated instruction into
657   /// the block that was created for it.
658   void sinkScalarOperands(Instruction *PredInst);
659 
660   /// Shrinks vector element sizes to the smallest bitwidth they can be legally
661   /// represented as.
662   void truncateToMinimalBitwidths();
663 
664   /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
665   /// to each vector element of Val. The sequence starts at StartIndex.
666   /// \p Opcode is relevant for FP induction variable.
667   virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
668                                Instruction::BinaryOps Opcode =
669                                Instruction::BinaryOpsEnd);
670 
671   /// Compute scalar induction steps. \p ScalarIV is the scalar induction
672   /// variable on which to base the steps, \p Step is the size of the step, and
673   /// \p EntryVal is the value from the original loop that maps to the steps.
674   /// Note that \p EntryVal doesn't have to be an induction variable - it
675   /// can also be a truncate instruction.
676   void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
677                         const InductionDescriptor &ID, VPValue *Def,
678                         VPValue *CastDef, VPTransformState &State);
679 
680   /// Create a vector induction phi node based on an existing scalar one. \p
681   /// EntryVal is the value from the original loop that maps to the vector phi
682   /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
683   /// truncate instruction, instead of widening the original IV, we widen a
684   /// version of the IV truncated to \p EntryVal's type.
685   void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
686                                        Value *Step, Value *Start,
687                                        Instruction *EntryVal, VPValue *Def,
688                                        VPValue *CastDef,
689                                        VPTransformState &State);
690 
691   /// Returns true if an instruction \p I should be scalarized instead of
692   /// vectorized for the chosen vectorization factor.
693   bool shouldScalarizeInstruction(Instruction *I) const;
694 
695   /// Returns true if we should generate a scalar version of \p IV.
696   bool needsScalarInduction(Instruction *IV) const;
697 
698   /// If there is a cast involved in the induction variable \p ID, which should
699   /// be ignored in the vectorized loop body, this function records the
700   /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
701   /// cast. We had already proved that the casted Phi is equal to the uncasted
702   /// Phi in the vectorized loop (under a runtime guard), and therefore
703   /// there is no need to vectorize the cast - the same value can be used in the
704   /// vector loop for both the Phi and the cast.
705   /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified,
706   /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
707   ///
708   /// \p EntryVal is the value from the original loop that maps to the vector
709   /// phi node and is used to distinguish what is the IV currently being
710   /// processed - original one (if \p EntryVal is a phi corresponding to the
711   /// original IV) or the "newly-created" one based on the proof mentioned above
712   /// (see also buildScalarSteps() and createVectorIntOrFPInductionPHI()). In the
713   /// latter case \p EntryVal is a TruncInst and we must not record anything for
714   /// that IV, but it's error-prone to expect callers of this routine to care
715   /// about that, hence this explicit parameter.
716   void recordVectorLoopValueForInductionCast(
717       const InductionDescriptor &ID, const Instruction *EntryVal,
718       Value *VectorLoopValue, VPValue *CastDef, VPTransformState &State,
719       unsigned Part, unsigned Lane = UINT_MAX);
720 
721   /// Generate a shuffle sequence that will reverse the vector Vec.
722   virtual Value *reverseVector(Value *Vec);
723 
724   /// Returns (and creates if needed) the original loop trip count.
725   Value *getOrCreateTripCount(Loop *NewLoop);
726 
727   /// Returns (and creates if needed) the trip count of the widened loop.
728   Value *getOrCreateVectorTripCount(Loop *NewLoop);
729 
730   /// Returns a bitcasted value to the requested vector type.
731   /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
732   Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
733                                 const DataLayout &DL);
734 
735   /// Emit a bypass check to see if the vector trip count is zero, including if
736   /// it overflows.
737   void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
738 
739   /// Emit a bypass check to see if all of the SCEV assumptions we've
740   /// had to make are correct.
741   void emitSCEVChecks(Loop *L, BasicBlock *Bypass);
742 
743   /// Emit bypass checks to check any memory assumptions we may have made.
744   void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);
745 
746   /// Compute the transformed value of Index at offset StartValue using step
747   /// StepValue.
748   /// For integer induction, returns StartValue + Index * StepValue.
749   /// For pointer induction, returns StartValue[Index * StepValue].
750   /// FIXME: The newly created binary instructions should contain nsw/nuw
751   /// flags, which can be found from the original scalar operations.
752   Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
753                               const DataLayout &DL,
754                               const InductionDescriptor &ID) const;
755 
756   /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
757   /// vector loop preheader, middle block and scalar preheader. Also
758   /// allocate a loop object for the new vector loop and return it.
759   Loop *createVectorLoopSkeleton(StringRef Prefix);
760 
761   /// Create new phi nodes for the induction variables to resume iteration count
762   /// in the scalar epilogue, from where the vectorized loop left off (given by
763   /// \p VectorTripCount).
764   /// In cases where the loop skeleton is more complicated (eg. epilogue
765   /// vectorization) and the resume values can come from an additional bypass
766   /// block, the \p AdditionalBypass pair provides information about the bypass
767   /// block and the end value on the edge from bypass to this loop.
768   void createInductionResumeValues(
769       Loop *L, Value *VectorTripCount,
770       std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});
771 
772   /// Complete the loop skeleton by adding debug MDs, creating appropriate
773   /// conditional branches in the middle block, preparing the builder and
774   /// running the verifier. Take in the vector loop \p L as argument, and return
775   /// the preheader of the completed vector loop.
776   BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);
777 
778   /// Add additional metadata to \p To that was not present on \p Orig.
779   ///
780   /// Currently this is used to add the noalias annotations based on the
781   /// inserted memchecks.  Use this for instructions that are *cloned* into the
782   /// vector loop.
783   void addNewMetadata(Instruction *To, const Instruction *Orig);
784 
785   /// Add metadata from one instruction to another.
786   ///
787   /// This includes both the original MDs from \p From and additional ones (\see
788   /// addNewMetadata).  Use this for *newly created* instructions in the vector
789   /// loop.
790   void addMetadata(Instruction *To, Instruction *From);
791 
792   /// Similar to the previous function but it adds the metadata to a
793   /// vector of instructions.
794   void addMetadata(ArrayRef<Value *> To, Instruction *From);
795 
796   /// Allow subclasses to override and print debug traces before/after vplan
797   /// execution, when trace information is requested.
798   virtual void printDebugTracesAtStart(){};
799   virtual void printDebugTracesAtEnd(){};
800 
801   /// The original loop.
802   Loop *OrigLoop;
803 
804   /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
805   /// dynamic knowledge to simplify SCEV expressions and converts them to a
806   /// more usable form.
807   PredicatedScalarEvolution &PSE;
808 
809   /// Loop Info.
810   LoopInfo *LI;
811 
812   /// Dominator Tree.
813   DominatorTree *DT;
814 
815   /// Alias Analysis.
816   AAResults *AA;
817 
818   /// Target Library Info.
819   const TargetLibraryInfo *TLI;
820 
821   /// Target Transform Info.
822   const TargetTransformInfo *TTI;
823 
824   /// Assumption Cache.
825   AssumptionCache *AC;
826 
827   /// Interface to emit optimization remarks.
828   OptimizationRemarkEmitter *ORE;
829 
830   /// LoopVersioning.  It's only set up (non-null) if memchecks were
831   /// used.
832   ///
833   /// This is currently only used to add no-alias metadata based on the
834   /// memchecks.  The actually versioning is performed manually.
835   std::unique_ptr<LoopVersioning> LVer;
836 
837   /// The vectorization SIMD factor to use. Each vector will have this many
838   /// vector elements.
839   ElementCount VF;
840 
841   /// The vectorization unroll factor to use. Each scalar is vectorized to this
842   /// many different vector instructions.
843   unsigned UF;
844 
845   /// The builder that we use
846   IRBuilder<> Builder;
847 
848   // --- Vectorization state ---
849 
850   /// The vector-loop preheader.
851   BasicBlock *LoopVectorPreHeader;
852 
853   /// The scalar-loop preheader.
854   BasicBlock *LoopScalarPreHeader;
855 
856   /// Middle Block between the vector and the scalar.
857   BasicBlock *LoopMiddleBlock;
858 
859   /// The (unique) ExitBlock of the scalar loop.  Note that
860   /// there can be multiple exiting edges reaching this block.
861   BasicBlock *LoopExitBlock;
862 
863   /// The vector loop body.
864   BasicBlock *LoopVectorBody;
865 
866   /// The scalar loop body.
867   BasicBlock *LoopScalarBody;
868 
869   /// A list of all bypass blocks. The first block is the entry of the loop.
870   SmallVector<BasicBlock *, 4> LoopBypassBlocks;
871 
872   /// The new Induction variable which was added to the new block.
873   PHINode *Induction = nullptr;
874 
875   /// The induction variable of the old basic block.
876   PHINode *OldInduction = nullptr;
877 
878   /// Maps values from the original loop to their corresponding values in the
879   /// vectorized loop. A key value can map to either vector values, scalar
880   /// values or both kinds of values, depending on whether the key was
881   /// vectorized and scalarized.
882   VectorizerValueMap VectorLoopValueMap;
883 
884   /// Store instructions that were predicated.
885   SmallVector<Instruction *, 4> PredicatedInstructions;
886 
887   /// Trip count of the original loop.
888   Value *TripCount = nullptr;
889 
890   /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
891   Value *VectorTripCount = nullptr;
892 
893   /// The legality analysis.
894   LoopVectorizationLegality *Legal;
895 
896   /// The profitablity analysis.
897   LoopVectorizationCostModel *Cost;
898 
899   // Record whether runtime checks are added.
900   bool AddedSafetyChecks = false;
901 
902   // Holds the end values for each induction variable. We save the end values
903   // so we can later fix-up the external users of the induction variables.
904   DenseMap<PHINode *, Value *> IVEndValues;
905 
906   // Vector of original scalar PHIs whose corresponding widened PHIs need to be
907   // fixed up at the end of vector code generation.
908   SmallVector<PHINode *, 8> OrigPHIsToFix;
909 
910   /// BFI and PSI are used to check for profile guided size optimizations.
911   BlockFrequencyInfo *BFI;
912   ProfileSummaryInfo *PSI;
913 
914   // Whether this loop should be optimized for size based on profile guided size
915   // optimizatios.
916   bool OptForSizeBasedOnProfile;
917 };
918 
919 class InnerLoopUnroller : public InnerLoopVectorizer {
920 public:
921   InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
922                     LoopInfo *LI, DominatorTree *DT,
923                     const TargetLibraryInfo *TLI,
924                     const TargetTransformInfo *TTI, AssumptionCache *AC,
925                     OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
926                     LoopVectorizationLegality *LVL,
927                     LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
928                     ProfileSummaryInfo *PSI)
929       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
930                             ElementCount::getFixed(1), UnrollFactor, LVL, CM,
931                             BFI, PSI) {}
932 
933 private:
934   Value *getBroadcastInstrs(Value *V) override;
935   Value *getStepVector(Value *Val, int StartIdx, Value *Step,
936                        Instruction::BinaryOps Opcode =
937                        Instruction::BinaryOpsEnd) override;
938   Value *reverseVector(Value *Vec) override;
939 };
940 
941 /// Encapsulate information regarding vectorization of a loop and its epilogue.
942 /// This information is meant to be updated and used across two stages of
943 /// epilogue vectorization.
944 struct EpilogueLoopVectorizationInfo {
945   ElementCount MainLoopVF = ElementCount::getFixed(0);
946   unsigned MainLoopUF = 0;
947   ElementCount EpilogueVF = ElementCount::getFixed(0);
948   unsigned EpilogueUF = 0;
949   BasicBlock *MainLoopIterationCountCheck = nullptr;
950   BasicBlock *EpilogueIterationCountCheck = nullptr;
951   BasicBlock *SCEVSafetyCheck = nullptr;
952   BasicBlock *MemSafetyCheck = nullptr;
953   Value *TripCount = nullptr;
954   Value *VectorTripCount = nullptr;
955 
956   EpilogueLoopVectorizationInfo(unsigned MVF, unsigned MUF, unsigned EVF,
957                                 unsigned EUF)
958       : MainLoopVF(ElementCount::getFixed(MVF)), MainLoopUF(MUF),
959         EpilogueVF(ElementCount::getFixed(EVF)), EpilogueUF(EUF) {
960     assert(EUF == 1 &&
961            "A high UF for the epilogue loop is likely not beneficial.");
962   }
963 };
964 
965 /// An extension of the inner loop vectorizer that creates a skeleton for a
966 /// vectorized loop that has its epilogue (residual) also vectorized.
967 /// The idea is to run the vplan on a given loop twice, firstly to setup the
968 /// skeleton and vectorize the main loop, and secondly to complete the skeleton
969 /// from the first step and vectorize the epilogue.  This is achieved by
970 /// deriving two concrete strategy classes from this base class and invoking
971 /// them in succession from the loop vectorizer planner.
972 class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
973 public:
974   InnerLoopAndEpilogueVectorizer(
975       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
976       DominatorTree *DT, const TargetLibraryInfo *TLI,
977       const TargetTransformInfo *TTI, AssumptionCache *AC,
978       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
979       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
980       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI)
981       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
982                             EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI),
983         EPI(EPI) {}
984 
985   // Override this function to handle the more complex control flow around the
986   // three loops.
987   BasicBlock *createVectorizedLoopSkeleton() final override {
988     return createEpilogueVectorizedLoopSkeleton();
989   }
990 
991   /// The interface for creating a vectorized skeleton using one of two
992   /// different strategies, each corresponding to one execution of the vplan
993   /// as described above.
994   virtual BasicBlock *createEpilogueVectorizedLoopSkeleton() = 0;
995 
996   /// Holds and updates state information required to vectorize the main loop
997   /// and its epilogue in two separate passes. This setup helps us avoid
998   /// regenerating and recomputing runtime safety checks. It also helps us to
999   /// shorten the iteration-count-check path length for the cases where the
1000   /// iteration count of the loop is so small that the main vector loop is
1001   /// completely skipped.
1002   EpilogueLoopVectorizationInfo &EPI;
1003 };
1004 
1005 /// A specialized derived class of inner loop vectorizer that performs
1006 /// vectorization of *main* loops in the process of vectorizing loops and their
1007 /// epilogues.
1008 class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
1009 public:
1010   EpilogueVectorizerMainLoop(
1011       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
1012       DominatorTree *DT, const TargetLibraryInfo *TLI,
1013       const TargetTransformInfo *TTI, AssumptionCache *AC,
1014       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
1015       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
1016       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI)
1017       : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
1018                                        EPI, LVL, CM, BFI, PSI) {}
1019   /// Implements the interface for creating a vectorized skeleton using the
1020   /// *main loop* strategy (ie the first pass of vplan execution).
1021   BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;
1022 
1023 protected:
1024   /// Emits an iteration count bypass check once for the main loop (when \p
1025   /// ForEpilogue is false) and once for the epilogue loop (when \p
1026   /// ForEpilogue is true).
1027   BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
1028                                              bool ForEpilogue);
1029   void printDebugTracesAtStart() override;
1030   void printDebugTracesAtEnd() override;
1031 };
1032 
1033 // A specialized derived class of inner loop vectorizer that performs
1034 // vectorization of *epilogue* loops in the process of vectorizing loops and
1035 // their epilogues.
1036 class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
1037 public:
1038   EpilogueVectorizerEpilogueLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
1039                     LoopInfo *LI, DominatorTree *DT,
1040                     const TargetLibraryInfo *TLI,
1041                     const TargetTransformInfo *TTI, AssumptionCache *AC,
1042                     OptimizationRemarkEmitter *ORE,
1043                     EpilogueLoopVectorizationInfo &EPI,
1044                     LoopVectorizationLegality *LVL,
1045                     llvm::LoopVectorizationCostModel *CM,
1046                     BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI)
1047       : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
1048                                        EPI, LVL, CM, BFI, PSI) {}
1049   /// Implements the interface for creating a vectorized skeleton using the
1050   /// *epilogue loop* strategy (ie the second pass of vplan execution).
1051   BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;
1052 
1053 protected:
1054   /// Emits an iteration count bypass check after the main vector loop has
1055   /// finished to see if there are any iterations left to execute by either
1056   /// the vector epilogue or the scalar epilogue.
1057   BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
1058                                                       BasicBlock *Bypass,
1059                                                       BasicBlock *Insert);
1060   void printDebugTracesAtStart() override;
1061   void printDebugTracesAtEnd() override;
1062 };
1063 } // end namespace llvm
1064 
1065 /// Look for a meaningful debug location on the instruction or it's
1066 /// operands.
1067 static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
1068   if (!I)
1069     return I;
1070 
1071   DebugLoc Empty;
1072   if (I->getDebugLoc() != Empty)
1073     return I;
1074 
1075   for (Use &Op : I->operands()) {
1076     if (Instruction *OpInst = dyn_cast<Instruction>(Op))
1077       if (OpInst->getDebugLoc() != Empty)
1078         return OpInst;
1079   }
1080 
1081   return I;
1082 }
1083 
1084 void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
1085   if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
1086     const DILocation *DIL = Inst->getDebugLoc();
1087     if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
1088         !isa<DbgInfoIntrinsic>(Inst)) {
1089       assert(!VF.isScalable() && "scalable vectors not yet supported.");
1090       auto NewDIL =
1091           DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
1092       if (NewDIL)
1093         B.SetCurrentDebugLocation(NewDIL.getValue());
1094       else
1095         LLVM_DEBUG(dbgs()
1096                    << "Failed to create new discriminator: "
1097                    << DIL->getFilename() << " Line: " << DIL->getLine());
1098     }
1099     else
1100       B.SetCurrentDebugLocation(DIL);
1101   } else
1102     B.SetCurrentDebugLocation(DebugLoc());
1103 }
1104 
1105 /// Write a record \p DebugMsg about vectorization failure to the debug
1106 /// output stream. If \p I is passed, it is an instruction that prevents
1107 /// vectorization.
1108 #ifndef NDEBUG
1109 static void debugVectorizationFailure(const StringRef DebugMsg,
1110     Instruction *I) {
1111   dbgs() << "LV: Not vectorizing: " << DebugMsg;
1112   if (I != nullptr)
1113     dbgs() << " " << *I;
1114   else
1115     dbgs() << '.';
1116   dbgs() << '\n';
1117 }
1118 #endif
1119 
1120 /// Create an analysis remark that explains why vectorization failed
1121 ///
1122 /// \p PassName is the name of the pass (e.g. can be AlwaysPrint).  \p
1123 /// RemarkName is the identifier for the remark.  If \p I is passed it is an
1124 /// instruction that prevents vectorization.  Otherwise \p TheLoop is used for
1125 /// the location of the remark.  \return the remark object that can be
1126 /// streamed to.
1127 static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
1128     StringRef RemarkName, Loop *TheLoop, Instruction *I) {
1129   Value *CodeRegion = TheLoop->getHeader();
1130   DebugLoc DL = TheLoop->getStartLoc();
1131 
1132   if (I) {
1133     CodeRegion = I->getParent();
1134     // If there is no debug location attached to the instruction, revert back to
1135     // using the loop's.
1136     if (I->getDebugLoc())
1137       DL = I->getDebugLoc();
1138   }
1139 
1140   OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
1141   R << "loop not vectorized: ";
1142   return R;
1143 }
1144 
1145 /// Return a value for Step multiplied by VF.
1146 static Value *createStepForVF(IRBuilder<> &B, Constant *Step, ElementCount VF) {
1147   assert(isa<ConstantInt>(Step) && "Expected an integer step");
1148   Constant *StepVal = ConstantInt::get(
1149       Step->getType(),
1150       cast<ConstantInt>(Step)->getSExtValue() * VF.getKnownMinValue());
1151   return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
1152 }
1153 
1154 namespace llvm {
1155 
1156 void reportVectorizationFailure(const StringRef DebugMsg,
1157     const StringRef OREMsg, const StringRef ORETag,
1158     OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I) {
1159   LLVM_DEBUG(debugVectorizationFailure(DebugMsg, I));
1160   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1161   ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(),
1162                 ORETag, TheLoop, I) << OREMsg);
1163 }
1164 
1165 } // end namespace llvm
1166 
1167 #ifndef NDEBUG
1168 /// \return string containing a file name and a line # for the given loop.
1169 static std::string getDebugLocString(const Loop *L) {
1170   std::string Result;
1171   if (L) {
1172     raw_string_ostream OS(Result);
1173     if (const DebugLoc LoopDbgLoc = L->getStartLoc())
1174       LoopDbgLoc.print(OS);
1175     else
1176       // Just print the module name.
1177       OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
1178     OS.flush();
1179   }
1180   return Result;
1181 }
1182 #endif
1183 
1184 void InnerLoopVectorizer::addNewMetadata(Instruction *To,
1185                                          const Instruction *Orig) {
1186   // If the loop was versioned with memchecks, add the corresponding no-alias
1187   // metadata.
1188   if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
1189     LVer->annotateInstWithNoAlias(To, Orig);
1190 }
1191 
1192 void InnerLoopVectorizer::addMetadata(Instruction *To,
1193                                       Instruction *From) {
1194   propagateMetadata(To, From);
1195   addNewMetadata(To, From);
1196 }
1197 
1198 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
1199                                       Instruction *From) {
1200   for (Value *V : To) {
1201     if (Instruction *I = dyn_cast<Instruction>(V))
1202       addMetadata(I, From);
1203   }
1204 }
1205 
1206 namespace llvm {
1207 
1208 // Loop vectorization cost-model hints how the scalar epilogue loop should be
1209 // lowered.
1210 enum ScalarEpilogueLowering {
1211 
1212   // The default: allowing scalar epilogues.
1213   CM_ScalarEpilogueAllowed,
1214 
1215   // Vectorization with OptForSize: don't allow epilogues.
1216   CM_ScalarEpilogueNotAllowedOptSize,
1217 
1218   // A special case of vectorisation with OptForSize: loops with a very small
1219   // trip count are considered for vectorization under OptForSize, thereby
1220   // making sure the cost of their loop body is dominant, free of runtime
1221   // guards and scalar iteration overheads.
1222   CM_ScalarEpilogueNotAllowedLowTripLoop,
1223 
1224   // Loop hint predicate indicating an epilogue is undesired.
1225   CM_ScalarEpilogueNotNeededUsePredicate,
1226 
1227   // Directive indicating we must either tail fold or not vectorize
1228   CM_ScalarEpilogueNotAllowedUsePredicate
1229 };
1230 
1231 /// LoopVectorizationCostModel - estimates the expected speedups due to
1232 /// vectorization.
1233 /// In many cases vectorization is not profitable. This can happen because of
1234 /// a number of reasons. In this class we mainly attempt to predict the
1235 /// expected speedup/slowdowns due to the supported instruction set. We use the
1236 /// TargetTransformInfo to query the different backends for the cost of
1237 /// different operations.
1238 class LoopVectorizationCostModel {
1239 public:
1240   LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
1241                              PredicatedScalarEvolution &PSE, LoopInfo *LI,
1242                              LoopVectorizationLegality *Legal,
1243                              const TargetTransformInfo &TTI,
1244                              const TargetLibraryInfo *TLI, DemandedBits *DB,
1245                              AssumptionCache *AC,
1246                              OptimizationRemarkEmitter *ORE, const Function *F,
1247                              const LoopVectorizeHints *Hints,
1248                              InterleavedAccessInfo &IAI)
1249       : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
1250         TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
1251         Hints(Hints), InterleaveInfo(IAI) {}
1252 
1253   /// \return An upper bound for the vectorization factor, or None if
1254   /// vectorization and interleaving should be avoided up front.
1255   Optional<ElementCount> computeMaxVF(ElementCount UserVF, unsigned UserIC);
1256 
1257   /// \return True if runtime checks are required for vectorization, and false
1258   /// otherwise.
1259   bool runtimeChecksRequired();
1260 
1261   /// \return The most profitable vectorization factor and the cost of that VF.
1262   /// This method checks every power of two up to MaxVF. If UserVF is not ZERO
1263   /// then this vectorization factor will be selected if vectorization is
1264   /// possible.
1265   VectorizationFactor selectVectorizationFactor(ElementCount MaxVF);
1266   VectorizationFactor
1267   selectEpilogueVectorizationFactor(const ElementCount MaxVF,
1268                                     const LoopVectorizationPlanner &LVP);
1269 
1270   /// Setup cost-based decisions for user vectorization factor.
1271   void selectUserVectorizationFactor(ElementCount UserVF) {
1272     collectUniformsAndScalars(UserVF);
1273     collectInstsToScalarize(UserVF);
1274   }
1275 
1276   /// \return The size (in bits) of the smallest and widest types in the code
1277   /// that needs to be vectorized. We ignore values that remain scalar such as
1278   /// 64 bit loop indices.
1279   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1280 
1281   /// \return The desired interleave count.
1282   /// If interleave count has been specified by metadata it will be returned.
1283   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1284   /// are the selected vectorization factor and the cost of the selected VF.
1285   unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);
1286 
1287   /// Memory access instruction may be vectorized in more than one way.
1288   /// Form of instruction after vectorization depends on cost.
1289   /// This function takes cost-based decisions for Load/Store instructions
1290   /// and collects them in a map. This decisions map is used for building
1291   /// the lists of loop-uniform and loop-scalar instructions.
1292   /// The calculated cost is saved with widening decision in order to
1293   /// avoid redundant calculations.
1294   void setCostBasedWideningDecision(ElementCount VF);
1295 
1296   /// A struct that represents some properties of the register usage
1297   /// of a loop.
1298   struct RegisterUsage {
1299     /// Holds the number of loop invariant values that are used in the loop.
1300     /// The key is ClassID of target-provided register class.
1301     SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
1302     /// Holds the maximum number of concurrent live intervals in the loop.
1303     /// The key is ClassID of target-provided register class.
1304     SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
1305   };
1306 
1307   /// \return Returns information about the register usages of the loop for the
1308   /// given vectorization factors.
1309   SmallVector<RegisterUsage, 8>
1310   calculateRegisterUsage(ArrayRef<ElementCount> VFs);
1311 
1312   /// Collect values we want to ignore in the cost model.
1313   void collectValuesToIgnore();
1314 
1315   /// Split reductions into those that happen in the loop, and those that happen
1316   /// outside. In loop reductions are collected into InLoopReductionChains.
1317   void collectInLoopReductions();
1318 
1319   /// \returns The smallest bitwidth each instruction can be represented with.
1320   /// The vector equivalents of these instructions should be truncated to this
1321   /// type.
1322   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1323     return MinBWs;
1324   }
1325 
1326   /// \returns True if it is more profitable to scalarize instruction \p I for
1327   /// vectorization factor \p VF.
1328   bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
1329     assert(VF.isVector() &&
1330            "Profitable to scalarize relevant only for VF > 1.");
1331 
1332     // Cost model is not run in the VPlan-native path - return conservative
1333     // result until this changes.
1334     if (EnableVPlanNativePath)
1335       return false;
1336 
1337     auto Scalars = InstsToScalarize.find(VF);
1338     assert(Scalars != InstsToScalarize.end() &&
1339            "VF not yet analyzed for scalarization profitability");
1340     return Scalars->second.find(I) != Scalars->second.end();
1341   }
1342 
1343   /// Returns true if \p I is known to be uniform after vectorization.
1344   bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
1345     if (VF.isScalar())
1346       return true;
1347 
1348     // Cost model is not run in the VPlan-native path - return conservative
1349     // result until this changes.
1350     if (EnableVPlanNativePath)
1351       return false;
1352 
1353     auto UniformsPerVF = Uniforms.find(VF);
1354     assert(UniformsPerVF != Uniforms.end() &&
1355            "VF not yet analyzed for uniformity");
1356     return UniformsPerVF->second.count(I);
1357   }
1358 
1359   /// Returns true if \p I is known to be scalar after vectorization.
1360   bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
1361     if (VF.isScalar())
1362       return true;
1363 
1364     // Cost model is not run in the VPlan-native path - return conservative
1365     // result until this changes.
1366     if (EnableVPlanNativePath)
1367       return false;
1368 
1369     auto ScalarsPerVF = Scalars.find(VF);
1370     assert(ScalarsPerVF != Scalars.end() &&
1371            "Scalar values are not calculated for VF");
1372     return ScalarsPerVF->second.count(I);
1373   }
1374 
1375   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1376   /// for vectorization factor \p VF.
1377   bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
1378     return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
1379            !isProfitableToScalarize(I, VF) &&
1380            !isScalarAfterVectorization(I, VF);
1381   }
1382 
1383   /// Decision that was taken during cost calculation for memory instruction.
1384   enum InstWidening {
1385     CM_Unknown,
1386     CM_Widen,         // For consecutive accesses with stride +1.
1387     CM_Widen_Reverse, // For consecutive accesses with stride -1.
1388     CM_Interleave,
1389     CM_GatherScatter,
1390     CM_Scalarize
1391   };
1392 
1393   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1394   /// instruction \p I and vector width \p VF.
1395   void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
1396                            InstructionCost Cost) {
1397     assert(VF.isVector() && "Expected VF >=2");
1398     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1399   }
1400 
1401   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1402   /// interleaving group \p Grp and vector width \p VF.
1403   void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
1404                            ElementCount VF, InstWidening W,
1405                            InstructionCost Cost) {
1406     assert(VF.isVector() && "Expected VF >=2");
1407     /// Broadcast this decicion to all instructions inside the group.
1408     /// But the cost will be assigned to one instruction only.
1409     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1410       if (auto *I = Grp->getMember(i)) {
1411         if (Grp->getInsertPos() == I)
1412           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1413         else
1414           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1415       }
1416     }
1417   }
1418 
1419   /// Return the cost model decision for the given instruction \p I and vector
1420   /// width \p VF. Return CM_Unknown if this instruction did not pass
1421   /// through the cost modeling.
1422   InstWidening getWideningDecision(Instruction *I, ElementCount VF) {
1423     assert(VF.isVector() && "Expected VF to be a vector VF");
1424     // Cost model is not run in the VPlan-native path - return conservative
1425     // result until this changes.
1426     if (EnableVPlanNativePath)
1427       return CM_GatherScatter;
1428 
1429     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1430     auto Itr = WideningDecisions.find(InstOnVF);
1431     if (Itr == WideningDecisions.end())
1432       return CM_Unknown;
1433     return Itr->second.first;
1434   }
1435 
1436   /// Return the vectorization cost for the given instruction \p I and vector
1437   /// width \p VF.
1438   InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
1439     assert(VF.isVector() && "Expected VF >=2");
1440     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1441     assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1442            "The cost is not calculated");
1443     return WideningDecisions[InstOnVF].second;
1444   }
1445 
1446   /// Return True if instruction \p I is an optimizable truncate whose operand
1447   /// is an induction variable. Such a truncate will be removed by adding a new
1448   /// induction variable with the destination type.
1449   bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
1450     // If the instruction is not a truncate, return false.
1451     auto *Trunc = dyn_cast<TruncInst>(I);
1452     if (!Trunc)
1453       return false;
1454 
1455     // Get the source and destination types of the truncate.
1456     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1457     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1458 
1459     // If the truncate is free for the given types, return false. Replacing a
1460     // free truncate with an induction variable would add an induction variable
1461     // update instruction to each iteration of the loop. We exclude from this
1462     // check the primary induction variable since it will need an update
1463     // instruction regardless.
1464     Value *Op = Trunc->getOperand(0);
1465     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1466       return false;
1467 
1468     // If the truncated value is not an induction variable, return false.
1469     return Legal->isInductionPhi(Op);
1470   }
1471 
1472   /// Collects the instructions to scalarize for each predicated instruction in
1473   /// the loop.
1474   void collectInstsToScalarize(ElementCount VF);
1475 
1476   /// Collect Uniform and Scalar values for the given \p VF.
1477   /// The sets depend on CM decision for Load/Store instructions
1478   /// that may be vectorized as interleave, gather-scatter or scalarized.
1479   void collectUniformsAndScalars(ElementCount VF) {
1480     // Do the analysis once.
1481     if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
1482       return;
1483     setCostBasedWideningDecision(VF);
1484     collectLoopUniforms(VF);
1485     collectLoopScalars(VF);
1486   }
1487 
1488   /// Returns true if the target machine supports masked store operation
1489   /// for the given \p DataType and kind of access to \p Ptr.
1490   bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) {
1491     return Legal->isConsecutivePtr(Ptr) &&
1492            TTI.isLegalMaskedStore(DataType, Alignment);
1493   }
1494 
1495   /// Returns true if the target machine supports masked load operation
1496   /// for the given \p DataType and kind of access to \p Ptr.
1497   bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) {
1498     return Legal->isConsecutivePtr(Ptr) &&
1499            TTI.isLegalMaskedLoad(DataType, Alignment);
1500   }
1501 
1502   /// Returns true if the target machine supports masked scatter operation
1503   /// for the given \p DataType.
1504   bool isLegalMaskedScatter(Type *DataType, Align Alignment) {
1505     return TTI.isLegalMaskedScatter(DataType, Alignment);
1506   }
1507 
1508   /// Returns true if the target machine supports masked gather operation
1509   /// for the given \p DataType.
1510   bool isLegalMaskedGather(Type *DataType, Align Alignment) {
1511     return TTI.isLegalMaskedGather(DataType, Alignment);
1512   }
1513 
1514   /// Returns true if the target machine can represent \p V as a masked gather
1515   /// or scatter operation.
1516   bool isLegalGatherOrScatter(Value *V) {
1517     bool LI = isa<LoadInst>(V);
1518     bool SI = isa<StoreInst>(V);
1519     if (!LI && !SI)
1520       return false;
1521     auto *Ty = getMemInstValueType(V);
1522     Align Align = getLoadStoreAlignment(V);
1523     return (LI && isLegalMaskedGather(Ty, Align)) ||
1524            (SI && isLegalMaskedScatter(Ty, Align));
1525   }
1526 
1527   /// Returns true if \p I is an instruction that will be scalarized with
1528   /// predication. Such instructions include conditional stores and
1529   /// instructions that may divide by zero.
1530   /// If a non-zero VF has been calculated, we check if I will be scalarized
1531   /// predication for that VF.
1532   bool isScalarWithPredication(Instruction *I,
1533                                ElementCount VF = ElementCount::getFixed(1));
1534 
1535   // Returns true if \p I is an instruction that will be predicated either
1536   // through scalar predication or masked load/store or masked gather/scatter.
1537   // Superset of instructions that return true for isScalarWithPredication.
1538   bool isPredicatedInst(Instruction *I) {
1539     if (!blockNeedsPredication(I->getParent()))
1540       return false;
1541     // Loads and stores that need some form of masked operation are predicated
1542     // instructions.
1543     if (isa<LoadInst>(I) || isa<StoreInst>(I))
1544       return Legal->isMaskRequired(I);
1545     return isScalarWithPredication(I);
1546   }
1547 
1548   /// Returns true if \p I is a memory instruction with consecutive memory
1549   /// access that can be widened.
1550   bool
1551   memoryInstructionCanBeWidened(Instruction *I,
1552                                 ElementCount VF = ElementCount::getFixed(1));
1553 
1554   /// Returns true if \p I is a memory instruction in an interleaved-group
1555   /// of memory accesses that can be vectorized with wide vector loads/stores
1556   /// and shuffles.
1557   bool
1558   interleavedAccessCanBeWidened(Instruction *I,
1559                                 ElementCount VF = ElementCount::getFixed(1));
1560 
1561   /// Check if \p Instr belongs to any interleaved access group.
1562   bool isAccessInterleaved(Instruction *Instr) {
1563     return InterleaveInfo.isInterleaved(Instr);
1564   }
1565 
1566   /// Get the interleaved access group that \p Instr belongs to.
1567   const InterleaveGroup<Instruction> *
1568   getInterleavedAccessGroup(Instruction *Instr) {
1569     return InterleaveInfo.getInterleaveGroup(Instr);
1570   }
1571 
1572   /// Returns true if we're required to use a scalar epilogue for at least
1573   /// the final iteration of the original loop.
1574   bool requiresScalarEpilogue() const {
1575     if (!isScalarEpilogueAllowed())
1576       return false;
1577     // If we might exit from anywhere but the latch, must run the exiting
1578     // iteration in scalar form.
1579     if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
1580       return true;
1581     return InterleaveInfo.requiresScalarEpilogue();
1582   }
1583 
1584   /// Returns true if a scalar epilogue is not allowed due to optsize or a
1585   /// loop hint annotation.
1586   bool isScalarEpilogueAllowed() const {
1587     return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1588   }
1589 
1590   /// Returns true if all loop blocks should be masked to fold tail loop.
1591   bool foldTailByMasking() const { return FoldTailByMasking; }
1592 
1593   bool blockNeedsPredication(BasicBlock *BB) {
1594     return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1595   }
1596 
1597   /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
1598   /// nodes to the chain of instructions representing the reductions. Uses a
1599   /// MapVector to ensure deterministic iteration order.
1600   using ReductionChainMap =
1601       SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;
1602 
1603   /// Return the chain of instructions representing an inloop reduction.
1604   const ReductionChainMap &getInLoopReductionChains() const {
1605     return InLoopReductionChains;
1606   }
1607 
1608   /// Returns true if the Phi is part of an inloop reduction.
1609   bool isInLoopReduction(PHINode *Phi) const {
1610     return InLoopReductionChains.count(Phi);
1611   }
1612 
1613   /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1614   /// with factor VF.  Return the cost of the instruction, including
1615   /// scalarization overhead if it's needed.
1616   InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF);
1617 
1618   /// Estimate cost of a call instruction CI if it were vectorized with factor
1619   /// VF. Return the cost of the instruction, including scalarization overhead
1620   /// if it's needed. The flag NeedToScalarize shows if the call needs to be
1621   /// scalarized -
1622   /// i.e. either vector version isn't available, or is too expensive.
1623   InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
1624                                     bool &NeedToScalarize);
1625 
1626   /// Invalidates decisions already taken by the cost model.
1627   void invalidateCostModelingDecisions() {
1628     WideningDecisions.clear();
1629     Uniforms.clear();
1630     Scalars.clear();
1631   }
1632 
1633 private:
1634   unsigned NumPredStores = 0;
1635 
1636   /// \return An upper bound for the vectorization factor, a power-of-2 larger
1637   /// than zero. One is returned if vectorization should best be avoided due
1638   /// to cost.
1639   ElementCount computeFeasibleMaxVF(unsigned ConstTripCount,
1640                                     ElementCount UserVF);
1641 
1642   /// The vectorization cost is a combination of the cost itself and a boolean
1643   /// indicating whether any of the contributing operations will actually
1644   /// operate on
1645   /// vector values after type legalization in the backend. If this latter value
1646   /// is
1647   /// false, then all operations will be scalarized (i.e. no vectorization has
1648   /// actually taken place).
1649   using VectorizationCostTy = std::pair<InstructionCost, bool>;
1650 
1651   /// Returns the expected execution cost. The unit of the cost does
1652   /// not matter because we use the 'cost' units to compare different
1653   /// vector widths. The cost that is returned is *not* normalized by
1654   /// the factor width.
1655   VectorizationCostTy expectedCost(ElementCount VF);
1656 
1657   /// Returns the execution time cost of an instruction for a given vector
1658   /// width. Vector width of one means scalar.
1659   VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1660 
1661   /// The cost-computation logic from getInstructionCost which provides
1662   /// the vector type as an output parameter.
1663   InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
1664                                      Type *&VectorTy);
1665 
1666   /// Return the cost of instructions in an inloop reduction pattern, if I is
1667   /// part of that pattern.
1668   InstructionCost getReductionPatternCost(Instruction *I, ElementCount VF,
1669                                           Type *VectorTy,
1670                                           TTI::TargetCostKind CostKind);
1671 
1672   /// Calculate vectorization cost of memory instruction \p I.
1673   InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1674 
1675   /// The cost computation for scalarized memory instruction.
1676   InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1677 
1678   /// The cost computation for interleaving group of memory instructions.
1679   InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1680 
1681   /// The cost computation for Gather/Scatter instruction.
1682   InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1683 
1684   /// The cost computation for widening instruction \p I with consecutive
1685   /// memory access.
1686   InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1687 
1688   /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1689   /// Load: scalar load + broadcast.
1690   /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1691   /// element)
1692   InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1693 
1694   /// Estimate the overhead of scalarizing an instruction. This is a
1695   /// convenience wrapper for the type-based getScalarizationOverhead API.
1696   InstructionCost getScalarizationOverhead(Instruction *I, ElementCount VF);
1697 
1698   /// Returns whether the instruction is a load or store and will be a emitted
1699   /// as a vector operation.
1700   bool isConsecutiveLoadOrStore(Instruction *I);
1701 
1702   /// Returns true if an artificially high cost for emulated masked memrefs
1703   /// should be used.
1704   bool useEmulatedMaskMemRefHack(Instruction *I);
1705 
1706   /// Map of scalar integer values to the smallest bitwidth they can be legally
1707   /// represented as. The vector equivalents of these values should be truncated
1708   /// to this type.
1709   MapVector<Instruction *, uint64_t> MinBWs;
1710 
1711   /// A type representing the costs for instructions if they were to be
1712   /// scalarized rather than vectorized. The entries are Instruction-Cost
1713   /// pairs.
1714   using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1715 
1716   /// A set containing all BasicBlocks that are known to present after
1717   /// vectorization as a predicated block.
1718   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1719 
1720   /// Records whether it is allowed to have the original scalar loop execute at
1721   /// least once. This may be needed as a fallback loop in case runtime
1722   /// aliasing/dependence checks fail, or to handle the tail/remainder
1723   /// iterations when the trip count is unknown or doesn't divide by the VF,
1724   /// or as a peel-loop to handle gaps in interleave-groups.
1725   /// Under optsize and when the trip count is very small we don't allow any
1726   /// iterations to execute in the scalar loop.
1727   ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1728 
1729   /// All blocks of loop are to be masked to fold tail of scalar iterations.
1730   bool FoldTailByMasking = false;
1731 
1732   /// A map holding scalar costs for different vectorization factors. The
1733   /// presence of a cost for an instruction in the mapping indicates that the
1734   /// instruction will be scalarized when vectorizing with the associated
1735   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1736   DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1737 
1738   /// Holds the instructions known to be uniform after vectorization.
1739   /// The data is collected per VF.
1740   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1741 
1742   /// Holds the instructions known to be scalar after vectorization.
1743   /// The data is collected per VF.
1744   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1745 
1746   /// Holds the instructions (address computations) that are forced to be
1747   /// scalarized.
1748   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1749 
1750   /// PHINodes of the reductions that should be expanded in-loop along with
1751   /// their associated chains of reduction operations, in program order from top
1752   /// (PHI) to bottom
1753   ReductionChainMap InLoopReductionChains;
1754 
1755   /// A Map of inloop reduction operations and their immediate chain operand.
1756   /// FIXME: This can be removed once reductions can be costed correctly in
1757   /// vplan. This was added to allow quick lookup to the inloop operations,
1758   /// without having to loop through InLoopReductionChains.
1759   DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1760 
1761   /// Returns the expected difference in cost from scalarizing the expression
1762   /// feeding a predicated instruction \p PredInst. The instructions to
1763   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1764   /// non-negative return value implies the expression will be scalarized.
1765   /// Currently, only single-use chains are considered for scalarization.
1766   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1767                               ElementCount VF);
1768 
1769   /// Collect the instructions that are uniform after vectorization. An
1770   /// instruction is uniform if we represent it with a single scalar value in
1771   /// the vectorized loop corresponding to each vector iteration. Examples of
1772   /// uniform instructions include pointer operands of consecutive or
1773   /// interleaved memory accesses. Note that although uniformity implies an
1774   /// instruction will be scalar, the reverse is not true. In general, a
1775   /// scalarized instruction will be represented by VF scalar values in the
1776   /// vectorized loop, each corresponding to an iteration of the original
1777   /// scalar loop.
1778   void collectLoopUniforms(ElementCount VF);
1779 
1780   /// Collect the instructions that are scalar after vectorization. An
1781   /// instruction is scalar if it is known to be uniform or will be scalarized
1782   /// during vectorization. Non-uniform scalarized instructions will be
1783   /// represented by VF values in the vectorized loop, each corresponding to an
1784   /// iteration of the original scalar loop.
1785   void collectLoopScalars(ElementCount VF);
1786 
1787   /// Keeps cost model vectorization decision and cost for instructions.
1788   /// Right now it is used for memory instructions only.
1789   using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1790                                 std::pair<InstWidening, InstructionCost>>;
1791 
1792   DecisionList WideningDecisions;
1793 
1794   /// Returns true if \p V is expected to be vectorized and it needs to be
1795   /// extracted.
1796   bool needsExtract(Value *V, ElementCount VF) const {
1797     Instruction *I = dyn_cast<Instruction>(V);
1798     if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1799         TheLoop->isLoopInvariant(I))
1800       return false;
1801 
1802     // Assume we can vectorize V (and hence we need extraction) if the
1803     // scalars are not computed yet. This can happen, because it is called
1804     // via getScalarizationOverhead from setCostBasedWideningDecision, before
1805     // the scalars are collected. That should be a safe assumption in most
1806     // cases, because we check if the operands have vectorizable types
1807     // beforehand in LoopVectorizationLegality.
1808     return Scalars.find(VF) == Scalars.end() ||
1809            !isScalarAfterVectorization(I, VF);
1810   };
1811 
1812   /// Returns a range containing only operands needing to be extracted.
1813   SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1814                                                    ElementCount VF) {
1815     return SmallVector<Value *, 4>(make_filter_range(
1816         Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
1817   }
1818 
1819   /// Determines if we have the infrastructure to vectorize loop \p L and its
1820   /// epilogue, assuming the main loop is vectorized by \p VF.
1821   bool isCandidateForEpilogueVectorization(const Loop &L,
1822                                            const ElementCount VF) const;
1823 
1824   /// Returns true if epilogue vectorization is considered profitable, and
1825   /// false otherwise.
1826   /// \p VF is the vectorization factor chosen for the original loop.
1827   bool isEpilogueVectorizationProfitable(const ElementCount VF) const;
1828 
1829 public:
1830   /// The loop that we evaluate.
1831   Loop *TheLoop;
1832 
1833   /// Predicated scalar evolution analysis.
1834   PredicatedScalarEvolution &PSE;
1835 
1836   /// Loop Info analysis.
1837   LoopInfo *LI;
1838 
1839   /// Vectorization legality.
1840   LoopVectorizationLegality *Legal;
1841 
1842   /// Vector target information.
1843   const TargetTransformInfo &TTI;
1844 
1845   /// Target Library Info.
1846   const TargetLibraryInfo *TLI;
1847 
1848   /// Demanded bits analysis.
1849   DemandedBits *DB;
1850 
1851   /// Assumption cache.
1852   AssumptionCache *AC;
1853 
1854   /// Interface to emit optimization remarks.
1855   OptimizationRemarkEmitter *ORE;
1856 
1857   const Function *TheFunction;
1858 
1859   /// Loop Vectorize Hint.
1860   const LoopVectorizeHints *Hints;
1861 
1862   /// The interleave access information contains groups of interleaved accesses
1863   /// with the same stride and close to each other.
1864   InterleavedAccessInfo &InterleaveInfo;
1865 
1866   /// Values to ignore in the cost model.
1867   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1868 
1869   /// Values to ignore in the cost model when VF > 1.
1870   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1871 
1872   /// Profitable vector factors.
1873   SmallVector<VectorizationFactor, 8> ProfitableVFs;
1874 };
1875 
1876 } // end namespace llvm
1877 
1878 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
1879 // vectorization. The loop needs to be annotated with #pragma omp simd
1880 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the
1881 // vector length information is not provided, vectorization is not considered
1882 // explicit. Interleave hints are not allowed either. These limitations will be
1883 // relaxed in the future.
1884 // Please, note that we are currently forced to abuse the pragma 'clang
1885 // vectorize' semantics. This pragma provides *auto-vectorization hints*
1886 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
1887 // provides *explicit vectorization hints* (LV can bypass legal checks and
1888 // assume that vectorization is legal). However, both hints are implemented
1889 // using the same metadata (llvm.loop.vectorize, processed by
1890 // LoopVectorizeHints). This will be fixed in the future when the native IR
1891 // representation for pragma 'omp simd' is introduced.
1892 static bool isExplicitVecOuterLoop(Loop *OuterLp,
1893                                    OptimizationRemarkEmitter *ORE) {
1894   assert(!OuterLp->isInnermost() && "This is not an outer loop");
1895   LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
1896 
1897   // Only outer loops with an explicit vectorization hint are supported.
1898   // Unannotated outer loops are ignored.
1899   if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
1900     return false;
1901 
1902   Function *Fn = OuterLp->getHeader()->getParent();
1903   if (!Hints.allowVectorization(Fn, OuterLp,
1904                                 true /*VectorizeOnlyWhenForced*/)) {
1905     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
1906     return false;
1907   }
1908 
1909   if (Hints.getInterleave() > 1) {
1910     // TODO: Interleave support is future work.
1911     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
1912                          "outer loops.\n");
1913     Hints.emitRemarkWithHints();
1914     return false;
1915   }
1916 
1917   return true;
1918 }
1919 
1920 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
1921                                   OptimizationRemarkEmitter *ORE,
1922                                   SmallVectorImpl<Loop *> &V) {
1923   // Collect inner loops and outer loops without irreducible control flow. For
1924   // now, only collect outer loops that have explicit vectorization hints. If we
1925   // are stress testing the VPlan H-CFG construction, we collect the outermost
1926   // loop of every loop nest.
1927   if (L.isInnermost() || VPlanBuildStressTest ||
1928       (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
1929     LoopBlocksRPO RPOT(&L);
1930     RPOT.perform(LI);
1931     if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
1932       V.push_back(&L);
1933       // TODO: Collect inner loops inside marked outer loops in case
1934       // vectorization fails for the outer loop. Do not invoke
1935       // 'containsIrreducibleCFG' again for inner loops when the outer loop is
1936       // already known to be reducible. We can use an inherited attribute for
1937       // that.
1938       return;
1939     }
1940   }
1941   for (Loop *InnerL : L)
1942     collectSupportedLoops(*InnerL, LI, ORE, V);
1943 }
1944 
1945 namespace {
1946 
1947 /// The LoopVectorize Pass.
1948 struct LoopVectorize : public FunctionPass {
1949   /// Pass identification, replacement for typeid
1950   static char ID;
1951 
1952   LoopVectorizePass Impl;
1953 
1954   explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
1955                          bool VectorizeOnlyWhenForced = false)
1956       : FunctionPass(ID),
1957         Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
1958     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
1959   }
1960 
1961   bool runOnFunction(Function &F) override {
1962     if (skipFunction(F))
1963       return false;
1964 
1965     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
1966     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
1967     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
1968     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1969     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
1970     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
1971     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
1972     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
1973     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1974     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
1975     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
1976     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
1977     auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
1978 
1979     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
1980         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
1981 
1982     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
1983                         GetLAA, *ORE, PSI).MadeAnyChange;
1984   }
1985 
1986   void getAnalysisUsage(AnalysisUsage &AU) const override {
1987     AU.addRequired<AssumptionCacheTracker>();
1988     AU.addRequired<BlockFrequencyInfoWrapperPass>();
1989     AU.addRequired<DominatorTreeWrapperPass>();
1990     AU.addRequired<LoopInfoWrapperPass>();
1991     AU.addRequired<ScalarEvolutionWrapperPass>();
1992     AU.addRequired<TargetTransformInfoWrapperPass>();
1993     AU.addRequired<AAResultsWrapperPass>();
1994     AU.addRequired<LoopAccessLegacyAnalysis>();
1995     AU.addRequired<DemandedBitsWrapperPass>();
1996     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
1997     AU.addRequired<InjectTLIMappingsLegacy>();
1998 
1999     // We currently do not preserve loopinfo/dominator analyses with outer loop
2000     // vectorization. Until this is addressed, mark these analyses as preserved
2001     // only for non-VPlan-native path.
2002     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
2003     if (!EnableVPlanNativePath) {
2004       AU.addPreserved<LoopInfoWrapperPass>();
2005       AU.addPreserved<DominatorTreeWrapperPass>();
2006     }
2007 
2008     AU.addPreserved<BasicAAWrapperPass>();
2009     AU.addPreserved<GlobalsAAWrapperPass>();
2010     AU.addRequired<ProfileSummaryInfoWrapperPass>();
2011   }
2012 };
2013 
2014 } // end anonymous namespace
2015 
2016 //===----------------------------------------------------------------------===//
2017 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
2018 // LoopVectorizationCostModel and LoopVectorizationPlanner.
2019 //===----------------------------------------------------------------------===//
2020 
2021 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
2022   // We need to place the broadcast of invariant variables outside the loop,
2023   // but only if it's proven safe to do so. Else, broadcast will be inside
2024   // vector loop body.
2025   Instruction *Instr = dyn_cast<Instruction>(V);
2026   bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
2027                      (!Instr ||
2028                       DT->dominates(Instr->getParent(), LoopVectorPreHeader));
2029   // Place the code for broadcasting invariant variables in the new preheader.
2030   IRBuilder<>::InsertPointGuard Guard(Builder);
2031   if (SafeToHoist)
2032     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2033 
2034   // Broadcast the scalar into all locations in the vector.
2035   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
2036 
2037   return Shuf;
2038 }
2039 
2040 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
2041     const InductionDescriptor &II, Value *Step, Value *Start,
2042     Instruction *EntryVal, VPValue *Def, VPValue *CastDef,
2043     VPTransformState &State) {
2044   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
2045          "Expected either an induction phi-node or a truncate of it!");
2046 
2047   // Construct the initial value of the vector IV in the vector loop preheader
2048   auto CurrIP = Builder.saveIP();
2049   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2050   if (isa<TruncInst>(EntryVal)) {
2051     assert(Start->getType()->isIntegerTy() &&
2052            "Truncation requires an integer type");
2053     auto *TruncType = cast<IntegerType>(EntryVal->getType());
2054     Step = Builder.CreateTrunc(Step, TruncType);
2055     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
2056   }
2057   Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
2058   Value *SteppedStart =
2059       getStepVector(SplatStart, 0, Step, II.getInductionOpcode());
2060 
2061   // We create vector phi nodes for both integer and floating-point induction
2062   // variables. Here, we determine the kind of arithmetic we will perform.
2063   Instruction::BinaryOps AddOp;
2064   Instruction::BinaryOps MulOp;
2065   if (Step->getType()->isIntegerTy()) {
2066     AddOp = Instruction::Add;
2067     MulOp = Instruction::Mul;
2068   } else {
2069     AddOp = II.getInductionOpcode();
2070     MulOp = Instruction::FMul;
2071   }
2072 
2073   // Multiply the vectorization factor by the step using integer or
2074   // floating-point arithmetic as appropriate.
2075   Value *ConstVF =
2076       getSignedIntOrFpConstant(Step->getType(), VF.getKnownMinValue());
2077   Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF));
2078 
2079   // Create a vector splat to use in the induction update.
2080   //
2081   // FIXME: If the step is non-constant, we create the vector splat with
2082   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
2083   //        handle a constant vector splat.
2084   assert(!VF.isScalable() && "scalable vectors not yet supported.");
2085   Value *SplatVF = isa<Constant>(Mul)
2086                        ? ConstantVector::getSplat(VF, cast<Constant>(Mul))
2087                        : Builder.CreateVectorSplat(VF, Mul);
2088   Builder.restoreIP(CurrIP);
2089 
2090   // We may need to add the step a number of times, depending on the unroll
2091   // factor. The last of those goes into the PHI.
2092   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
2093                                     &*LoopVectorBody->getFirstInsertionPt());
2094   VecInd->setDebugLoc(EntryVal->getDebugLoc());
2095   Instruction *LastInduction = VecInd;
2096   for (unsigned Part = 0; Part < UF; ++Part) {
2097     State.set(Def, EntryVal, LastInduction, Part);
2098 
2099     if (isa<TruncInst>(EntryVal))
2100       addMetadata(LastInduction, EntryVal);
2101     recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, CastDef,
2102                                           State, Part);
2103 
2104     LastInduction = cast<Instruction>(addFastMathFlag(
2105         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")));
2106     LastInduction->setDebugLoc(EntryVal->getDebugLoc());
2107   }
2108 
2109   // Move the last step to the end of the latch block. This ensures consistent
2110   // placement of all induction updates.
2111   auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
2112   auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
2113   auto *ICmp = cast<Instruction>(Br->getCondition());
2114   LastInduction->moveBefore(ICmp);
2115   LastInduction->setName("vec.ind.next");
2116 
2117   VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
2118   VecInd->addIncoming(LastInduction, LoopVectorLatch);
2119 }
2120 
2121 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
2122   return Cost->isScalarAfterVectorization(I, VF) ||
2123          Cost->isProfitableToScalarize(I, VF);
2124 }
2125 
2126 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
2127   if (shouldScalarizeInstruction(IV))
2128     return true;
2129   auto isScalarInst = [&](User *U) -> bool {
2130     auto *I = cast<Instruction>(U);
2131     return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
2132   };
2133   return llvm::any_of(IV->users(), isScalarInst);
2134 }
2135 
2136 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast(
2137     const InductionDescriptor &ID, const Instruction *EntryVal,
2138     Value *VectorLoopVal, VPValue *CastDef, VPTransformState &State,
2139     unsigned Part, unsigned Lane) {
2140   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
2141          "Expected either an induction phi-node or a truncate of it!");
2142 
2143   // This induction variable is not the phi from the original loop but the
2144   // newly-created IV based on the proof that casted Phi is equal to the
2145   // uncasted Phi in the vectorized loop (under a runtime guard possibly). It
2146   // re-uses the same InductionDescriptor that original IV uses but we don't
2147   // have to do any recording in this case - that is done when original IV is
2148   // processed.
2149   if (isa<TruncInst>(EntryVal))
2150     return;
2151 
2152   const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
2153   if (Casts.empty())
2154     return;
2155   // Only the first Cast instruction in the Casts vector is of interest.
2156   // The rest of the Casts (if exist) have no uses outside the
2157   // induction update chain itself.
2158   if (Lane < UINT_MAX)
2159     State.set(CastDef, VectorLoopVal, VPIteration(Part, Lane));
2160   else
2161     State.set(CastDef, VectorLoopVal, Part);
2162 }
2163 
2164 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, Value *Start,
2165                                                 TruncInst *Trunc, VPValue *Def,
2166                                                 VPValue *CastDef,
2167                                                 VPTransformState &State) {
2168   assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
2169          "Primary induction variable must have an integer type");
2170 
2171   auto II = Legal->getInductionVars().find(IV);
2172   assert(II != Legal->getInductionVars().end() && "IV is not an induction");
2173 
2174   auto ID = II->second;
2175   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
2176 
2177   // The value from the original loop to which we are mapping the new induction
2178   // variable.
2179   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
2180 
2181   auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
2182 
2183   // Generate code for the induction step. Note that induction steps are
2184   // required to be loop-invariant
2185   auto CreateStepValue = [&](const SCEV *Step) -> Value * {
2186     assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) &&
2187            "Induction step should be loop invariant");
2188     if (PSE.getSE()->isSCEVable(IV->getType())) {
2189       SCEVExpander Exp(*PSE.getSE(), DL, "induction");
2190       return Exp.expandCodeFor(Step, Step->getType(),
2191                                LoopVectorPreHeader->getTerminator());
2192     }
2193     return cast<SCEVUnknown>(Step)->getValue();
2194   };
2195 
2196   // The scalar value to broadcast. This is derived from the canonical
2197   // induction variable. If a truncation type is given, truncate the canonical
2198   // induction variable and step. Otherwise, derive these values from the
2199   // induction descriptor.
2200   auto CreateScalarIV = [&](Value *&Step) -> Value * {
2201     Value *ScalarIV = Induction;
2202     if (IV != OldInduction) {
2203       ScalarIV = IV->getType()->isIntegerTy()
2204                      ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
2205                      : Builder.CreateCast(Instruction::SIToFP, Induction,
2206                                           IV->getType());
2207       ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID);
2208       ScalarIV->setName("offset.idx");
2209     }
2210     if (Trunc) {
2211       auto *TruncType = cast<IntegerType>(Trunc->getType());
2212       assert(Step->getType()->isIntegerTy() &&
2213              "Truncation requires an integer step");
2214       ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
2215       Step = Builder.CreateTrunc(Step, TruncType);
2216     }
2217     return ScalarIV;
2218   };
2219 
2220   // Create the vector values from the scalar IV, in the absence of creating a
2221   // vector IV.
2222   auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) {
2223     Value *Broadcasted = getBroadcastInstrs(ScalarIV);
2224     for (unsigned Part = 0; Part < UF; ++Part) {
2225       assert(!VF.isScalable() && "scalable vectors not yet supported.");
2226       Value *EntryPart =
2227           getStepVector(Broadcasted, VF.getKnownMinValue() * Part, Step,
2228                         ID.getInductionOpcode());
2229       State.set(Def, EntryVal, EntryPart, Part);
2230       if (Trunc)
2231         addMetadata(EntryPart, Trunc);
2232       recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, CastDef,
2233                                             State, Part);
2234     }
2235   };
2236 
2237   // Now do the actual transformations, and start with creating the step value.
2238   Value *Step = CreateStepValue(ID.getStep());
2239   if (VF.isZero() || VF.isScalar()) {
2240     Value *ScalarIV = CreateScalarIV(Step);
2241     CreateSplatIV(ScalarIV, Step);
2242     return;
2243   }
2244 
2245   // Determine if we want a scalar version of the induction variable. This is
2246   // true if the induction variable itself is not widened, or if it has at
2247   // least one user in the loop that is not widened.
2248   auto NeedsScalarIV = needsScalarInduction(EntryVal);
2249   if (!NeedsScalarIV) {
2250     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
2251                                     State);
2252     return;
2253   }
2254 
2255   // Try to create a new independent vector induction variable. If we can't
2256   // create the phi node, we will splat the scalar induction variable in each
2257   // loop iteration.
2258   if (!shouldScalarizeInstruction(EntryVal)) {
2259     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
2260                                     State);
2261     Value *ScalarIV = CreateScalarIV(Step);
2262     // Create scalar steps that can be used by instructions we will later
2263     // scalarize. Note that the addition of the scalar steps will not increase
2264     // the number of instructions in the loop in the common case prior to
2265     // InstCombine. We will be trading one vector extract for each scalar step.
2266     buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
2267     return;
2268   }
2269 
2270   // All IV users are scalar instructions, so only emit a scalar IV, not a
2271   // vectorised IV. Except when we tail-fold, then the splat IV feeds the
2272   // predicate used by the masked loads/stores.
2273   Value *ScalarIV = CreateScalarIV(Step);
2274   if (!Cost->isScalarEpilogueAllowed())
2275     CreateSplatIV(ScalarIV, Step);
2276   buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
2277 }
2278 
2279 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
2280                                           Instruction::BinaryOps BinOp) {
2281   // Create and check the types.
2282   auto *ValVTy = cast<FixedVectorType>(Val->getType());
2283   int VLen = ValVTy->getNumElements();
2284 
2285   Type *STy = Val->getType()->getScalarType();
2286   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
2287          "Induction Step must be an integer or FP");
2288   assert(Step->getType() == STy && "Step has wrong type");
2289 
2290   SmallVector<Constant *, 8> Indices;
2291 
2292   if (STy->isIntegerTy()) {
2293     // Create a vector of consecutive numbers from zero to VF.
2294     for (int i = 0; i < VLen; ++i)
2295       Indices.push_back(ConstantInt::get(STy, StartIdx + i));
2296 
2297     // Add the consecutive indices to the vector value.
2298     Constant *Cv = ConstantVector::get(Indices);
2299     assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
2300     Step = Builder.CreateVectorSplat(VLen, Step);
2301     assert(Step->getType() == Val->getType() && "Invalid step vec");
2302     // FIXME: The newly created binary instructions should contain nsw/nuw flags,
2303     // which can be found from the original scalar operations.
2304     Step = Builder.CreateMul(Cv, Step);
2305     return Builder.CreateAdd(Val, Step, "induction");
2306   }
2307 
2308   // Floating point induction.
2309   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
2310          "Binary Opcode should be specified for FP induction");
2311   // Create a vector of consecutive numbers from zero to VF.
2312   for (int i = 0; i < VLen; ++i)
2313     Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i)));
2314 
2315   // Add the consecutive indices to the vector value.
2316   Constant *Cv = ConstantVector::get(Indices);
2317 
2318   Step = Builder.CreateVectorSplat(VLen, Step);
2319 
2320   // Floating point operations had to be 'fast' to enable the induction.
2321   FastMathFlags Flags;
2322   Flags.setFast();
2323 
2324   Value *MulOp = Builder.CreateFMul(Cv, Step);
2325   if (isa<Instruction>(MulOp))
2326     // Have to check, MulOp may be a constant
2327     cast<Instruction>(MulOp)->setFastMathFlags(Flags);
2328 
2329   Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
2330   if (isa<Instruction>(BOp))
2331     cast<Instruction>(BOp)->setFastMathFlags(Flags);
2332   return BOp;
2333 }
2334 
2335 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
2336                                            Instruction *EntryVal,
2337                                            const InductionDescriptor &ID,
2338                                            VPValue *Def, VPValue *CastDef,
2339                                            VPTransformState &State) {
2340   // We shouldn't have to build scalar steps if we aren't vectorizing.
2341   assert(VF.isVector() && "VF should be greater than one");
2342   // Get the value type and ensure it and the step have the same integer type.
2343   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
2344   assert(ScalarIVTy == Step->getType() &&
2345          "Val and Step should have the same type");
2346 
2347   // We build scalar steps for both integer and floating-point induction
2348   // variables. Here, we determine the kind of arithmetic we will perform.
2349   Instruction::BinaryOps AddOp;
2350   Instruction::BinaryOps MulOp;
2351   if (ScalarIVTy->isIntegerTy()) {
2352     AddOp = Instruction::Add;
2353     MulOp = Instruction::Mul;
2354   } else {
2355     AddOp = ID.getInductionOpcode();
2356     MulOp = Instruction::FMul;
2357   }
2358 
2359   // Determine the number of scalars we need to generate for each unroll
2360   // iteration. If EntryVal is uniform, we only need to generate the first
2361   // lane. Otherwise, we generate all VF values.
2362   unsigned Lanes =
2363       Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF)
2364           ? 1
2365           : VF.getKnownMinValue();
2366   assert((!VF.isScalable() || Lanes == 1) &&
2367          "Should never scalarize a scalable vector");
2368   // Compute the scalar steps and save the results in VectorLoopValueMap.
2369   for (unsigned Part = 0; Part < UF; ++Part) {
2370     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2371       auto *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
2372                                          ScalarIVTy->getScalarSizeInBits());
2373       Value *StartIdx =
2374           createStepForVF(Builder, ConstantInt::get(IntStepTy, Part), VF);
2375       if (ScalarIVTy->isFloatingPointTy())
2376         StartIdx = Builder.CreateSIToFP(StartIdx, ScalarIVTy);
2377       StartIdx = addFastMathFlag(Builder.CreateBinOp(
2378           AddOp, StartIdx, getSignedIntOrFpConstant(ScalarIVTy, Lane)));
2379       // The step returned by `createStepForVF` is a runtime-evaluated value
2380       // when VF is scalable. Otherwise, it should be folded into a Constant.
2381       assert((VF.isScalable() || isa<Constant>(StartIdx)) &&
2382              "Expected StartIdx to be folded to a constant when VF is not "
2383              "scalable");
2384       auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step));
2385       auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul));
2386       State.set(Def, Add, VPIteration(Part, Lane));
2387       recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State,
2388                                             Part, Lane);
2389     }
2390   }
2391 }
2392 
2393 Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) {
2394   assert(V != Induction && "The new induction variable should not be used.");
2395   assert(!V->getType()->isVectorTy() && "Can't widen a vector");
2396   assert(!V->getType()->isVoidTy() && "Type does not produce a value");
2397 
2398   // If we have a stride that is replaced by one, do it here. Defer this for
2399   // the VPlan-native path until we start running Legal checks in that path.
2400   if (!EnableVPlanNativePath && Legal->hasStride(V))
2401     V = ConstantInt::get(V->getType(), 1);
2402 
2403   // If we have a vector mapped to this value, return it.
2404   if (VectorLoopValueMap.hasVectorValue(V, Part))
2405     return VectorLoopValueMap.getVectorValue(V, Part);
2406 
2407   // If the value has not been vectorized, check if it has been scalarized
2408   // instead. If it has been scalarized, and we actually need the value in
2409   // vector form, we will construct the vector values on demand.
2410   if (VectorLoopValueMap.hasAnyScalarValue(V)) {
2411     Value *ScalarValue =
2412         VectorLoopValueMap.getScalarValue(V, VPIteration(Part, 0));
2413 
2414     // If we've scalarized a value, that value should be an instruction.
2415     auto *I = cast<Instruction>(V);
2416 
2417     // If we aren't vectorizing, we can just copy the scalar map values over to
2418     // the vector map.
2419     if (VF.isScalar()) {
2420       VectorLoopValueMap.setVectorValue(V, Part, ScalarValue);
2421       return ScalarValue;
2422     }
2423 
2424     // Get the last scalar instruction we generated for V and Part. If the value
2425     // is known to be uniform after vectorization, this corresponds to lane zero
2426     // of the Part unroll iteration. Otherwise, the last instruction is the one
2427     // we created for the last vector lane of the Part unroll iteration.
2428     unsigned LastLane = Cost->isUniformAfterVectorization(I, VF)
2429                             ? 0
2430                             : VF.getKnownMinValue() - 1;
2431     assert((!VF.isScalable() || LastLane == 0) &&
2432            "Scalable vectorization can't lead to any scalarized values.");
2433     auto *LastInst = cast<Instruction>(
2434         VectorLoopValueMap.getScalarValue(V, VPIteration(Part, LastLane)));
2435 
2436     // Set the insert point after the last scalarized instruction. This ensures
2437     // the insertelement sequence will directly follow the scalar definitions.
2438     auto OldIP = Builder.saveIP();
2439     auto NewIP = std::next(BasicBlock::iterator(LastInst));
2440     Builder.SetInsertPoint(&*NewIP);
2441 
2442     // However, if we are vectorizing, we need to construct the vector values.
2443     // If the value is known to be uniform after vectorization, we can just
2444     // broadcast the scalar value corresponding to lane zero for each unroll
2445     // iteration. Otherwise, we construct the vector values using insertelement
2446     // instructions. Since the resulting vectors are stored in
2447     // VectorLoopValueMap, we will only generate the insertelements once.
2448     Value *VectorValue = nullptr;
2449     if (Cost->isUniformAfterVectorization(I, VF)) {
2450       VectorValue = getBroadcastInstrs(ScalarValue);
2451       VectorLoopValueMap.setVectorValue(V, Part, VectorValue);
2452     } else {
2453       // Initialize packing with insertelements to start from poison.
2454       assert(!VF.isScalable() && "VF is assumed to be non scalable.");
2455       Value *Poison = PoisonValue::get(VectorType::get(V->getType(), VF));
2456       VectorLoopValueMap.setVectorValue(V, Part, Poison);
2457       for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
2458         packScalarIntoVectorValue(V, VPIteration(Part, Lane));
2459       VectorValue = VectorLoopValueMap.getVectorValue(V, Part);
2460     }
2461     Builder.restoreIP(OldIP);
2462     return VectorValue;
2463   }
2464 
2465   // If this scalar is unknown, assume that it is a constant or that it is
2466   // loop invariant. Broadcast V and save the value for future uses.
2467   Value *B = getBroadcastInstrs(V);
2468   VectorLoopValueMap.setVectorValue(V, Part, B);
2469   return B;
2470 }
2471 
2472 Value *
2473 InnerLoopVectorizer::getOrCreateScalarValue(Value *V,
2474                                             const VPIteration &Instance) {
2475   // If the value is not an instruction contained in the loop, it should
2476   // already be scalar.
2477   if (OrigLoop->isLoopInvariant(V))
2478     return V;
2479 
2480   assert(Instance.Lane > 0
2481              ? !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF)
2482              : true && "Uniform values only have lane zero");
2483 
2484   // If the value from the original loop has not been vectorized, it is
2485   // represented by UF x VF scalar values in the new loop. Return the requested
2486   // scalar value.
2487   if (VectorLoopValueMap.hasScalarValue(V, Instance))
2488     return VectorLoopValueMap.getScalarValue(V, Instance);
2489 
2490   // If the value has not been scalarized, get its entry in VectorLoopValueMap
2491   // for the given unroll part. If this entry is not a vector type (i.e., the
2492   // vectorization factor is one), there is no need to generate an
2493   // extractelement instruction.
2494   auto *U = getOrCreateVectorValue(V, Instance.Part);
2495   if (!U->getType()->isVectorTy()) {
2496     assert(VF.isScalar() && "Value not scalarized has non-vector type");
2497     return U;
2498   }
2499 
2500   // Otherwise, the value from the original loop has been vectorized and is
2501   // represented by UF vector values. Extract and return the requested scalar
2502   // value from the appropriate vector lane.
2503   return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane));
2504 }
2505 
2506 void InnerLoopVectorizer::packScalarIntoVectorValue(
2507     Value *V, const VPIteration &Instance) {
2508   assert(V != Induction && "The new induction variable should not be used.");
2509   assert(!V->getType()->isVectorTy() && "Can't pack a vector");
2510   assert(!V->getType()->isVoidTy() && "Type does not produce a value");
2511 
2512   Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance);
2513   Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part);
2514   VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst,
2515                                             Builder.getInt32(Instance.Lane));
2516   VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue);
2517 }
2518 
2519 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def,
2520                                                     const VPIteration &Instance,
2521                                                     VPTransformState &State) {
2522   Value *ScalarInst = State.get(Def, Instance);
2523   Value *VectorValue = State.get(Def, Instance.Part);
2524   VectorValue = Builder.CreateInsertElement(
2525       VectorValue, ScalarInst, State.Builder.getInt32(Instance.Lane));
2526   State.set(Def, VectorValue, Instance.Part);
2527 }
2528 
2529 Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
2530   assert(Vec->getType()->isVectorTy() && "Invalid type");
2531   assert(!VF.isScalable() && "Cannot reverse scalable vectors");
2532   SmallVector<int, 8> ShuffleMask;
2533   for (unsigned i = 0; i < VF.getKnownMinValue(); ++i)
2534     ShuffleMask.push_back(VF.getKnownMinValue() - i - 1);
2535 
2536   return Builder.CreateShuffleVector(Vec, ShuffleMask, "reverse");
2537 }
2538 
2539 // Return whether we allow using masked interleave-groups (for dealing with
2540 // strided loads/stores that reside in predicated blocks, or for dealing
2541 // with gaps).
2542 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2543   // If an override option has been passed in for interleaved accesses, use it.
2544   if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2545     return EnableMaskedInterleavedMemAccesses;
2546 
2547   return TTI.enableMaskedInterleavedAccessVectorization();
2548 }
2549 
2550 // Try to vectorize the interleave group that \p Instr belongs to.
2551 //
2552 // E.g. Translate following interleaved load group (factor = 3):
2553 //   for (i = 0; i < N; i+=3) {
2554 //     R = Pic[i];             // Member of index 0
2555 //     G = Pic[i+1];           // Member of index 1
2556 //     B = Pic[i+2];           // Member of index 2
2557 //     ... // do something to R, G, B
2558 //   }
2559 // To:
2560 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2561 //   %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9>   ; R elements
2562 //   %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10>  ; G elements
2563 //   %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11>  ; B elements
2564 //
2565 // Or translate following interleaved store group (factor = 3):
2566 //   for (i = 0; i < N; i+=3) {
2567 //     ... do something to R, G, B
2568 //     Pic[i]   = R;           // Member of index 0
2569 //     Pic[i+1] = G;           // Member of index 1
2570 //     Pic[i+2] = B;           // Member of index 2
2571 //   }
2572 // To:
2573 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2574 //   %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
2575 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2576 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2577 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2578 void InnerLoopVectorizer::vectorizeInterleaveGroup(
2579     const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
2580     VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
2581     VPValue *BlockInMask) {
2582   Instruction *Instr = Group->getInsertPos();
2583   const DataLayout &DL = Instr->getModule()->getDataLayout();
2584 
2585   // Prepare for the vector type of the interleaved load/store.
2586   Type *ScalarTy = getMemInstValueType(Instr);
2587   unsigned InterleaveFactor = Group->getFactor();
2588   assert(!VF.isScalable() && "scalable vectors not yet supported.");
2589   auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);
2590 
2591   // Prepare for the new pointers.
2592   SmallVector<Value *, 2> AddrParts;
2593   unsigned Index = Group->getIndex(Instr);
2594 
2595   // TODO: extend the masked interleaved-group support to reversed access.
2596   assert((!BlockInMask || !Group->isReverse()) &&
2597          "Reversed masked interleave-group not supported.");
2598 
2599   // If the group is reverse, adjust the index to refer to the last vector lane
2600   // instead of the first. We adjust the index from the first vector lane,
2601   // rather than directly getting the pointer for lane VF - 1, because the
2602   // pointer operand of the interleaved access is supposed to be uniform. For
2603   // uniform instructions, we're only required to generate a value for the
2604   // first vector lane in each unroll iteration.
2605   assert(!VF.isScalable() &&
2606          "scalable vector reverse operation is not implemented");
2607   if (Group->isReverse())
2608     Index += (VF.getKnownMinValue() - 1) * Group->getFactor();
2609 
2610   for (unsigned Part = 0; Part < UF; Part++) {
2611     Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
2612     setDebugLocFromInst(Builder, AddrPart);
2613 
2614     // Notice current instruction could be any index. Need to adjust the address
2615     // to the member of index 0.
2616     //
2617     // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
2618     //       b = A[i];       // Member of index 0
2619     // Current pointer is pointed to A[i+1], adjust it to A[i].
2620     //
2621     // E.g.  A[i+1] = a;     // Member of index 1
2622     //       A[i]   = b;     // Member of index 0
2623     //       A[i+2] = c;     // Member of index 2 (Current instruction)
2624     // Current pointer is pointed to A[i+2], adjust it to A[i].
2625 
2626     bool InBounds = false;
2627     if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
2628       InBounds = gep->isInBounds();
2629     AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2630     cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2631 
2632     // Cast to the vector pointer type.
2633     unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
2634     Type *PtrTy = VecTy->getPointerTo(AddressSpace);
2635     AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
2636   }
2637 
2638   setDebugLocFromInst(Builder, Instr);
2639   Value *PoisonVec = PoisonValue::get(VecTy);
2640 
2641   Value *MaskForGaps = nullptr;
2642   if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
2643     assert(!VF.isScalable() && "scalable vectors not yet supported.");
2644     MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2645     assert(MaskForGaps && "Mask for Gaps is required but it is null");
2646   }
2647 
2648   // Vectorize the interleaved load group.
2649   if (isa<LoadInst>(Instr)) {
2650     // For each unroll part, create a wide load for the group.
2651     SmallVector<Value *, 2> NewLoads;
2652     for (unsigned Part = 0; Part < UF; Part++) {
2653       Instruction *NewLoad;
2654       if (BlockInMask || MaskForGaps) {
2655         assert(useMaskedInterleavedAccesses(*TTI) &&
2656                "masked interleaved groups are not allowed.");
2657         Value *GroupMask = MaskForGaps;
2658         if (BlockInMask) {
2659           Value *BlockInMaskPart = State.get(BlockInMask, Part);
2660           assert(!VF.isScalable() && "scalable vectors not yet supported.");
2661           Value *ShuffledMask = Builder.CreateShuffleVector(
2662               BlockInMaskPart,
2663               createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2664               "interleaved.mask");
2665           GroupMask = MaskForGaps
2666                           ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2667                                                 MaskForGaps)
2668                           : ShuffledMask;
2669         }
2670         NewLoad =
2671             Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(),
2672                                      GroupMask, PoisonVec, "wide.masked.vec");
2673       }
2674       else
2675         NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
2676                                             Group->getAlign(), "wide.vec");
2677       Group->addMetadata(NewLoad);
2678       NewLoads.push_back(NewLoad);
2679     }
2680 
2681     // For each member in the group, shuffle out the appropriate data from the
2682     // wide loads.
2683     unsigned J = 0;
2684     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2685       Instruction *Member = Group->getMember(I);
2686 
2687       // Skip the gaps in the group.
2688       if (!Member)
2689         continue;
2690 
2691       assert(!VF.isScalable() && "scalable vectors not yet supported.");
2692       auto StrideMask =
2693           createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
2694       for (unsigned Part = 0; Part < UF; Part++) {
2695         Value *StridedVec = Builder.CreateShuffleVector(
2696             NewLoads[Part], StrideMask, "strided.vec");
2697 
2698         // If this member has different type, cast the result type.
2699         if (Member->getType() != ScalarTy) {
2700           assert(!VF.isScalable() && "VF is assumed to be non scalable.");
2701           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2702           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2703         }
2704 
2705         if (Group->isReverse())
2706           StridedVec = reverseVector(StridedVec);
2707 
2708         State.set(VPDefs[J], Member, StridedVec, Part);
2709       }
2710       ++J;
2711     }
2712     return;
2713   }
2714 
2715   // The sub vector type for current instruction.
2716   assert(!VF.isScalable() && "VF is assumed to be non scalable.");
2717   auto *SubVT = VectorType::get(ScalarTy, VF);
2718 
2719   // Vectorize the interleaved store group.
2720   for (unsigned Part = 0; Part < UF; Part++) {
2721     // Collect the stored vector from each member.
2722     SmallVector<Value *, 4> StoredVecs;
2723     for (unsigned i = 0; i < InterleaveFactor; i++) {
2724       // Interleaved store group doesn't allow a gap, so each index has a member
2725       assert(Group->getMember(i) && "Fail to get a member from an interleaved store group");
2726 
2727       Value *StoredVec = State.get(StoredValues[i], Part);
2728 
2729       if (Group->isReverse())
2730         StoredVec = reverseVector(StoredVec);
2731 
2732       // If this member has different type, cast it to a unified type.
2733 
2734       if (StoredVec->getType() != SubVT)
2735         StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2736 
2737       StoredVecs.push_back(StoredVec);
2738     }
2739 
2740     // Concatenate all vectors into a wide vector.
2741     Value *WideVec = concatenateVectors(Builder, StoredVecs);
2742 
2743     // Interleave the elements in the wide vector.
2744     assert(!VF.isScalable() && "scalable vectors not yet supported.");
2745     Value *IVec = Builder.CreateShuffleVector(
2746         WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor),
2747         "interleaved.vec");
2748 
2749     Instruction *NewStoreInstr;
2750     if (BlockInMask) {
2751       Value *BlockInMaskPart = State.get(BlockInMask, Part);
2752       Value *ShuffledMask = Builder.CreateShuffleVector(
2753           BlockInMaskPart,
2754           createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2755           "interleaved.mask");
2756       NewStoreInstr = Builder.CreateMaskedStore(
2757           IVec, AddrParts[Part], Group->getAlign(), ShuffledMask);
2758     }
2759     else
2760       NewStoreInstr =
2761           Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());
2762 
2763     Group->addMetadata(NewStoreInstr);
2764   }
2765 }
2766 
2767 void InnerLoopVectorizer::vectorizeMemoryInstruction(
2768     Instruction *Instr, VPTransformState &State, VPValue *Def, VPValue *Addr,
2769     VPValue *StoredValue, VPValue *BlockInMask) {
2770   // Attempt to issue a wide load.
2771   LoadInst *LI = dyn_cast<LoadInst>(Instr);
2772   StoreInst *SI = dyn_cast<StoreInst>(Instr);
2773 
2774   assert((LI || SI) && "Invalid Load/Store instruction");
2775   assert((!SI || StoredValue) && "No stored value provided for widened store");
2776   assert((!LI || !StoredValue) && "Stored value provided for widened load");
2777 
2778   LoopVectorizationCostModel::InstWidening Decision =
2779       Cost->getWideningDecision(Instr, VF);
2780   assert((Decision == LoopVectorizationCostModel::CM_Widen ||
2781           Decision == LoopVectorizationCostModel::CM_Widen_Reverse ||
2782           Decision == LoopVectorizationCostModel::CM_GatherScatter) &&
2783          "CM decision is not to widen the memory instruction");
2784 
2785   Type *ScalarDataTy = getMemInstValueType(Instr);
2786 
2787   auto *DataTy = VectorType::get(ScalarDataTy, VF);
2788   const Align Alignment = getLoadStoreAlignment(Instr);
2789 
2790   // Determine if the pointer operand of the access is either consecutive or
2791   // reverse consecutive.
2792   bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse);
2793   bool ConsecutiveStride =
2794       Reverse || (Decision == LoopVectorizationCostModel::CM_Widen);
2795   bool CreateGatherScatter =
2796       (Decision == LoopVectorizationCostModel::CM_GatherScatter);
2797 
2798   // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector
2799   // gather/scatter. Otherwise Decision should have been to Scalarize.
2800   assert((ConsecutiveStride || CreateGatherScatter) &&
2801          "The instruction should be scalarized");
2802   (void)ConsecutiveStride;
2803 
2804   VectorParts BlockInMaskParts(UF);
2805   bool isMaskRequired = BlockInMask;
2806   if (isMaskRequired)
2807     for (unsigned Part = 0; Part < UF; ++Part)
2808       BlockInMaskParts[Part] = State.get(BlockInMask, Part);
2809 
2810   const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
2811     // Calculate the pointer for the specific unroll-part.
2812     GetElementPtrInst *PartPtr = nullptr;
2813 
2814     bool InBounds = false;
2815     if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
2816       InBounds = gep->isInBounds();
2817 
2818     if (Reverse) {
2819       assert(!VF.isScalable() &&
2820              "Reversing vectors is not yet supported for scalable vectors.");
2821 
2822       // If the address is consecutive but reversed, then the
2823       // wide store needs to start at the last vector element.
2824       PartPtr = cast<GetElementPtrInst>(Builder.CreateGEP(
2825           ScalarDataTy, Ptr, Builder.getInt32(-Part * VF.getKnownMinValue())));
2826       PartPtr->setIsInBounds(InBounds);
2827       PartPtr = cast<GetElementPtrInst>(Builder.CreateGEP(
2828           ScalarDataTy, PartPtr, Builder.getInt32(1 - VF.getKnownMinValue())));
2829       PartPtr->setIsInBounds(InBounds);
2830       if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
2831         BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]);
2832     } else {
2833       Value *Increment = createStepForVF(Builder, Builder.getInt32(Part), VF);
2834       PartPtr = cast<GetElementPtrInst>(
2835           Builder.CreateGEP(ScalarDataTy, Ptr, Increment));
2836       PartPtr->setIsInBounds(InBounds);
2837     }
2838 
2839     unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
2840     return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
2841   };
2842 
2843   // Handle Stores:
2844   if (SI) {
2845     setDebugLocFromInst(Builder, SI);
2846 
2847     for (unsigned Part = 0; Part < UF; ++Part) {
2848       Instruction *NewSI = nullptr;
2849       Value *StoredVal = State.get(StoredValue, Part);
2850       if (CreateGatherScatter) {
2851         Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2852         Value *VectorGep = State.get(Addr, Part);
2853         NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
2854                                             MaskPart);
2855       } else {
2856         if (Reverse) {
2857           // If we store to reverse consecutive memory locations, then we need
2858           // to reverse the order of elements in the stored value.
2859           StoredVal = reverseVector(StoredVal);
2860           // We don't want to update the value in the map as it might be used in
2861           // another expression. So don't call resetVectorValue(StoredVal).
2862         }
2863         auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0)));
2864         if (isMaskRequired)
2865           NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
2866                                             BlockInMaskParts[Part]);
2867         else
2868           NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
2869       }
2870       addMetadata(NewSI, SI);
2871     }
2872     return;
2873   }
2874 
2875   // Handle loads.
2876   assert(LI && "Must have a load instruction");
2877   setDebugLocFromInst(Builder, LI);
2878   for (unsigned Part = 0; Part < UF; ++Part) {
2879     Value *NewLI;
2880     if (CreateGatherScatter) {
2881       Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2882       Value *VectorGep = State.get(Addr, Part);
2883       NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart,
2884                                          nullptr, "wide.masked.gather");
2885       addMetadata(NewLI, LI);
2886     } else {
2887       auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0)));
2888       if (isMaskRequired)
2889         NewLI = Builder.CreateMaskedLoad(
2890             VecPtr, Alignment, BlockInMaskParts[Part], PoisonValue::get(DataTy),
2891             "wide.masked.load");
2892       else
2893         NewLI =
2894             Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
2895 
2896       // Add metadata to the load, but setVectorValue to the reverse shuffle.
2897       addMetadata(NewLI, LI);
2898       if (Reverse)
2899         NewLI = reverseVector(NewLI);
2900     }
2901 
2902     State.set(Def, Instr, NewLI, Part);
2903   }
2904 }
2905 
2906 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, VPUser &User,
2907                                                const VPIteration &Instance,
2908                                                bool IfPredicateInstr,
2909                                                VPTransformState &State) {
2910   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
2911 
2912   // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for
2913   // the first lane and part.
2914   if (isa<NoAliasScopeDeclInst>(Instr))
2915     if (!Instance.isFirstIteration())
2916       return;
2917 
2918   setDebugLocFromInst(Builder, Instr);
2919 
2920   // Does this instruction return a value ?
2921   bool IsVoidRetTy = Instr->getType()->isVoidTy();
2922 
2923   Instruction *Cloned = Instr->clone();
2924   if (!IsVoidRetTy)
2925     Cloned->setName(Instr->getName() + ".cloned");
2926 
2927   // Replace the operands of the cloned instructions with their scalar
2928   // equivalents in the new loop.
2929   for (unsigned op = 0, e = User.getNumOperands(); op != e; ++op) {
2930     auto *Operand = dyn_cast<Instruction>(Instr->getOperand(op));
2931     auto InputInstance = Instance;
2932     if (!Operand || !OrigLoop->contains(Operand) ||
2933         (Cost->isUniformAfterVectorization(Operand, State.VF)))
2934       InputInstance.Lane = 0;
2935     auto *NewOp = State.get(User.getOperand(op), InputInstance);
2936     Cloned->setOperand(op, NewOp);
2937   }
2938   addNewMetadata(Cloned, Instr);
2939 
2940   // Place the cloned scalar in the new loop.
2941   Builder.Insert(Cloned);
2942 
2943   // TODO: Set result for VPValue of VPReciplicateRecipe. This requires
2944   // representing scalar values in VPTransformState. Add the cloned scalar to
2945   // the scalar map entry.
2946   VectorLoopValueMap.setScalarValue(Instr, Instance, Cloned);
2947 
2948   // If we just cloned a new assumption, add it the assumption cache.
2949   if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
2950     if (II->getIntrinsicID() == Intrinsic::assume)
2951       AC->registerAssumption(II);
2952 
2953   // End if-block.
2954   if (IfPredicateInstr)
2955     PredicatedInstructions.push_back(Cloned);
2956 }
2957 
2958 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
2959                                                       Value *End, Value *Step,
2960                                                       Instruction *DL) {
2961   BasicBlock *Header = L->getHeader();
2962   BasicBlock *Latch = L->getLoopLatch();
2963   // As we're just creating this loop, it's possible no latch exists
2964   // yet. If so, use the header as this will be a single block loop.
2965   if (!Latch)
2966     Latch = Header;
2967 
2968   IRBuilder<> Builder(&*Header->getFirstInsertionPt());
2969   Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
2970   setDebugLocFromInst(Builder, OldInst);
2971   auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
2972 
2973   Builder.SetInsertPoint(Latch->getTerminator());
2974   setDebugLocFromInst(Builder, OldInst);
2975 
2976   // Create i+1 and fill the PHINode.
2977   Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
2978   Induction->addIncoming(Start, L->getLoopPreheader());
2979   Induction->addIncoming(Next, Latch);
2980   // Create the compare.
2981   Value *ICmp = Builder.CreateICmpEQ(Next, End);
2982   Builder.CreateCondBr(ICmp, L->getUniqueExitBlock(), Header);
2983 
2984   // Now we have two terminators. Remove the old one from the block.
2985   Latch->getTerminator()->eraseFromParent();
2986 
2987   return Induction;
2988 }
2989 
2990 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
2991   if (TripCount)
2992     return TripCount;
2993 
2994   assert(L && "Create Trip Count for null loop.");
2995   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2996   // Find the loop boundaries.
2997   ScalarEvolution *SE = PSE.getSE();
2998   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
2999   assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
3000          "Invalid loop count");
3001 
3002   Type *IdxTy = Legal->getWidestInductionType();
3003   assert(IdxTy && "No type for induction");
3004 
3005   // The exit count might have the type of i64 while the phi is i32. This can
3006   // happen if we have an induction variable that is sign extended before the
3007   // compare. The only way that we get a backedge taken count is that the
3008   // induction variable was signed and as such will not overflow. In such a case
3009   // truncation is legal.
3010   if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
3011       IdxTy->getPrimitiveSizeInBits())
3012     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
3013   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
3014 
3015   // Get the total trip count from the count by adding 1.
3016   const SCEV *ExitCount = SE->getAddExpr(
3017       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
3018 
3019   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
3020 
3021   // Expand the trip count and place the new instructions in the preheader.
3022   // Notice that the pre-header does not change, only the loop body.
3023   SCEVExpander Exp(*SE, DL, "induction");
3024 
3025   // Count holds the overall loop count (N).
3026   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
3027                                 L->getLoopPreheader()->getTerminator());
3028 
3029   if (TripCount->getType()->isPointerTy())
3030     TripCount =
3031         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
3032                                     L->getLoopPreheader()->getTerminator());
3033 
3034   return TripCount;
3035 }
3036 
3037 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
3038   if (VectorTripCount)
3039     return VectorTripCount;
3040 
3041   Value *TC = getOrCreateTripCount(L);
3042   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3043 
3044   Type *Ty = TC->getType();
3045   // This is where we can make the step a runtime constant.
3046   Value *Step = createStepForVF(Builder, ConstantInt::get(Ty, UF), VF);
3047 
3048   // If the tail is to be folded by masking, round the number of iterations N
3049   // up to a multiple of Step instead of rounding down. This is done by first
3050   // adding Step-1 and then rounding down. Note that it's ok if this addition
3051   // overflows: the vector induction variable will eventually wrap to zero given
3052   // that it starts at zero and its Step is a power of two; the loop will then
3053   // exit, with the last early-exit vector comparison also producing all-true.
3054   if (Cost->foldTailByMasking()) {
3055     assert(isPowerOf2_32(VF.getKnownMinValue() * UF) &&
3056            "VF*UF must be a power of 2 when folding tail by masking");
3057     assert(!VF.isScalable() &&
3058            "Tail folding not yet supported for scalable vectors");
3059     TC = Builder.CreateAdd(
3060         TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up");
3061   }
3062 
3063   // Now we need to generate the expression for the part of the loop that the
3064   // vectorized body will execute. This is equal to N - (N % Step) if scalar
3065   // iterations are not required for correctness, or N - Step, otherwise. Step
3066   // is equal to the vectorization factor (number of SIMD elements) times the
3067   // unroll factor (number of SIMD instructions).
3068   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
3069 
3070   // There are two cases where we need to ensure (at least) the last iteration
3071   // runs in the scalar remainder loop. Thus, if the step evenly divides
3072   // the trip count, we set the remainder to be equal to the step. If the step
3073   // does not evenly divide the trip count, no adjustment is necessary since
3074   // there will already be scalar iterations. Note that the minimum iterations
3075   // check ensures that N >= Step. The cases are:
3076   // 1) If there is a non-reversed interleaved group that may speculatively
3077   //    access memory out-of-bounds.
3078   // 2) If any instruction may follow a conditionally taken exit. That is, if
3079   //    the loop contains multiple exiting blocks, or a single exiting block
3080   //    which is not the latch.
3081   if (VF.isVector() && Cost->requiresScalarEpilogue()) {
3082     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
3083     R = Builder.CreateSelect(IsZero, Step, R);
3084   }
3085 
3086   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
3087 
3088   return VectorTripCount;
3089 }
3090 
3091 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
3092                                                    const DataLayout &DL) {
3093   // Verify that V is a vector type with same number of elements as DstVTy.
3094   auto *DstFVTy = cast<FixedVectorType>(DstVTy);
3095   unsigned VF = DstFVTy->getNumElements();
3096   auto *SrcVecTy = cast<FixedVectorType>(V->getType());
3097   assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match");
3098   Type *SrcElemTy = SrcVecTy->getElementType();
3099   Type *DstElemTy = DstFVTy->getElementType();
3100   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
3101          "Vector elements must have same size");
3102 
3103   // Do a direct cast if element types are castable.
3104   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
3105     return Builder.CreateBitOrPointerCast(V, DstFVTy);
3106   }
3107   // V cannot be directly casted to desired vector type.
3108   // May happen when V is a floating point vector but DstVTy is a vector of
3109   // pointers or vice-versa. Handle this using a two-step bitcast using an
3110   // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float.
3111   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
3112          "Only one type should be a pointer type");
3113   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
3114          "Only one type should be a floating point type");
3115   Type *IntTy =
3116       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
3117   auto *VecIntTy = FixedVectorType::get(IntTy, VF);
3118   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
3119   return Builder.CreateBitOrPointerCast(CastVal, DstFVTy);
3120 }
3121 
3122 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
3123                                                          BasicBlock *Bypass) {
3124   Value *Count = getOrCreateTripCount(L);
3125   // Reuse existing vector loop preheader for TC checks.
3126   // Note that new preheader block is generated for vector loop.
3127   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
3128   IRBuilder<> Builder(TCCheckBlock->getTerminator());
3129 
3130   // Generate code to check if the loop's trip count is less than VF * UF, or
3131   // equal to it in case a scalar epilogue is required; this implies that the
3132   // vector trip count is zero. This check also covers the case where adding one
3133   // to the backedge-taken count overflowed leading to an incorrect trip count
3134   // of zero. In this case we will also jump to the scalar loop.
3135   auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE
3136                                           : ICmpInst::ICMP_ULT;
3137 
3138   // If tail is to be folded, vector loop takes care of all iterations.
3139   Value *CheckMinIters = Builder.getFalse();
3140   if (!Cost->foldTailByMasking()) {
3141     Value *Step =
3142         createStepForVF(Builder, ConstantInt::get(Count->getType(), UF), VF);
3143     CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
3144   }
3145   // Create new preheader for vector loop.
3146   LoopVectorPreHeader =
3147       SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
3148                  "vector.ph");
3149 
3150   assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
3151                                DT->getNode(Bypass)->getIDom()) &&
3152          "TC check is expected to dominate Bypass");
3153 
3154   // Update dominator for Bypass & LoopExit.
3155   DT->changeImmediateDominator(Bypass, TCCheckBlock);
3156   DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
3157 
3158   ReplaceInstWithInst(
3159       TCCheckBlock->getTerminator(),
3160       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
3161   LoopBypassBlocks.push_back(TCCheckBlock);
3162 }
3163 
3164 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
3165   // Reuse existing vector loop preheader for SCEV checks.
3166   // Note that new preheader block is generated for vector loop.
3167   BasicBlock *const SCEVCheckBlock = LoopVectorPreHeader;
3168 
3169   // Generate the code to check that the SCEV assumptions that we made.
3170   // We want the new basic block to start at the first instruction in a
3171   // sequence of instructions that form a check.
3172   SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
3173                    "scev.check");
3174   Value *SCEVCheck = Exp.expandCodeForPredicate(
3175       &PSE.getUnionPredicate(), SCEVCheckBlock->getTerminator());
3176 
3177   if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
3178     if (C->isZero())
3179       return;
3180 
3181   assert(!(SCEVCheckBlock->getParent()->hasOptSize() ||
3182            (OptForSizeBasedOnProfile &&
3183             Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) &&
3184          "Cannot SCEV check stride or overflow when optimizing for size");
3185 
3186   SCEVCheckBlock->setName("vector.scevcheck");
3187   // Create new preheader for vector loop.
3188   LoopVectorPreHeader =
3189       SplitBlock(SCEVCheckBlock, SCEVCheckBlock->getTerminator(), DT, LI,
3190                  nullptr, "vector.ph");
3191 
3192   // Update dominator only if this is first RT check.
3193   if (LoopBypassBlocks.empty()) {
3194     DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
3195     DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
3196   }
3197 
3198   ReplaceInstWithInst(
3199       SCEVCheckBlock->getTerminator(),
3200       BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheck));
3201   LoopBypassBlocks.push_back(SCEVCheckBlock);
3202   AddedSafetyChecks = true;
3203 }
3204 
3205 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
3206   // VPlan-native path does not do any analysis for runtime checks currently.
3207   if (EnableVPlanNativePath)
3208     return;
3209 
3210   // Reuse existing vector loop preheader for runtime memory checks.
3211   // Note that new preheader block is generated for vector loop.
3212   BasicBlock *const MemCheckBlock = L->getLoopPreheader();
3213 
3214   // Generate the code that checks in runtime if arrays overlap. We put the
3215   // checks into a separate block to make the more common case of few elements
3216   // faster.
3217   auto *LAI = Legal->getLAI();
3218   const auto &RtPtrChecking = *LAI->getRuntimePointerChecking();
3219   if (!RtPtrChecking.Need)
3220     return;
3221 
3222   if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
3223     assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
3224            "Cannot emit memory checks when optimizing for size, unless forced "
3225            "to vectorize.");
3226     ORE->emit([&]() {
3227       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
3228                                         L->getStartLoc(), L->getHeader())
3229              << "Code-size may be reduced by not forcing "
3230                 "vectorization, or by source-code modifications "
3231                 "eliminating the need for runtime checks "
3232                 "(e.g., adding 'restrict').";
3233     });
3234   }
3235 
3236   MemCheckBlock->setName("vector.memcheck");
3237   // Create new preheader for vector loop.
3238   LoopVectorPreHeader =
3239       SplitBlock(MemCheckBlock, MemCheckBlock->getTerminator(), DT, LI, nullptr,
3240                  "vector.ph");
3241 
3242   auto *CondBranch = cast<BranchInst>(
3243       Builder.CreateCondBr(Builder.getTrue(), Bypass, LoopVectorPreHeader));
3244   ReplaceInstWithInst(MemCheckBlock->getTerminator(), CondBranch);
3245   LoopBypassBlocks.push_back(MemCheckBlock);
3246   AddedSafetyChecks = true;
3247 
3248   // Update dominator only if this is first RT check.
3249   if (LoopBypassBlocks.empty()) {
3250     DT->changeImmediateDominator(Bypass, MemCheckBlock);
3251     DT->changeImmediateDominator(LoopExitBlock, MemCheckBlock);
3252   }
3253 
3254   Instruction *FirstCheckInst;
3255   Instruction *MemRuntimeCheck;
3256   SCEVExpander Exp(*PSE.getSE(), MemCheckBlock->getModule()->getDataLayout(),
3257                    "induction");
3258   std::tie(FirstCheckInst, MemRuntimeCheck) = addRuntimeChecks(
3259       MemCheckBlock->getTerminator(), OrigLoop, RtPtrChecking.getChecks(), Exp);
3260   assert(MemRuntimeCheck && "no RT checks generated although RtPtrChecking "
3261                             "claimed checks are required");
3262   CondBranch->setCondition(MemRuntimeCheck);
3263 
3264   // We currently don't use LoopVersioning for the actual loop cloning but we
3265   // still use it to add the noalias metadata.
3266   LVer = std::make_unique<LoopVersioning>(
3267       *Legal->getLAI(),
3268       Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
3269       DT, PSE.getSE());
3270   LVer->prepareNoAliasMetadata();
3271 }
3272 
3273 Value *InnerLoopVectorizer::emitTransformedIndex(
3274     IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
3275     const InductionDescriptor &ID) const {
3276 
3277   SCEVExpander Exp(*SE, DL, "induction");
3278   auto Step = ID.getStep();
3279   auto StartValue = ID.getStartValue();
3280   assert(Index->getType() == Step->getType() &&
3281          "Index type does not match StepValue type");
3282 
3283   // Note: the IR at this point is broken. We cannot use SE to create any new
3284   // SCEV and then expand it, hoping that SCEV's simplification will give us
3285   // a more optimal code. Unfortunately, attempt of doing so on invalid IR may
3286   // lead to various SCEV crashes. So all we can do is to use builder and rely
3287   // on InstCombine for future simplifications. Here we handle some trivial
3288   // cases only.
3289   auto CreateAdd = [&B](Value *X, Value *Y) {
3290     assert(X->getType() == Y->getType() && "Types don't match!");
3291     if (auto *CX = dyn_cast<ConstantInt>(X))
3292       if (CX->isZero())
3293         return Y;
3294     if (auto *CY = dyn_cast<ConstantInt>(Y))
3295       if (CY->isZero())
3296         return X;
3297     return B.CreateAdd(X, Y);
3298   };
3299 
3300   auto CreateMul = [&B](Value *X, Value *Y) {
3301     assert(X->getType() == Y->getType() && "Types don't match!");
3302     if (auto *CX = dyn_cast<ConstantInt>(X))
3303       if (CX->isOne())
3304         return Y;
3305     if (auto *CY = dyn_cast<ConstantInt>(Y))
3306       if (CY->isOne())
3307         return X;
3308     return B.CreateMul(X, Y);
3309   };
3310 
3311   // Get a suitable insert point for SCEV expansion. For blocks in the vector
3312   // loop, choose the end of the vector loop header (=LoopVectorBody), because
3313   // the DomTree is not kept up-to-date for additional blocks generated in the
3314   // vector loop. By using the header as insertion point, we guarantee that the
3315   // expanded instructions dominate all their uses.
3316   auto GetInsertPoint = [this, &B]() {
3317     BasicBlock *InsertBB = B.GetInsertPoint()->getParent();
3318     if (InsertBB != LoopVectorBody &&
3319         LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB))
3320       return LoopVectorBody->getTerminator();
3321     return &*B.GetInsertPoint();
3322   };
3323   switch (ID.getKind()) {
3324   case InductionDescriptor::IK_IntInduction: {
3325     assert(Index->getType() == StartValue->getType() &&
3326            "Index type does not match StartValue type");
3327     if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
3328       return B.CreateSub(StartValue, Index);
3329     auto *Offset = CreateMul(
3330         Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()));
3331     return CreateAdd(StartValue, Offset);
3332   }
3333   case InductionDescriptor::IK_PtrInduction: {
3334     assert(isa<SCEVConstant>(Step) &&
3335            "Expected constant step for pointer induction");
3336     return B.CreateGEP(
3337         StartValue->getType()->getPointerElementType(), StartValue,
3338         CreateMul(Index,
3339                   Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint())));
3340   }
3341   case InductionDescriptor::IK_FpInduction: {
3342     assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
3343     auto InductionBinOp = ID.getInductionBinOp();
3344     assert(InductionBinOp &&
3345            (InductionBinOp->getOpcode() == Instruction::FAdd ||
3346             InductionBinOp->getOpcode() == Instruction::FSub) &&
3347            "Original bin op should be defined for FP induction");
3348 
3349     Value *StepValue = cast<SCEVUnknown>(Step)->getValue();
3350 
3351     // Floating point operations had to be 'fast' to enable the induction.
3352     FastMathFlags Flags;
3353     Flags.setFast();
3354 
3355     Value *MulExp = B.CreateFMul(StepValue, Index);
3356     if (isa<Instruction>(MulExp))
3357       // We have to check, the MulExp may be a constant.
3358       cast<Instruction>(MulExp)->setFastMathFlags(Flags);
3359 
3360     Value *BOp = B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
3361                                "induction");
3362     if (isa<Instruction>(BOp))
3363       cast<Instruction>(BOp)->setFastMathFlags(Flags);
3364 
3365     return BOp;
3366   }
3367   case InductionDescriptor::IK_NoInduction:
3368     return nullptr;
3369   }
3370   llvm_unreachable("invalid enum");
3371 }
3372 
3373 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
3374   LoopScalarBody = OrigLoop->getHeader();
3375   LoopVectorPreHeader = OrigLoop->getLoopPreheader();
3376   LoopExitBlock = OrigLoop->getUniqueExitBlock();
3377   assert(LoopExitBlock && "Must have an exit block");
3378   assert(LoopVectorPreHeader && "Invalid loop structure");
3379 
3380   LoopMiddleBlock =
3381       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3382                  LI, nullptr, Twine(Prefix) + "middle.block");
3383   LoopScalarPreHeader =
3384       SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
3385                  nullptr, Twine(Prefix) + "scalar.ph");
3386 
3387   // Set up branch from middle block to the exit and scalar preheader blocks.
3388   // completeLoopSkeleton will update the condition to use an iteration check,
3389   // if required to decide whether to execute the remainder.
3390   BranchInst *BrInst =
3391       BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, Builder.getTrue());
3392   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3393   BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3394   ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
3395 
3396   // We intentionally don't let SplitBlock to update LoopInfo since
3397   // LoopVectorBody should belong to another loop than LoopVectorPreHeader.
3398   // LoopVectorBody is explicitly added to the correct place few lines later.
3399   LoopVectorBody =
3400       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3401                  nullptr, nullptr, Twine(Prefix) + "vector.body");
3402 
3403   // Update dominator for loop exit.
3404   DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
3405 
3406   // Create and register the new vector loop.
3407   Loop *Lp = LI->AllocateLoop();
3408   Loop *ParentLoop = OrigLoop->getParentLoop();
3409 
3410   // Insert the new loop into the loop nest and register the new basic blocks
3411   // before calling any utilities such as SCEV that require valid LoopInfo.
3412   if (ParentLoop) {
3413     ParentLoop->addChildLoop(Lp);
3414   } else {
3415     LI->addTopLevelLoop(Lp);
3416   }
3417   Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
3418   return Lp;
3419 }
3420 
3421 void InnerLoopVectorizer::createInductionResumeValues(
3422     Loop *L, Value *VectorTripCount,
3423     std::pair<BasicBlock *, Value *> AdditionalBypass) {
3424   assert(VectorTripCount && L && "Expected valid arguments");
3425   assert(((AdditionalBypass.first && AdditionalBypass.second) ||
3426           (!AdditionalBypass.first && !AdditionalBypass.second)) &&
3427          "Inconsistent information about additional bypass.");
3428   // We are going to resume the execution of the scalar loop.
3429   // Go over all of the induction variables that we found and fix the
3430   // PHIs that are left in the scalar version of the loop.
3431   // The starting values of PHI nodes depend on the counter of the last
3432   // iteration in the vectorized loop.
3433   // If we come from a bypass edge then we need to start from the original
3434   // start value.
3435   for (auto &InductionEntry : Legal->getInductionVars()) {
3436     PHINode *OrigPhi = InductionEntry.first;
3437     InductionDescriptor II = InductionEntry.second;
3438 
3439     // Create phi nodes to merge from the  backedge-taken check block.
3440     PHINode *BCResumeVal =
3441         PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
3442                         LoopScalarPreHeader->getTerminator());
3443     // Copy original phi DL over to the new one.
3444     BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
3445     Value *&EndValue = IVEndValues[OrigPhi];
3446     Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
3447     if (OrigPhi == OldInduction) {
3448       // We know what the end value is.
3449       EndValue = VectorTripCount;
3450     } else {
3451       IRBuilder<> B(L->getLoopPreheader()->getTerminator());
3452       Type *StepType = II.getStep()->getType();
3453       Instruction::CastOps CastOp =
3454           CastInst::getCastOpcode(VectorTripCount, true, StepType, true);
3455       Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd");
3456       const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout();
3457       EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
3458       EndValue->setName("ind.end");
3459 
3460       // Compute the end value for the additional bypass (if applicable).
3461       if (AdditionalBypass.first) {
3462         B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt()));
3463         CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true,
3464                                          StepType, true);
3465         CRD =
3466             B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd");
3467         EndValueFromAdditionalBypass =
3468             emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
3469         EndValueFromAdditionalBypass->setName("ind.end");
3470       }
3471     }
3472     // The new PHI merges the original incoming value, in case of a bypass,
3473     // or the value at the end of the vectorized loop.
3474     BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);
3475 
3476     // Fix the scalar body counter (PHI node).
3477     // The old induction's phi node in the scalar body needs the truncated
3478     // value.
3479     for (BasicBlock *BB : LoopBypassBlocks)
3480       BCResumeVal->addIncoming(II.getStartValue(), BB);
3481 
3482     if (AdditionalBypass.first)
3483       BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first,
3484                                             EndValueFromAdditionalBypass);
3485 
3486     OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
3487   }
3488 }
3489 
3490 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L,
3491                                                       MDNode *OrigLoopID) {
3492   assert(L && "Expected valid loop.");
3493 
3494   // The trip counts should be cached by now.
3495   Value *Count = getOrCreateTripCount(L);
3496   Value *VectorTripCount = getOrCreateVectorTripCount(L);
3497 
3498   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3499 
3500   // Add a check in the middle block to see if we have completed
3501   // all of the iterations in the first vector loop.
3502   // If (N - N%VF) == N, then we *don't* need to run the remainder.
3503   // If tail is to be folded, we know we don't need to run the remainder.
3504   if (!Cost->foldTailByMasking()) {
3505     Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
3506                                         Count, VectorTripCount, "cmp.n",
3507                                         LoopMiddleBlock->getTerminator());
3508 
3509     // Here we use the same DebugLoc as the scalar loop latch terminator instead
3510     // of the corresponding compare because they may have ended up with
3511     // different line numbers and we want to avoid awkward line stepping while
3512     // debugging. Eg. if the compare has got a line number inside the loop.
3513     CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3514     cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN);
3515   }
3516 
3517   // Get ready to start creating new instructions into the vectorized body.
3518   assert(LoopVectorPreHeader == L->getLoopPreheader() &&
3519          "Inconsistent vector loop preheader");
3520   Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
3521 
3522   Optional<MDNode *> VectorizedLoopID =
3523       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
3524                                       LLVMLoopVectorizeFollowupVectorized});
3525   if (VectorizedLoopID.hasValue()) {
3526     L->setLoopID(VectorizedLoopID.getValue());
3527 
3528     // Do not setAlreadyVectorized if loop attributes have been defined
3529     // explicitly.
3530     return LoopVectorPreHeader;
3531   }
3532 
3533   // Keep all loop hints from the original loop on the vector loop (we'll
3534   // replace the vectorizer-specific hints below).
3535   if (MDNode *LID = OrigLoop->getLoopID())
3536     L->setLoopID(LID);
3537 
3538   LoopVectorizeHints Hints(L, true, *ORE);
3539   Hints.setAlreadyVectorized();
3540 
3541 #ifdef EXPENSIVE_CHECKS
3542   assert(DT->verify(DominatorTree::VerificationLevel::Fast));
3543   LI->verify(*DT);
3544 #endif
3545 
3546   return LoopVectorPreHeader;
3547 }
3548 
3549 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
3550   /*
3551    In this function we generate a new loop. The new loop will contain
3552    the vectorized instructions while the old loop will continue to run the
3553    scalar remainder.
3554 
3555        [ ] <-- loop iteration number check.
3556     /   |
3557    /    v
3558   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
3559   |  /  |
3560   | /   v
3561   ||   [ ]     <-- vector pre header.
3562   |/    |
3563   |     v
3564   |    [  ] \
3565   |    [  ]_|   <-- vector loop.
3566   |     |
3567   |     v
3568   |   -[ ]   <--- middle-block.
3569   |  /  |
3570   | /   v
3571   -|- >[ ]     <--- new preheader.
3572    |    |
3573    |    v
3574    |   [ ] \
3575    |   [ ]_|   <-- old scalar loop to handle remainder.
3576     \   |
3577      \  v
3578       >[ ]     <-- exit block.
3579    ...
3580    */
3581 
3582   // Get the metadata of the original loop before it gets modified.
3583   MDNode *OrigLoopID = OrigLoop->getLoopID();
3584 
3585   // Create an empty vector loop, and prepare basic blocks for the runtime
3586   // checks.
3587   Loop *Lp = createVectorLoopSkeleton("");
3588 
3589   // Now, compare the new count to zero. If it is zero skip the vector loop and
3590   // jump to the scalar loop. This check also covers the case where the
3591   // backedge-taken count is uint##_max: adding one to it will overflow leading
3592   // to an incorrect trip count of zero. In this (rare) case we will also jump
3593   // to the scalar loop.
3594   emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader);
3595 
3596   // Generate the code to check any assumptions that we've made for SCEV
3597   // expressions.
3598   emitSCEVChecks(Lp, LoopScalarPreHeader);
3599 
3600   // Generate the code that checks in runtime if arrays overlap. We put the
3601   // checks into a separate block to make the more common case of few elements
3602   // faster.
3603   emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
3604 
3605   // Some loops have a single integer induction variable, while other loops
3606   // don't. One example is c++ iterators that often have multiple pointer
3607   // induction variables. In the code below we also support a case where we
3608   // don't have a single induction variable.
3609   //
3610   // We try to obtain an induction variable from the original loop as hard
3611   // as possible. However if we don't find one that:
3612   //   - is an integer
3613   //   - counts from zero, stepping by one
3614   //   - is the size of the widest induction variable type
3615   // then we create a new one.
3616   OldInduction = Legal->getPrimaryInduction();
3617   Type *IdxTy = Legal->getWidestInductionType();
3618   Value *StartIdx = ConstantInt::get(IdxTy, 0);
3619   // The loop step is equal to the vectorization factor (num of SIMD elements)
3620   // times the unroll factor (num of SIMD instructions).
3621   Builder.SetInsertPoint(&*Lp->getHeader()->getFirstInsertionPt());
3622   Value *Step = createStepForVF(Builder, ConstantInt::get(IdxTy, UF), VF);
3623   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
3624   Induction =
3625       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
3626                               getDebugLocFromInstOrOperands(OldInduction));
3627 
3628   // Emit phis for the new starting index of the scalar loop.
3629   createInductionResumeValues(Lp, CountRoundDown);
3630 
3631   return completeLoopSkeleton(Lp, OrigLoopID);
3632 }
3633 
3634 // Fix up external users of the induction variable. At this point, we are
3635 // in LCSSA form, with all external PHIs that use the IV having one input value,
3636 // coming from the remainder loop. We need those PHIs to also have a correct
3637 // value for the IV when arriving directly from the middle block.
3638 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3639                                        const InductionDescriptor &II,
3640                                        Value *CountRoundDown, Value *EndValue,
3641                                        BasicBlock *MiddleBlock) {
3642   // There are two kinds of external IV usages - those that use the value
3643   // computed in the last iteration (the PHI) and those that use the penultimate
3644   // value (the value that feeds into the phi from the loop latch).
3645   // We allow both, but they, obviously, have different values.
3646 
3647   assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");
3648 
3649   DenseMap<Value *, Value *> MissingVals;
3650 
3651   // An external user of the last iteration's value should see the value that
3652   // the remainder loop uses to initialize its own IV.
3653   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3654   for (User *U : PostInc->users()) {
3655     Instruction *UI = cast<Instruction>(U);
3656     if (!OrigLoop->contains(UI)) {
3657       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3658       MissingVals[UI] = EndValue;
3659     }
3660   }
3661 
3662   // An external user of the penultimate value need to see EndValue - Step.
3663   // The simplest way to get this is to recompute it from the constituent SCEVs,
3664   // that is Start + (Step * (CRD - 1)).
3665   for (User *U : OrigPhi->users()) {
3666     auto *UI = cast<Instruction>(U);
3667     if (!OrigLoop->contains(UI)) {
3668       const DataLayout &DL =
3669           OrigLoop->getHeader()->getModule()->getDataLayout();
3670       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3671 
3672       IRBuilder<> B(MiddleBlock->getTerminator());
3673       Value *CountMinusOne = B.CreateSub(
3674           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3675       Value *CMO =
3676           !II.getStep()->getType()->isIntegerTy()
3677               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3678                              II.getStep()->getType())
3679               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3680       CMO->setName("cast.cmo");
3681       Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II);
3682       Escape->setName("ind.escape");
3683       MissingVals[UI] = Escape;
3684     }
3685   }
3686 
3687   for (auto &I : MissingVals) {
3688     PHINode *PHI = cast<PHINode>(I.first);
3689     // One corner case we have to handle is two IVs "chasing" each-other,
3690     // that is %IV2 = phi [...], [ %IV1, %latch ]
3691     // In this case, if IV1 has an external use, we need to avoid adding both
3692     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3693     // don't already have an incoming value for the middle block.
3694     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3695       PHI->addIncoming(I.second, MiddleBlock);
3696   }
3697 }
3698 
3699 namespace {
3700 
3701 struct CSEDenseMapInfo {
3702   static bool canHandle(const Instruction *I) {
3703     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3704            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3705   }
3706 
3707   static inline Instruction *getEmptyKey() {
3708     return DenseMapInfo<Instruction *>::getEmptyKey();
3709   }
3710 
3711   static inline Instruction *getTombstoneKey() {
3712     return DenseMapInfo<Instruction *>::getTombstoneKey();
3713   }
3714 
3715   static unsigned getHashValue(const Instruction *I) {
3716     assert(canHandle(I) && "Unknown instruction!");
3717     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3718                                                            I->value_op_end()));
3719   }
3720 
3721   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3722     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3723         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3724       return LHS == RHS;
3725     return LHS->isIdenticalTo(RHS);
3726   }
3727 };
3728 
3729 } // end anonymous namespace
3730 
3731 ///Perform cse of induction variable instructions.
3732 static void cse(BasicBlock *BB) {
3733   // Perform simple cse.
3734   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3735   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
3736     Instruction *In = &*I++;
3737 
3738     if (!CSEDenseMapInfo::canHandle(In))
3739       continue;
3740 
3741     // Check if we can replace this instruction with any of the
3742     // visited instructions.
3743     if (Instruction *V = CSEMap.lookup(In)) {
3744       In->replaceAllUsesWith(V);
3745       In->eraseFromParent();
3746       continue;
3747     }
3748 
3749     CSEMap[In] = In;
3750   }
3751 }
3752 
3753 InstructionCost
3754 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF,
3755                                               bool &NeedToScalarize) {
3756   assert(!VF.isScalable() && "scalable vectors not yet supported.");
3757   Function *F = CI->getCalledFunction();
3758   Type *ScalarRetTy = CI->getType();
3759   SmallVector<Type *, 4> Tys, ScalarTys;
3760   for (auto &ArgOp : CI->arg_operands())
3761     ScalarTys.push_back(ArgOp->getType());
3762 
3763   // Estimate cost of scalarized vector call. The source operands are assumed
3764   // to be vectors, so we need to extract individual elements from there,
3765   // execute VF scalar calls, and then gather the result into the vector return
3766   // value.
3767   InstructionCost ScalarCallCost =
3768       TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput);
3769   if (VF.isScalar())
3770     return ScalarCallCost;
3771 
3772   // Compute corresponding vector type for return value and arguments.
3773   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3774   for (Type *ScalarTy : ScalarTys)
3775     Tys.push_back(ToVectorTy(ScalarTy, VF));
3776 
3777   // Compute costs of unpacking argument values for the scalar calls and
3778   // packing the return values to a vector.
3779   InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
3780 
3781   InstructionCost Cost =
3782       ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
3783 
3784   // If we can't emit a vector call for this function, then the currently found
3785   // cost is the cost we need to return.
3786   NeedToScalarize = true;
3787   VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
3788   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3789 
3790   if (!TLI || CI->isNoBuiltin() || !VecFunc)
3791     return Cost;
3792 
3793   // If the corresponding vector cost is cheaper, return its cost.
3794   InstructionCost VectorCallCost =
3795       TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput);
3796   if (VectorCallCost < Cost) {
3797     NeedToScalarize = false;
3798     Cost = VectorCallCost;
3799   }
3800   return Cost;
3801 }
3802 
3803 InstructionCost
3804 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
3805                                                    ElementCount VF) {
3806   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3807   assert(ID && "Expected intrinsic call!");
3808 
3809   IntrinsicCostAttributes CostAttrs(ID, *CI, VF);
3810   return TTI.getIntrinsicInstrCost(CostAttrs,
3811                                    TargetTransformInfo::TCK_RecipThroughput);
3812 }
3813 
3814 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3815   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3816   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3817   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3818 }
3819 
3820 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3821   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3822   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3823   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3824 }
3825 
3826 void InnerLoopVectorizer::truncateToMinimalBitwidths() {
3827   // For every instruction `I` in MinBWs, truncate the operands, create a
3828   // truncated version of `I` and reextend its result. InstCombine runs
3829   // later and will remove any ext/trunc pairs.
3830   SmallPtrSet<Value *, 4> Erased;
3831   for (const auto &KV : Cost->getMinimalBitwidths()) {
3832     // If the value wasn't vectorized, we must maintain the original scalar
3833     // type. The absence of the value from VectorLoopValueMap indicates that it
3834     // wasn't vectorized.
3835     if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
3836       continue;
3837     for (unsigned Part = 0; Part < UF; ++Part) {
3838       Value *I = getOrCreateVectorValue(KV.first, Part);
3839       if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
3840         continue;
3841       Type *OriginalTy = I->getType();
3842       Type *ScalarTruncatedTy =
3843           IntegerType::get(OriginalTy->getContext(), KV.second);
3844       auto *TruncatedTy = FixedVectorType::get(
3845           ScalarTruncatedTy,
3846           cast<FixedVectorType>(OriginalTy)->getNumElements());
3847       if (TruncatedTy == OriginalTy)
3848         continue;
3849 
3850       IRBuilder<> B(cast<Instruction>(I));
3851       auto ShrinkOperand = [&](Value *V) -> Value * {
3852         if (auto *ZI = dyn_cast<ZExtInst>(V))
3853           if (ZI->getSrcTy() == TruncatedTy)
3854             return ZI->getOperand(0);
3855         return B.CreateZExtOrTrunc(V, TruncatedTy);
3856       };
3857 
3858       // The actual instruction modification depends on the instruction type,
3859       // unfortunately.
3860       Value *NewI = nullptr;
3861       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3862         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3863                              ShrinkOperand(BO->getOperand(1)));
3864 
3865         // Any wrapping introduced by shrinking this operation shouldn't be
3866         // considered undefined behavior. So, we can't unconditionally copy
3867         // arithmetic wrapping flags to NewI.
3868         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3869       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3870         NewI =
3871             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3872                          ShrinkOperand(CI->getOperand(1)));
3873       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3874         NewI = B.CreateSelect(SI->getCondition(),
3875                               ShrinkOperand(SI->getTrueValue()),
3876                               ShrinkOperand(SI->getFalseValue()));
3877       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3878         switch (CI->getOpcode()) {
3879         default:
3880           llvm_unreachable("Unhandled cast!");
3881         case Instruction::Trunc:
3882           NewI = ShrinkOperand(CI->getOperand(0));
3883           break;
3884         case Instruction::SExt:
3885           NewI = B.CreateSExtOrTrunc(
3886               CI->getOperand(0),
3887               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3888           break;
3889         case Instruction::ZExt:
3890           NewI = B.CreateZExtOrTrunc(
3891               CI->getOperand(0),
3892               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3893           break;
3894         }
3895       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
3896         auto Elements0 = cast<FixedVectorType>(SI->getOperand(0)->getType())
3897                              ->getNumElements();
3898         auto *O0 = B.CreateZExtOrTrunc(
3899             SI->getOperand(0),
3900             FixedVectorType::get(ScalarTruncatedTy, Elements0));
3901         auto Elements1 = cast<FixedVectorType>(SI->getOperand(1)->getType())
3902                              ->getNumElements();
3903         auto *O1 = B.CreateZExtOrTrunc(
3904             SI->getOperand(1),
3905             FixedVectorType::get(ScalarTruncatedTy, Elements1));
3906 
3907         NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
3908       } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
3909         // Don't do anything with the operands, just extend the result.
3910         continue;
3911       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3912         auto Elements = cast<FixedVectorType>(IE->getOperand(0)->getType())
3913                             ->getNumElements();
3914         auto *O0 = B.CreateZExtOrTrunc(
3915             IE->getOperand(0),
3916             FixedVectorType::get(ScalarTruncatedTy, Elements));
3917         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3918         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3919       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3920         auto Elements = cast<FixedVectorType>(EE->getOperand(0)->getType())
3921                             ->getNumElements();
3922         auto *O0 = B.CreateZExtOrTrunc(
3923             EE->getOperand(0),
3924             FixedVectorType::get(ScalarTruncatedTy, Elements));
3925         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3926       } else {
3927         // If we don't know what to do, be conservative and don't do anything.
3928         continue;
3929       }
3930 
3931       // Lastly, extend the result.
3932       NewI->takeName(cast<Instruction>(I));
3933       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3934       I->replaceAllUsesWith(Res);
3935       cast<Instruction>(I)->eraseFromParent();
3936       Erased.insert(I);
3937       VectorLoopValueMap.resetVectorValue(KV.first, Part, Res);
3938     }
3939   }
3940 
3941   // We'll have created a bunch of ZExts that are now parentless. Clean up.
3942   for (const auto &KV : Cost->getMinimalBitwidths()) {
3943     // If the value wasn't vectorized, we must maintain the original scalar
3944     // type. The absence of the value from VectorLoopValueMap indicates that it
3945     // wasn't vectorized.
3946     if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
3947       continue;
3948     for (unsigned Part = 0; Part < UF; ++Part) {
3949       Value *I = getOrCreateVectorValue(KV.first, Part);
3950       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3951       if (Inst && Inst->use_empty()) {
3952         Value *NewI = Inst->getOperand(0);
3953         Inst->eraseFromParent();
3954         VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI);
3955       }
3956     }
3957   }
3958 }
3959 
3960 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
3961   // Insert truncates and extends for any truncated instructions as hints to
3962   // InstCombine.
3963   if (VF.isVector())
3964     truncateToMinimalBitwidths();
3965 
3966   // Fix widened non-induction PHIs by setting up the PHI operands.
3967   if (OrigPHIsToFix.size()) {
3968     assert(EnableVPlanNativePath &&
3969            "Unexpected non-induction PHIs for fixup in non VPlan-native path");
3970     fixNonInductionPHIs(State);
3971   }
3972 
3973   // At this point every instruction in the original loop is widened to a
3974   // vector form. Now we need to fix the recurrences in the loop. These PHI
3975   // nodes are currently empty because we did not want to introduce cycles.
3976   // This is the second stage of vectorizing recurrences.
3977   fixCrossIterationPHIs(State);
3978 
3979   // Forget the original basic block.
3980   PSE.getSE()->forgetLoop(OrigLoop);
3981 
3982   // Fix-up external users of the induction variables.
3983   for (auto &Entry : Legal->getInductionVars())
3984     fixupIVUsers(Entry.first, Entry.second,
3985                  getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
3986                  IVEndValues[Entry.first], LoopMiddleBlock);
3987 
3988   fixLCSSAPHIs(State);
3989   for (Instruction *PI : PredicatedInstructions)
3990     sinkScalarOperands(&*PI);
3991 
3992   // Remove redundant induction instructions.
3993   cse(LoopVectorBody);
3994 
3995   // Set/update profile weights for the vector and remainder loops as original
3996   // loop iterations are now distributed among them. Note that original loop
3997   // represented by LoopScalarBody becomes remainder loop after vectorization.
3998   //
3999   // For cases like foldTailByMasking() and requiresScalarEpiloque() we may
4000   // end up getting slightly roughened result but that should be OK since
4001   // profile is not inherently precise anyway. Note also possible bypass of
4002   // vector code caused by legality checks is ignored, assigning all the weight
4003   // to the vector loop, optimistically.
4004   //
4005   // For scalable vectorization we can't know at compile time how many iterations
4006   // of the loop are handled in one vector iteration, so instead assume a pessimistic
4007   // vscale of '1'.
4008   setProfileInfoAfterUnrolling(
4009       LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody),
4010       LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF);
4011 }
4012 
4013 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
4014   // In order to support recurrences we need to be able to vectorize Phi nodes.
4015   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4016   // stage #2: We now need to fix the recurrences by adding incoming edges to
4017   // the currently empty PHI nodes. At this point every instruction in the
4018   // original loop is widened to a vector form so we can use them to construct
4019   // the incoming edges.
4020   for (PHINode &Phi : OrigLoop->getHeader()->phis()) {
4021     // Handle first-order recurrences and reductions that need to be fixed.
4022     if (Legal->isFirstOrderRecurrence(&Phi))
4023       fixFirstOrderRecurrence(&Phi, State);
4024     else if (Legal->isReductionVariable(&Phi))
4025       fixReduction(&Phi, State);
4026   }
4027 }
4028 
4029 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi,
4030                                                   VPTransformState &State) {
4031   // This is the second phase of vectorizing first-order recurrences. An
4032   // overview of the transformation is described below. Suppose we have the
4033   // following loop.
4034   //
4035   //   for (int i = 0; i < n; ++i)
4036   //     b[i] = a[i] - a[i - 1];
4037   //
4038   // There is a first-order recurrence on "a". For this loop, the shorthand
4039   // scalar IR looks like:
4040   //
4041   //   scalar.ph:
4042   //     s_init = a[-1]
4043   //     br scalar.body
4044   //
4045   //   scalar.body:
4046   //     i = phi [0, scalar.ph], [i+1, scalar.body]
4047   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
4048   //     s2 = a[i]
4049   //     b[i] = s2 - s1
4050   //     br cond, scalar.body, ...
4051   //
4052   // In this example, s1 is a recurrence because it's value depends on the
4053   // previous iteration. In the first phase of vectorization, we created a
4054   // temporary value for s1. We now complete the vectorization and produce the
4055   // shorthand vector IR shown below (for VF = 4, UF = 1).
4056   //
4057   //   vector.ph:
4058   //     v_init = vector(..., ..., ..., a[-1])
4059   //     br vector.body
4060   //
4061   //   vector.body
4062   //     i = phi [0, vector.ph], [i+4, vector.body]
4063   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
4064   //     v2 = a[i, i+1, i+2, i+3];
4065   //     v3 = vector(v1(3), v2(0, 1, 2))
4066   //     b[i, i+1, i+2, i+3] = v2 - v3
4067   //     br cond, vector.body, middle.block
4068   //
4069   //   middle.block:
4070   //     x = v2(3)
4071   //     br scalar.ph
4072   //
4073   //   scalar.ph:
4074   //     s_init = phi [x, middle.block], [a[-1], otherwise]
4075   //     br scalar.body
4076   //
4077   // After execution completes the vector loop, we extract the next value of
4078   // the recurrence (x) to use as the initial value in the scalar loop.
4079 
4080   // Get the original loop preheader and single loop latch.
4081   auto *Preheader = OrigLoop->getLoopPreheader();
4082   auto *Latch = OrigLoop->getLoopLatch();
4083 
4084   // Get the initial and previous values of the scalar recurrence.
4085   auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
4086   auto *Previous = Phi->getIncomingValueForBlock(Latch);
4087 
4088   // Create a vector from the initial value.
4089   auto *VectorInit = ScalarInit;
4090   if (VF.isVector()) {
4091     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4092     assert(!VF.isScalable() && "VF is assumed to be non scalable.");
4093     VectorInit = Builder.CreateInsertElement(
4094         PoisonValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
4095         Builder.getInt32(VF.getKnownMinValue() - 1), "vector.recur.init");
4096   }
4097 
4098   VPValue *PhiDef = State.Plan->getVPValue(Phi);
4099   VPValue *PreviousDef = State.Plan->getVPValue(Previous);
4100   // We constructed a temporary phi node in the first phase of vectorization.
4101   // This phi node will eventually be deleted.
4102   Builder.SetInsertPoint(cast<Instruction>(State.get(PhiDef, 0)));
4103 
4104   // Create a phi node for the new recurrence. The current value will either be
4105   // the initial value inserted into a vector or loop-varying vector value.
4106   auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
4107   VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
4108 
4109   // Get the vectorized previous value of the last part UF - 1. It appears last
4110   // among all unrolled iterations, due to the order of their construction.
4111   Value *PreviousLastPart = State.get(PreviousDef, UF - 1);
4112 
4113   // Find and set the insertion point after the previous value if it is an
4114   // instruction.
4115   BasicBlock::iterator InsertPt;
4116   // Note that the previous value may have been constant-folded so it is not
4117   // guaranteed to be an instruction in the vector loop.
4118   // FIXME: Loop invariant values do not form recurrences. We should deal with
4119   //        them earlier.
4120   if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart))
4121     InsertPt = LoopVectorBody->getFirstInsertionPt();
4122   else {
4123     Instruction *PreviousInst = cast<Instruction>(PreviousLastPart);
4124     if (isa<PHINode>(PreviousLastPart))
4125       // If the previous value is a phi node, we should insert after all the phi
4126       // nodes in the block containing the PHI to avoid breaking basic block
4127       // verification. Note that the basic block may be different to
4128       // LoopVectorBody, in case we predicate the loop.
4129       InsertPt = PreviousInst->getParent()->getFirstInsertionPt();
4130     else
4131       InsertPt = ++PreviousInst->getIterator();
4132   }
4133   Builder.SetInsertPoint(&*InsertPt);
4134 
4135   // We will construct a vector for the recurrence by combining the values for
4136   // the current and previous iterations. This is the required shuffle mask.
4137   assert(!VF.isScalable());
4138   SmallVector<int, 8> ShuffleMask(VF.getKnownMinValue());
4139   ShuffleMask[0] = VF.getKnownMinValue() - 1;
4140   for (unsigned I = 1; I < VF.getKnownMinValue(); ++I)
4141     ShuffleMask[I] = I + VF.getKnownMinValue() - 1;
4142 
4143   // The vector from which to take the initial value for the current iteration
4144   // (actual or unrolled). Initially, this is the vector phi node.
4145   Value *Incoming = VecPhi;
4146 
4147   // Shuffle the current and previous vector and update the vector parts.
4148   for (unsigned Part = 0; Part < UF; ++Part) {
4149     Value *PreviousPart = State.get(PreviousDef, Part);
4150     Value *PhiPart = State.get(PhiDef, Part);
4151     auto *Shuffle =
4152         VF.isVector()
4153             ? Builder.CreateShuffleVector(Incoming, PreviousPart, ShuffleMask)
4154             : Incoming;
4155     PhiPart->replaceAllUsesWith(Shuffle);
4156     cast<Instruction>(PhiPart)->eraseFromParent();
4157     State.reset(PhiDef, Phi, Shuffle, Part);
4158     Incoming = PreviousPart;
4159   }
4160 
4161   // Fix the latch value of the new recurrence in the vector loop.
4162   VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4163 
4164   // Extract the last vector element in the middle block. This will be the
4165   // initial value for the recurrence when jumping to the scalar loop.
4166   auto *ExtractForScalar = Incoming;
4167   if (VF.isVector()) {
4168     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4169     ExtractForScalar = Builder.CreateExtractElement(
4170         ExtractForScalar, Builder.getInt32(VF.getKnownMinValue() - 1),
4171         "vector.recur.extract");
4172   }
4173   // Extract the second last element in the middle block if the
4174   // Phi is used outside the loop. We need to extract the phi itself
4175   // and not the last element (the phi update in the current iteration). This
4176   // will be the value when jumping to the exit block from the LoopMiddleBlock,
4177   // when the scalar loop is not run at all.
4178   Value *ExtractForPhiUsedOutsideLoop = nullptr;
4179   if (VF.isVector())
4180     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
4181         Incoming, Builder.getInt32(VF.getKnownMinValue() - 2),
4182         "vector.recur.extract.for.phi");
4183   // When loop is unrolled without vectorizing, initialize
4184   // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value of
4185   // `Incoming`. This is analogous to the vectorized case above: extracting the
4186   // second last element when VF > 1.
4187   else if (UF > 1)
4188     ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);
4189 
4190   // Fix the initial value of the original recurrence in the scalar loop.
4191   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
4192   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
4193   for (auto *BB : predecessors(LoopScalarPreHeader)) {
4194     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
4195     Start->addIncoming(Incoming, BB);
4196   }
4197 
4198   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
4199   Phi->setName("scalar.recur");
4200 
4201   // Finally, fix users of the recurrence outside the loop. The users will need
4202   // either the last value of the scalar recurrence or the last value of the
4203   // vector recurrence we extracted in the middle block. Since the loop is in
4204   // LCSSA form, we just need to find all the phi nodes for the original scalar
4205   // recurrence in the exit block, and then add an edge for the middle block.
4206   // Note that LCSSA does not imply single entry when the original scalar loop
4207   // had multiple exiting edges (as we always run the last iteration in the
4208   // scalar epilogue); in that case, the exiting path through middle will be
4209   // dynamically dead and the value picked for the phi doesn't matter.
4210   for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4211     if (any_of(LCSSAPhi.incoming_values(),
4212                [Phi](Value *V) { return V == Phi; }))
4213       LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
4214 }
4215 
4216 void InnerLoopVectorizer::fixReduction(PHINode *Phi, VPTransformState &State) {
4217   // Get it's reduction variable descriptor.
4218   assert(Legal->isReductionVariable(Phi) &&
4219          "Unable to find the reduction variable");
4220   RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi];
4221 
4222   RecurKind RK = RdxDesc.getRecurrenceKind();
4223   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
4224   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
4225   setDebugLocFromInst(Builder, ReductionStartValue);
4226   bool IsInLoopReductionPhi = Cost->isInLoopReduction(Phi);
4227 
4228   VPValue *LoopExitInstDef = State.Plan->getVPValue(LoopExitInst);
4229   // This is the vector-clone of the value that leaves the loop.
4230   Type *VecTy = State.get(LoopExitInstDef, 0)->getType();
4231 
4232   // Wrap flags are in general invalid after vectorization, clear them.
4233   clearReductionWrapFlags(RdxDesc);
4234 
4235   // Fix the vector-loop phi.
4236 
4237   // Reductions do not have to start at zero. They can start with
4238   // any loop invariant values.
4239   BasicBlock *Latch = OrigLoop->getLoopLatch();
4240   Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
4241 
4242   for (unsigned Part = 0; Part < UF; ++Part) {
4243     Value *VecRdxPhi = State.get(State.Plan->getVPValue(Phi), Part);
4244     Value *Val = State.get(State.Plan->getVPValue(LoopVal), Part);
4245     cast<PHINode>(VecRdxPhi)
4246       ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4247   }
4248 
4249   // Before each round, move the insertion point right between
4250   // the PHIs and the values we are going to write.
4251   // This allows us to write both PHINodes and the extractelement
4252   // instructions.
4253   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4254 
4255   setDebugLocFromInst(Builder, LoopExitInst);
4256 
4257   // If tail is folded by masking, the vector value to leave the loop should be
4258   // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
4259   // instead of the former. For an inloop reduction the reduction will already
4260   // be predicated, and does not need to be handled here.
4261   if (Cost->foldTailByMasking() && !IsInLoopReductionPhi) {
4262     for (unsigned Part = 0; Part < UF; ++Part) {
4263       Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
4264       Value *Sel = nullptr;
4265       for (User *U : VecLoopExitInst->users()) {
4266         if (isa<SelectInst>(U)) {
4267           assert(!Sel && "Reduction exit feeding two selects");
4268           Sel = U;
4269         } else
4270           assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
4271       }
4272       assert(Sel && "Reduction exit feeds no select");
4273       State.reset(LoopExitInstDef, LoopExitInst, Sel, Part);
4274 
4275       // If the target can create a predicated operator for the reduction at no
4276       // extra cost in the loop (for example a predicated vadd), it can be
4277       // cheaper for the select to remain in the loop than be sunk out of it,
4278       // and so use the select value for the phi instead of the old
4279       // LoopExitValue.
4280       RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi];
4281       if (PreferPredicatedReductionSelect ||
4282           TTI->preferPredicatedReductionSelect(
4283               RdxDesc.getOpcode(), Phi->getType(),
4284               TargetTransformInfo::ReductionFlags())) {
4285         auto *VecRdxPhi =
4286             cast<PHINode>(State.get(State.Plan->getVPValue(Phi), Part));
4287         VecRdxPhi->setIncomingValueForBlock(
4288             LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel);
4289       }
4290     }
4291   }
4292 
4293   // If the vector reduction can be performed in a smaller type, we truncate
4294   // then extend the loop exit value to enable InstCombine to evaluate the
4295   // entire expression in the smaller type.
4296   if (VF.isVector() && Phi->getType() != RdxDesc.getRecurrenceType()) {
4297     assert(!IsInLoopReductionPhi && "Unexpected truncated inloop reduction!");
4298     assert(!VF.isScalable() && "scalable vectors not yet supported.");
4299     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
4300     Builder.SetInsertPoint(
4301         LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
4302     VectorParts RdxParts(UF);
4303     for (unsigned Part = 0; Part < UF; ++Part) {
4304       RdxParts[Part] = State.get(LoopExitInstDef, Part);
4305       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4306       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
4307                                         : Builder.CreateZExt(Trunc, VecTy);
4308       for (Value::user_iterator UI = RdxParts[Part]->user_begin();
4309            UI != RdxParts[Part]->user_end();)
4310         if (*UI != Trunc) {
4311           (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd);
4312           RdxParts[Part] = Extnd;
4313         } else {
4314           ++UI;
4315         }
4316     }
4317     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4318     for (unsigned Part = 0; Part < UF; ++Part) {
4319       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4320       State.reset(LoopExitInstDef, LoopExitInst, RdxParts[Part], Part);
4321     }
4322   }
4323 
4324   // Reduce all of the unrolled parts into a single vector.
4325   Value *ReducedPartRdx = State.get(LoopExitInstDef, 0);
4326   unsigned Op = RecurrenceDescriptor::getOpcode(RK);
4327 
4328   // The middle block terminator has already been assigned a DebugLoc here (the
4329   // OrigLoop's single latch terminator). We want the whole middle block to
4330   // appear to execute on this line because: (a) it is all compiler generated,
4331   // (b) these instructions are always executed after evaluating the latch
4332   // conditional branch, and (c) other passes may add new predecessors which
4333   // terminate on this line. This is the easiest way to ensure we don't
4334   // accidentally cause an extra step back into the loop while debugging.
4335   setDebugLocFromInst(Builder, LoopMiddleBlock->getTerminator());
4336   {
4337     // Floating-point operations should have some FMF to enable the reduction.
4338     IRBuilderBase::FastMathFlagGuard FMFG(Builder);
4339     Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
4340     for (unsigned Part = 1; Part < UF; ++Part) {
4341       Value *RdxPart = State.get(LoopExitInstDef, Part);
4342       if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
4343         ReducedPartRdx = Builder.CreateBinOp(
4344             (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
4345       } else {
4346         ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
4347       }
4348     }
4349   }
4350 
4351   // Create the reduction after the loop. Note that inloop reductions create the
4352   // target reduction in the loop using a Reduction recipe.
4353   if (VF.isVector() && !IsInLoopReductionPhi) {
4354     ReducedPartRdx =
4355         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx);
4356     // If the reduction can be performed in a smaller type, we need to extend
4357     // the reduction to the wider type before we branch to the original loop.
4358     if (Phi->getType() != RdxDesc.getRecurrenceType())
4359       ReducedPartRdx =
4360         RdxDesc.isSigned()
4361         ? Builder.CreateSExt(ReducedPartRdx, Phi->getType())
4362         : Builder.CreateZExt(ReducedPartRdx, Phi->getType());
4363   }
4364 
4365   // Create a phi node that merges control-flow from the backedge-taken check
4366   // block and the middle block.
4367   PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx",
4368                                         LoopScalarPreHeader->getTerminator());
4369   for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
4370     BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
4371   BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
4372 
4373   // Now, we need to fix the users of the reduction variable
4374   // inside and outside of the scalar remainder loop.
4375 
4376   // We know that the loop is in LCSSA form. We need to update the PHI nodes
4377   // in the exit blocks.  See comment on analogous loop in
4378   // fixFirstOrderRecurrence for a more complete explaination of the logic.
4379   for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4380     if (any_of(LCSSAPhi.incoming_values(),
4381                [LoopExitInst](Value *V) { return V == LoopExitInst; }))
4382       LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
4383 
4384   // Fix the scalar loop reduction variable with the incoming reduction sum
4385   // from the vector body and from the backedge value.
4386   int IncomingEdgeBlockIdx =
4387     Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4388   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4389   // Pick the other block.
4390   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4391   Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4392   Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4393 }
4394 
4395 void InnerLoopVectorizer::clearReductionWrapFlags(
4396     RecurrenceDescriptor &RdxDesc) {
4397   RecurKind RK = RdxDesc.getRecurrenceKind();
4398   if (RK != RecurKind::Add && RK != RecurKind::Mul)
4399     return;
4400 
4401   Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
4402   assert(LoopExitInstr && "null loop exit instruction");
4403   SmallVector<Instruction *, 8> Worklist;
4404   SmallPtrSet<Instruction *, 8> Visited;
4405   Worklist.push_back(LoopExitInstr);
4406   Visited.insert(LoopExitInstr);
4407 
4408   while (!Worklist.empty()) {
4409     Instruction *Cur = Worklist.pop_back_val();
4410     if (isa<OverflowingBinaryOperator>(Cur))
4411       for (unsigned Part = 0; Part < UF; ++Part) {
4412         Value *V = getOrCreateVectorValue(Cur, Part);
4413         cast<Instruction>(V)->dropPoisonGeneratingFlags();
4414       }
4415 
4416     for (User *U : Cur->users()) {
4417       Instruction *UI = cast<Instruction>(U);
4418       if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
4419           Visited.insert(UI).second)
4420         Worklist.push_back(UI);
4421     }
4422   }
4423 }
4424 
4425 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) {
4426   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4427     if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1)
4428       // Some phis were already hand updated by the reduction and recurrence
4429       // code above, leave them alone.
4430       continue;
4431 
4432     auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
4433     // Non-instruction incoming values will have only one value.
4434     unsigned LastLane = 0;
4435     if (isa<Instruction>(IncomingValue))
4436       LastLane = Cost->isUniformAfterVectorization(
4437                      cast<Instruction>(IncomingValue), VF)
4438                      ? 0
4439                      : VF.getKnownMinValue() - 1;
4440     assert((!VF.isScalable() || LastLane == 0) &&
4441            "scalable vectors dont support non-uniform scalars yet");
4442     // Can be a loop invariant incoming value or the last scalar value to be
4443     // extracted from the vectorized loop.
4444     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4445     Value *lastIncomingValue =
4446         OrigLoop->isLoopInvariant(IncomingValue)
4447             ? IncomingValue
4448             : State.get(State.Plan->getVPValue(IncomingValue),
4449                         VPIteration(UF - 1, LastLane));
4450     LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
4451   }
4452 }
4453 
4454 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4455   // The basic block and loop containing the predicated instruction.
4456   auto *PredBB = PredInst->getParent();
4457   auto *VectorLoop = LI->getLoopFor(PredBB);
4458 
4459   // Initialize a worklist with the operands of the predicated instruction.
4460   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4461 
4462   // Holds instructions that we need to analyze again. An instruction may be
4463   // reanalyzed if we don't yet know if we can sink it or not.
4464   SmallVector<Instruction *, 8> InstsToReanalyze;
4465 
4466   // Returns true if a given use occurs in the predicated block. Phi nodes use
4467   // their operands in their corresponding predecessor blocks.
4468   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4469     auto *I = cast<Instruction>(U.getUser());
4470     BasicBlock *BB = I->getParent();
4471     if (auto *Phi = dyn_cast<PHINode>(I))
4472       BB = Phi->getIncomingBlock(
4473           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4474     return BB == PredBB;
4475   };
4476 
4477   // Iteratively sink the scalarized operands of the predicated instruction
4478   // into the block we created for it. When an instruction is sunk, it's
4479   // operands are then added to the worklist. The algorithm ends after one pass
4480   // through the worklist doesn't sink a single instruction.
4481   bool Changed;
4482   do {
4483     // Add the instructions that need to be reanalyzed to the worklist, and
4484     // reset the changed indicator.
4485     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4486     InstsToReanalyze.clear();
4487     Changed = false;
4488 
4489     while (!Worklist.empty()) {
4490       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4491 
4492       // We can't sink an instruction if it is a phi node, is already in the
4493       // predicated block, is not in the loop, or may have side effects.
4494       if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
4495           !VectorLoop->contains(I) || I->mayHaveSideEffects())
4496         continue;
4497 
4498       // It's legal to sink the instruction if all its uses occur in the
4499       // predicated block. Otherwise, there's nothing to do yet, and we may
4500       // need to reanalyze the instruction.
4501       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4502         InstsToReanalyze.push_back(I);
4503         continue;
4504       }
4505 
4506       // Move the instruction to the beginning of the predicated block, and add
4507       // it's operands to the worklist.
4508       I->moveBefore(&*PredBB->getFirstInsertionPt());
4509       Worklist.insert(I->op_begin(), I->op_end());
4510 
4511       // The sinking may have enabled other instructions to be sunk, so we will
4512       // need to iterate.
4513       Changed = true;
4514     }
4515   } while (Changed);
4516 }
4517 
4518 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
4519   for (PHINode *OrigPhi : OrigPHIsToFix) {
4520     PHINode *NewPhi =
4521         cast<PHINode>(State.get(State.Plan->getVPValue(OrigPhi), 0));
4522     unsigned NumIncomingValues = OrigPhi->getNumIncomingValues();
4523 
4524     SmallVector<BasicBlock *, 2> ScalarBBPredecessors(
4525         predecessors(OrigPhi->getParent()));
4526     SmallVector<BasicBlock *, 2> VectorBBPredecessors(
4527         predecessors(NewPhi->getParent()));
4528     assert(ScalarBBPredecessors.size() == VectorBBPredecessors.size() &&
4529            "Scalar and Vector BB should have the same number of predecessors");
4530 
4531     // The insertion point in Builder may be invalidated by the time we get
4532     // here. Force the Builder insertion point to something valid so that we do
4533     // not run into issues during insertion point restore in
4534     // getOrCreateVectorValue calls below.
4535     Builder.SetInsertPoint(NewPhi);
4536 
4537     // The predecessor order is preserved and we can rely on mapping between
4538     // scalar and vector block predecessors.
4539     for (unsigned i = 0; i < NumIncomingValues; ++i) {
4540       BasicBlock *NewPredBB = VectorBBPredecessors[i];
4541 
4542       // When looking up the new scalar/vector values to fix up, use incoming
4543       // values from original phi.
4544       Value *ScIncV =
4545           OrigPhi->getIncomingValueForBlock(ScalarBBPredecessors[i]);
4546 
4547       // Scalar incoming value may need a broadcast
4548       Value *NewIncV = getOrCreateVectorValue(ScIncV, 0);
4549       NewPhi->addIncoming(NewIncV, NewPredBB);
4550     }
4551   }
4552 }
4553 
4554 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, VPValue *VPDef,
4555                                    VPUser &Operands, unsigned UF,
4556                                    ElementCount VF, bool IsPtrLoopInvariant,
4557                                    SmallBitVector &IsIndexLoopInvariant,
4558                                    VPTransformState &State) {
4559   // Construct a vector GEP by widening the operands of the scalar GEP as
4560   // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
4561   // results in a vector of pointers when at least one operand of the GEP
4562   // is vector-typed. Thus, to keep the representation compact, we only use
4563   // vector-typed operands for loop-varying values.
4564 
4565   if (VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
4566     // If we are vectorizing, but the GEP has only loop-invariant operands,
4567     // the GEP we build (by only using vector-typed operands for
4568     // loop-varying values) would be a scalar pointer. Thus, to ensure we
4569     // produce a vector of pointers, we need to either arbitrarily pick an
4570     // operand to broadcast, or broadcast a clone of the original GEP.
4571     // Here, we broadcast a clone of the original.
4572     //
4573     // TODO: If at some point we decide to scalarize instructions having
4574     //       loop-invariant operands, this special case will no longer be
4575     //       required. We would add the scalarization decision to
4576     //       collectLoopScalars() and teach getVectorValue() to broadcast
4577     //       the lane-zero scalar value.
4578     auto *Clone = Builder.Insert(GEP->clone());
4579     for (unsigned Part = 0; Part < UF; ++Part) {
4580       Value *EntryPart = Builder.CreateVectorSplat(VF, Clone);
4581       State.set(VPDef, GEP, EntryPart, Part);
4582       addMetadata(EntryPart, GEP);
4583     }
4584   } else {
4585     // If the GEP has at least one loop-varying operand, we are sure to
4586     // produce a vector of pointers. But if we are only unrolling, we want
4587     // to produce a scalar GEP for each unroll part. Thus, the GEP we
4588     // produce with the code below will be scalar (if VF == 1) or vector
4589     // (otherwise). Note that for the unroll-only case, we still maintain
4590     // values in the vector mapping with initVector, as we do for other
4591     // instructions.
4592     for (unsigned Part = 0; Part < UF; ++Part) {
4593       // The pointer operand of the new GEP. If it's loop-invariant, we
4594       // won't broadcast it.
4595       auto *Ptr = IsPtrLoopInvariant
4596                       ? State.get(Operands.getOperand(0), VPIteration(0, 0))
4597                       : State.get(Operands.getOperand(0), Part);
4598 
4599       // Collect all the indices for the new GEP. If any index is
4600       // loop-invariant, we won't broadcast it.
4601       SmallVector<Value *, 4> Indices;
4602       for (unsigned I = 1, E = Operands.getNumOperands(); I < E; I++) {
4603         VPValue *Operand = Operands.getOperand(I);
4604         if (IsIndexLoopInvariant[I - 1])
4605           Indices.push_back(State.get(Operand, VPIteration(0, 0)));
4606         else
4607           Indices.push_back(State.get(Operand, Part));
4608       }
4609 
4610       // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
4611       // but it should be a vector, otherwise.
4612       auto *NewGEP =
4613           GEP->isInBounds()
4614               ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr,
4615                                           Indices)
4616               : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices);
4617       assert((VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
4618              "NewGEP is not a pointer vector");
4619       State.set(VPDef, GEP, NewGEP, Part);
4620       addMetadata(NewGEP, GEP);
4621     }
4622   }
4623 }
4624 
4625 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
4626                                               RecurrenceDescriptor *RdxDesc,
4627                                               Value *StartV, unsigned UF,
4628                                               ElementCount VF) {
4629   assert(!VF.isScalable() && "scalable vectors not yet supported.");
4630   PHINode *P = cast<PHINode>(PN);
4631   if (EnableVPlanNativePath) {
4632     // Currently we enter here in the VPlan-native path for non-induction
4633     // PHIs where all control flow is uniform. We simply widen these PHIs.
4634     // Create a vector phi with no operands - the vector phi operands will be
4635     // set at the end of vector code generation.
4636     Type *VecTy =
4637         (VF.isScalar()) ? PN->getType() : VectorType::get(PN->getType(), VF);
4638     Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4639     VectorLoopValueMap.setVectorValue(P, 0, VecPhi);
4640     OrigPHIsToFix.push_back(P);
4641 
4642     return;
4643   }
4644 
4645   assert(PN->getParent() == OrigLoop->getHeader() &&
4646          "Non-header phis should have been handled elsewhere");
4647 
4648   // In order to support recurrences we need to be able to vectorize Phi nodes.
4649   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4650   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4651   // this value when we vectorize all of the instructions that use the PHI.
4652   if (RdxDesc || Legal->isFirstOrderRecurrence(P)) {
4653     Value *Iden = nullptr;
4654     bool ScalarPHI =
4655         (VF.isScalar()) || Cost->isInLoopReduction(cast<PHINode>(PN));
4656     Type *VecTy =
4657         ScalarPHI ? PN->getType() : VectorType::get(PN->getType(), VF);
4658 
4659     if (RdxDesc) {
4660       assert(Legal->isReductionVariable(P) && StartV &&
4661              "RdxDesc should only be set for reduction variables; in that case "
4662              "a StartV is also required");
4663       RecurKind RK = RdxDesc->getRecurrenceKind();
4664       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK)) {
4665         // MinMax reduction have the start value as their identify.
4666         if (ScalarPHI) {
4667           Iden = StartV;
4668         } else {
4669           IRBuilderBase::InsertPointGuard IPBuilder(Builder);
4670           Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4671           StartV = Iden = Builder.CreateVectorSplat(VF, StartV, "minmax.ident");
4672         }
4673       } else {
4674         Constant *IdenC = RecurrenceDescriptor::getRecurrenceIdentity(
4675             RK, VecTy->getScalarType());
4676         Iden = IdenC;
4677 
4678         if (!ScalarPHI) {
4679           Iden = ConstantVector::getSplat(VF, IdenC);
4680           IRBuilderBase::InsertPointGuard IPBuilder(Builder);
4681           Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4682           Constant *Zero = Builder.getInt32(0);
4683           StartV = Builder.CreateInsertElement(Iden, StartV, Zero);
4684         }
4685       }
4686     }
4687 
4688     for (unsigned Part = 0; Part < UF; ++Part) {
4689       // This is phase one of vectorizing PHIs.
4690       Value *EntryPart = PHINode::Create(
4691           VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
4692       VectorLoopValueMap.setVectorValue(P, Part, EntryPart);
4693       if (StartV) {
4694         // Make sure to add the reduction start value only to the
4695         // first unroll part.
4696         Value *StartVal = (Part == 0) ? StartV : Iden;
4697         cast<PHINode>(EntryPart)->addIncoming(StartVal, LoopVectorPreHeader);
4698       }
4699     }
4700     return;
4701   }
4702 
4703   assert(!Legal->isReductionVariable(P) &&
4704          "reductions should be handled above");
4705 
4706   setDebugLocFromInst(Builder, P);
4707 
4708   // This PHINode must be an induction variable.
4709   // Make sure that we know about it.
4710   assert(Legal->getInductionVars().count(P) && "Not an induction variable");
4711 
4712   InductionDescriptor II = Legal->getInductionVars().lookup(P);
4713   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4714 
4715   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4716   // which can be found from the original scalar operations.
4717   switch (II.getKind()) {
4718   case InductionDescriptor::IK_NoInduction:
4719     llvm_unreachable("Unknown induction");
4720   case InductionDescriptor::IK_IntInduction:
4721   case InductionDescriptor::IK_FpInduction:
4722     llvm_unreachable("Integer/fp induction is handled elsewhere.");
4723   case InductionDescriptor::IK_PtrInduction: {
4724     // Handle the pointer induction variable case.
4725     assert(P->getType()->isPointerTy() && "Unexpected type.");
4726 
4727     if (Cost->isScalarAfterVectorization(P, VF)) {
4728       // This is the normalized GEP that starts counting at zero.
4729       Value *PtrInd =
4730           Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType());
4731       // Determine the number of scalars we need to generate for each unroll
4732       // iteration. If the instruction is uniform, we only need to generate the
4733       // first lane. Otherwise, we generate all VF values.
4734       unsigned Lanes =
4735           Cost->isUniformAfterVectorization(P, VF) ? 1 : VF.getKnownMinValue();
4736       for (unsigned Part = 0; Part < UF; ++Part) {
4737         for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4738           Constant *Idx = ConstantInt::get(PtrInd->getType(),
4739                                            Lane + Part * VF.getKnownMinValue());
4740           Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4741           Value *SclrGep =
4742               emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II);
4743           SclrGep->setName("next.gep");
4744           VectorLoopValueMap.setScalarValue(P, VPIteration(Part, Lane),
4745                                             SclrGep);
4746         }
4747       }
4748       return;
4749     }
4750     assert(isa<SCEVConstant>(II.getStep()) &&
4751            "Induction step not a SCEV constant!");
4752     Type *PhiType = II.getStep()->getType();
4753 
4754     // Build a pointer phi
4755     Value *ScalarStartValue = II.getStartValue();
4756     Type *ScStValueType = ScalarStartValue->getType();
4757     PHINode *NewPointerPhi =
4758         PHINode::Create(ScStValueType, 2, "pointer.phi", Induction);
4759     NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader);
4760 
4761     // A pointer induction, performed by using a gep
4762     BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
4763     Instruction *InductionLoc = LoopLatch->getTerminator();
4764     const SCEV *ScalarStep = II.getStep();
4765     SCEVExpander Exp(*PSE.getSE(), DL, "induction");
4766     Value *ScalarStepValue =
4767         Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc);
4768     Value *InductionGEP = GetElementPtrInst::Create(
4769         ScStValueType->getPointerElementType(), NewPointerPhi,
4770         Builder.CreateMul(
4771             ScalarStepValue,
4772             ConstantInt::get(PhiType, VF.getKnownMinValue() * UF)),
4773         "ptr.ind", InductionLoc);
4774     NewPointerPhi->addIncoming(InductionGEP, LoopLatch);
4775 
4776     // Create UF many actual address geps that use the pointer
4777     // phi as base and a vectorized version of the step value
4778     // (<step*0, ..., step*N>) as offset.
4779     for (unsigned Part = 0; Part < UF; ++Part) {
4780       SmallVector<Constant *, 8> Indices;
4781       // Create a vector of consecutive numbers from zero to VF.
4782       for (unsigned i = 0; i < VF.getKnownMinValue(); ++i)
4783         Indices.push_back(
4784             ConstantInt::get(PhiType, i + Part * VF.getKnownMinValue()));
4785       Constant *StartOffset = ConstantVector::get(Indices);
4786 
4787       Value *GEP = Builder.CreateGEP(
4788           ScStValueType->getPointerElementType(), NewPointerPhi,
4789           Builder.CreateMul(
4790               StartOffset,
4791               Builder.CreateVectorSplat(VF.getKnownMinValue(), ScalarStepValue),
4792               "vector.gep"));
4793       VectorLoopValueMap.setVectorValue(P, Part, GEP);
4794     }
4795   }
4796   }
4797 }
4798 
4799 /// A helper function for checking whether an integer division-related
4800 /// instruction may divide by zero (in which case it must be predicated if
4801 /// executed conditionally in the scalar code).
4802 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
4803 /// Non-zero divisors that are non compile-time constants will not be
4804 /// converted into multiplication, so we will still end up scalarizing
4805 /// the division, but can do so w/o predication.
4806 static bool mayDivideByZero(Instruction &I) {
4807   assert((I.getOpcode() == Instruction::UDiv ||
4808           I.getOpcode() == Instruction::SDiv ||
4809           I.getOpcode() == Instruction::URem ||
4810           I.getOpcode() == Instruction::SRem) &&
4811          "Unexpected instruction");
4812   Value *Divisor = I.getOperand(1);
4813   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4814   return !CInt || CInt->isZero();
4815 }
4816 
4817 void InnerLoopVectorizer::widenInstruction(Instruction &I, VPValue *Def,
4818                                            VPUser &User,
4819                                            VPTransformState &State) {
4820   switch (I.getOpcode()) {
4821   case Instruction::Call:
4822   case Instruction::Br:
4823   case Instruction::PHI:
4824   case Instruction::GetElementPtr:
4825   case Instruction::Select:
4826     llvm_unreachable("This instruction is handled by a different recipe.");
4827   case Instruction::UDiv:
4828   case Instruction::SDiv:
4829   case Instruction::SRem:
4830   case Instruction::URem:
4831   case Instruction::Add:
4832   case Instruction::FAdd:
4833   case Instruction::Sub:
4834   case Instruction::FSub:
4835   case Instruction::FNeg:
4836   case Instruction::Mul:
4837   case Instruction::FMul:
4838   case Instruction::FDiv:
4839   case Instruction::FRem:
4840   case Instruction::Shl:
4841   case Instruction::LShr:
4842   case Instruction::AShr:
4843   case Instruction::And:
4844   case Instruction::Or:
4845   case Instruction::Xor: {
4846     // Just widen unops and binops.
4847     setDebugLocFromInst(Builder, &I);
4848 
4849     for (unsigned Part = 0; Part < UF; ++Part) {
4850       SmallVector<Value *, 2> Ops;
4851       for (VPValue *VPOp : User.operands())
4852         Ops.push_back(State.get(VPOp, Part));
4853 
4854       Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
4855 
4856       if (auto *VecOp = dyn_cast<Instruction>(V))
4857         VecOp->copyIRFlags(&I);
4858 
4859       // Use this vector value for all users of the original instruction.
4860       State.set(Def, &I, V, Part);
4861       addMetadata(V, &I);
4862     }
4863 
4864     break;
4865   }
4866   case Instruction::ICmp:
4867   case Instruction::FCmp: {
4868     // Widen compares. Generate vector compares.
4869     bool FCmp = (I.getOpcode() == Instruction::FCmp);
4870     auto *Cmp = cast<CmpInst>(&I);
4871     setDebugLocFromInst(Builder, Cmp);
4872     for (unsigned Part = 0; Part < UF; ++Part) {
4873       Value *A = State.get(User.getOperand(0), Part);
4874       Value *B = State.get(User.getOperand(1), Part);
4875       Value *C = nullptr;
4876       if (FCmp) {
4877         // Propagate fast math flags.
4878         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
4879         Builder.setFastMathFlags(Cmp->getFastMathFlags());
4880         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
4881       } else {
4882         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
4883       }
4884       State.set(Def, &I, C, Part);
4885       addMetadata(C, &I);
4886     }
4887 
4888     break;
4889   }
4890 
4891   case Instruction::ZExt:
4892   case Instruction::SExt:
4893   case Instruction::FPToUI:
4894   case Instruction::FPToSI:
4895   case Instruction::FPExt:
4896   case Instruction::PtrToInt:
4897   case Instruction::IntToPtr:
4898   case Instruction::SIToFP:
4899   case Instruction::UIToFP:
4900   case Instruction::Trunc:
4901   case Instruction::FPTrunc:
4902   case Instruction::BitCast: {
4903     auto *CI = cast<CastInst>(&I);
4904     setDebugLocFromInst(Builder, CI);
4905 
4906     /// Vectorize casts.
4907     Type *DestTy =
4908         (VF.isScalar()) ? CI->getType() : VectorType::get(CI->getType(), VF);
4909 
4910     for (unsigned Part = 0; Part < UF; ++Part) {
4911       Value *A = State.get(User.getOperand(0), Part);
4912       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
4913       State.set(Def, &I, Cast, Part);
4914       addMetadata(Cast, &I);
4915     }
4916     break;
4917   }
4918   default:
4919     // This instruction is not vectorized by simple widening.
4920     LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
4921     llvm_unreachable("Unhandled instruction!");
4922   } // end of switch.
4923 }
4924 
4925 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def,
4926                                                VPUser &ArgOperands,
4927                                                VPTransformState &State) {
4928   assert(!isa<DbgInfoIntrinsic>(I) &&
4929          "DbgInfoIntrinsic should have been dropped during VPlan construction");
4930   setDebugLocFromInst(Builder, &I);
4931 
4932   Module *M = I.getParent()->getParent()->getParent();
4933   auto *CI = cast<CallInst>(&I);
4934 
4935   SmallVector<Type *, 4> Tys;
4936   for (Value *ArgOperand : CI->arg_operands())
4937     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue()));
4938 
4939   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4940 
4941   // The flag shows whether we use Intrinsic or a usual Call for vectorized
4942   // version of the instruction.
4943   // Is it beneficial to perform intrinsic call compared to lib call?
4944   bool NeedToScalarize = false;
4945   InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
4946   InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0;
4947   bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
4948   assert((UseVectorIntrinsic || !NeedToScalarize) &&
4949          "Instruction should be scalarized elsewhere.");
4950   assert(IntrinsicCost.isValid() && CallCost.isValid() &&
4951          "Cannot have invalid costs while widening");
4952 
4953   for (unsigned Part = 0; Part < UF; ++Part) {
4954     SmallVector<Value *, 4> Args;
4955     for (auto &I : enumerate(ArgOperands.operands())) {
4956       // Some intrinsics have a scalar argument - don't replace it with a
4957       // vector.
4958       Value *Arg;
4959       if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
4960         Arg = State.get(I.value(), Part);
4961       else
4962         Arg = State.get(I.value(), VPIteration(0, 0));
4963       Args.push_back(Arg);
4964     }
4965 
4966     Function *VectorF;
4967     if (UseVectorIntrinsic) {
4968       // Use vector version of the intrinsic.
4969       Type *TysForDecl[] = {CI->getType()};
4970       if (VF.isVector()) {
4971         assert(!VF.isScalable() && "VF is assumed to be non scalable.");
4972         TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
4973       }
4974       VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4975       assert(VectorF && "Can't retrieve vector intrinsic.");
4976     } else {
4977       // Use vector version of the function call.
4978       const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
4979 #ifndef NDEBUG
4980       assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
4981              "Can't create vector function.");
4982 #endif
4983         VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
4984     }
4985       SmallVector<OperandBundleDef, 1> OpBundles;
4986       CI->getOperandBundlesAsDefs(OpBundles);
4987       CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);
4988 
4989       if (isa<FPMathOperator>(V))
4990         V->copyFastMathFlags(CI);
4991 
4992       State.set(Def, &I, V, Part);
4993       addMetadata(V, &I);
4994   }
4995 }
4996 
4997 void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I, VPValue *VPDef,
4998                                                  VPUser &Operands,
4999                                                  bool InvariantCond,
5000                                                  VPTransformState &State) {
5001   setDebugLocFromInst(Builder, &I);
5002 
5003   // The condition can be loop invariant  but still defined inside the
5004   // loop. This means that we can't just use the original 'cond' value.
5005   // We have to take the 'vectorized' value and pick the first lane.
5006   // Instcombine will make this a no-op.
5007   auto *InvarCond = InvariantCond
5008                         ? State.get(Operands.getOperand(0), VPIteration(0, 0))
5009                         : nullptr;
5010 
5011   for (unsigned Part = 0; Part < UF; ++Part) {
5012     Value *Cond =
5013         InvarCond ? InvarCond : State.get(Operands.getOperand(0), Part);
5014     Value *Op0 = State.get(Operands.getOperand(1), Part);
5015     Value *Op1 = State.get(Operands.getOperand(2), Part);
5016     Value *Sel = Builder.CreateSelect(Cond, Op0, Op1);
5017     State.set(VPDef, &I, Sel, Part);
5018     addMetadata(Sel, &I);
5019   }
5020 }
5021 
5022 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
5023   // We should not collect Scalars more than once per VF. Right now, this
5024   // function is called from collectUniformsAndScalars(), which already does
5025   // this check. Collecting Scalars for VF=1 does not make any sense.
5026   assert(VF.isVector() && Scalars.find(VF) == Scalars.end() &&
5027          "This function should not be visited twice for the same VF");
5028 
5029   SmallSetVector<Instruction *, 8> Worklist;
5030 
5031   // These sets are used to seed the analysis with pointers used by memory
5032   // accesses that will remain scalar.
5033   SmallSetVector<Instruction *, 8> ScalarPtrs;
5034   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
5035   auto *Latch = TheLoop->getLoopLatch();
5036 
5037   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
5038   // The pointer operands of loads and stores will be scalar as long as the
5039   // memory access is not a gather or scatter operation. The value operand of a
5040   // store will remain scalar if the store is scalarized.
5041   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
5042     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
5043     assert(WideningDecision != CM_Unknown &&
5044            "Widening decision should be ready at this moment");
5045     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
5046       if (Ptr == Store->getValueOperand())
5047         return WideningDecision == CM_Scalarize;
5048     assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
5049            "Ptr is neither a value or pointer operand");
5050     return WideningDecision != CM_GatherScatter;
5051   };
5052 
5053   // A helper that returns true if the given value is a bitcast or
5054   // getelementptr instruction contained in the loop.
5055   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
5056     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
5057             isa<GetElementPtrInst>(V)) &&
5058            !TheLoop->isLoopInvariant(V);
5059   };
5060 
5061   auto isScalarPtrInduction = [&](Instruction *MemAccess, Value *Ptr) {
5062     if (!isa<PHINode>(Ptr) ||
5063         !Legal->getInductionVars().count(cast<PHINode>(Ptr)))
5064       return false;
5065     auto &Induction = Legal->getInductionVars()[cast<PHINode>(Ptr)];
5066     if (Induction.getKind() != InductionDescriptor::IK_PtrInduction)
5067       return false;
5068     return isScalarUse(MemAccess, Ptr);
5069   };
5070 
5071   // A helper that evaluates a memory access's use of a pointer. If the
5072   // pointer is actually the pointer induction of a loop, it is being
5073   // inserted into Worklist. If the use will be a scalar use, and the
5074   // pointer is only used by memory accesses, we place the pointer in
5075   // ScalarPtrs. Otherwise, the pointer is placed in PossibleNonScalarPtrs.
5076   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
5077     if (isScalarPtrInduction(MemAccess, Ptr)) {
5078       Worklist.insert(cast<Instruction>(Ptr));
5079       Instruction *Update = cast<Instruction>(
5080           cast<PHINode>(Ptr)->getIncomingValueForBlock(Latch));
5081       Worklist.insert(Update);
5082       LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Ptr
5083                         << "\n");
5084       LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Update
5085                         << "\n");
5086       return;
5087     }
5088     // We only care about bitcast and getelementptr instructions contained in
5089     // the loop.
5090     if (!isLoopVaryingBitCastOrGEP(Ptr))
5091       return;
5092 
5093     // If the pointer has already been identified as scalar (e.g., if it was
5094     // also identified as uniform), there's nothing to do.
5095     auto *I = cast<Instruction>(Ptr);
5096     if (Worklist.count(I))
5097       return;
5098 
5099     // If the use of the pointer will be a scalar use, and all users of the
5100     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
5101     // place the pointer in PossibleNonScalarPtrs.
5102     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
5103           return isa<LoadInst>(U) || isa<StoreInst>(U);
5104         }))
5105       ScalarPtrs.insert(I);
5106     else
5107       PossibleNonScalarPtrs.insert(I);
5108   };
5109 
5110   // We seed the scalars analysis with three classes of instructions: (1)
5111   // instructions marked uniform-after-vectorization and (2) bitcast,
5112   // getelementptr and (pointer) phi instructions used by memory accesses
5113   // requiring a scalar use.
5114   //
5115   // (1) Add to the worklist all instructions that have been identified as
5116   // uniform-after-vectorization.
5117   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
5118 
5119   // (2) Add to the worklist all bitcast and getelementptr instructions used by
5120   // memory accesses requiring a scalar use. The pointer operands of loads and
5121   // stores will be scalar as long as the memory accesses is not a gather or
5122   // scatter operation. The value operand of a store will remain scalar if the
5123   // store is scalarized.
5124   for (auto *BB : TheLoop->blocks())
5125     for (auto &I : *BB) {
5126       if (auto *Load = dyn_cast<LoadInst>(&I)) {
5127         evaluatePtrUse(Load, Load->getPointerOperand());
5128       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
5129         evaluatePtrUse(Store, Store->getPointerOperand());
5130         evaluatePtrUse(Store, Store->getValueOperand());
5131       }
5132     }
5133   for (auto *I : ScalarPtrs)
5134     if (!PossibleNonScalarPtrs.count(I)) {
5135       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
5136       Worklist.insert(I);
5137     }
5138 
5139   // Insert the forced scalars.
5140   // FIXME: Currently widenPHIInstruction() often creates a dead vector
5141   // induction variable when the PHI user is scalarized.
5142   auto ForcedScalar = ForcedScalars.find(VF);
5143   if (ForcedScalar != ForcedScalars.end())
5144     for (auto *I : ForcedScalar->second)
5145       Worklist.insert(I);
5146 
5147   // Expand the worklist by looking through any bitcasts and getelementptr
5148   // instructions we've already identified as scalar. This is similar to the
5149   // expansion step in collectLoopUniforms(); however, here we're only
5150   // expanding to include additional bitcasts and getelementptr instructions.
5151   unsigned Idx = 0;
5152   while (Idx != Worklist.size()) {
5153     Instruction *Dst = Worklist[Idx++];
5154     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
5155       continue;
5156     auto *Src = cast<Instruction>(Dst->getOperand(0));
5157     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
5158           auto *J = cast<Instruction>(U);
5159           return !TheLoop->contains(J) || Worklist.count(J) ||
5160                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
5161                   isScalarUse(J, Src));
5162         })) {
5163       Worklist.insert(Src);
5164       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
5165     }
5166   }
5167 
5168   // An induction variable will remain scalar if all users of the induction
5169   // variable and induction variable update remain scalar.
5170   for (auto &Induction : Legal->getInductionVars()) {
5171     auto *Ind = Induction.first;
5172     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5173 
5174     // If tail-folding is applied, the primary induction variable will be used
5175     // to feed a vector compare.
5176     if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
5177       continue;
5178 
5179     // Determine if all users of the induction variable are scalar after
5180     // vectorization.
5181     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5182       auto *I = cast<Instruction>(U);
5183       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
5184     });
5185     if (!ScalarInd)
5186       continue;
5187 
5188     // Determine if all users of the induction variable update instruction are
5189     // scalar after vectorization.
5190     auto ScalarIndUpdate =
5191         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5192           auto *I = cast<Instruction>(U);
5193           return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
5194         });
5195     if (!ScalarIndUpdate)
5196       continue;
5197 
5198     // The induction variable and its update instruction will remain scalar.
5199     Worklist.insert(Ind);
5200     Worklist.insert(IndUpdate);
5201     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
5202     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
5203                       << "\n");
5204   }
5205 
5206   Scalars[VF].insert(Worklist.begin(), Worklist.end());
5207 }
5208 
5209 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I,
5210                                                          ElementCount VF) {
5211   if (!blockNeedsPredication(I->getParent()))
5212     return false;
5213   switch(I->getOpcode()) {
5214   default:
5215     break;
5216   case Instruction::Load:
5217   case Instruction::Store: {
5218     if (!Legal->isMaskRequired(I))
5219       return false;
5220     auto *Ptr = getLoadStorePointerOperand(I);
5221     auto *Ty = getMemInstValueType(I);
5222     // We have already decided how to vectorize this instruction, get that
5223     // result.
5224     if (VF.isVector()) {
5225       InstWidening WideningDecision = getWideningDecision(I, VF);
5226       assert(WideningDecision != CM_Unknown &&
5227              "Widening decision should be ready at this moment");
5228       return WideningDecision == CM_Scalarize;
5229     }
5230     const Align Alignment = getLoadStoreAlignment(I);
5231     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
5232                                 isLegalMaskedGather(Ty, Alignment))
5233                             : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
5234                                 isLegalMaskedScatter(Ty, Alignment));
5235   }
5236   case Instruction::UDiv:
5237   case Instruction::SDiv:
5238   case Instruction::SRem:
5239   case Instruction::URem:
5240     return mayDivideByZero(*I);
5241   }
5242   return false;
5243 }
5244 
5245 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
5246     Instruction *I, ElementCount VF) {
5247   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
5248   assert(getWideningDecision(I, VF) == CM_Unknown &&
5249          "Decision should not be set yet.");
5250   auto *Group = getInterleavedAccessGroup(I);
5251   assert(Group && "Must have a group.");
5252 
5253   // If the instruction's allocated size doesn't equal it's type size, it
5254   // requires padding and will be scalarized.
5255   auto &DL = I->getModule()->getDataLayout();
5256   auto *ScalarTy = getMemInstValueType(I);
5257   if (hasIrregularType(ScalarTy, DL, VF))
5258     return false;
5259 
5260   // Check if masking is required.
5261   // A Group may need masking for one of two reasons: it resides in a block that
5262   // needs predication, or it was decided to use masking to deal with gaps.
5263   bool PredicatedAccessRequiresMasking =
5264       Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I);
5265   bool AccessWithGapsRequiresMasking =
5266       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
5267   if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking)
5268     return true;
5269 
5270   // If masked interleaving is required, we expect that the user/target had
5271   // enabled it, because otherwise it either wouldn't have been created or
5272   // it should have been invalidated by the CostModel.
5273   assert(useMaskedInterleavedAccesses(TTI) &&
5274          "Masked interleave-groups for predicated accesses are not enabled.");
5275 
5276   auto *Ty = getMemInstValueType(I);
5277   const Align Alignment = getLoadStoreAlignment(I);
5278   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
5279                           : TTI.isLegalMaskedStore(Ty, Alignment);
5280 }
5281 
5282 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
5283     Instruction *I, ElementCount VF) {
5284   // Get and ensure we have a valid memory instruction.
5285   LoadInst *LI = dyn_cast<LoadInst>(I);
5286   StoreInst *SI = dyn_cast<StoreInst>(I);
5287   assert((LI || SI) && "Invalid memory instruction");
5288 
5289   auto *Ptr = getLoadStorePointerOperand(I);
5290 
5291   // In order to be widened, the pointer should be consecutive, first of all.
5292   if (!Legal->isConsecutivePtr(Ptr))
5293     return false;
5294 
5295   // If the instruction is a store located in a predicated block, it will be
5296   // scalarized.
5297   if (isScalarWithPredication(I))
5298     return false;
5299 
5300   // If the instruction's allocated size doesn't equal it's type size, it
5301   // requires padding and will be scalarized.
5302   auto &DL = I->getModule()->getDataLayout();
5303   auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
5304   if (hasIrregularType(ScalarTy, DL, VF))
5305     return false;
5306 
5307   return true;
5308 }
5309 
5310 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
5311   // We should not collect Uniforms more than once per VF. Right now,
5312   // this function is called from collectUniformsAndScalars(), which
5313   // already does this check. Collecting Uniforms for VF=1 does not make any
5314   // sense.
5315 
5316   assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
5317          "This function should not be visited twice for the same VF");
5318 
5319   // Visit the list of Uniforms. If we'll not find any uniform value, we'll
5320   // not analyze again.  Uniforms.count(VF) will return 1.
5321   Uniforms[VF].clear();
5322 
5323   // We now know that the loop is vectorizable!
5324   // Collect instructions inside the loop that will remain uniform after
5325   // vectorization.
5326 
5327   // Global values, params and instructions outside of current loop are out of
5328   // scope.
5329   auto isOutOfScope = [&](Value *V) -> bool {
5330     Instruction *I = dyn_cast<Instruction>(V);
5331     return (!I || !TheLoop->contains(I));
5332   };
5333 
5334   SetVector<Instruction *> Worklist;
5335   BasicBlock *Latch = TheLoop->getLoopLatch();
5336 
5337   // Instructions that are scalar with predication must not be considered
5338   // uniform after vectorization, because that would create an erroneous
5339   // replicating region where only a single instance out of VF should be formed.
5340   // TODO: optimize such seldom cases if found important, see PR40816.
5341   auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
5342     if (isOutOfScope(I)) {
5343       LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
5344                         << *I << "\n");
5345       return;
5346     }
5347     if (isScalarWithPredication(I, VF)) {
5348       LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
5349                         << *I << "\n");
5350       return;
5351     }
5352     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
5353     Worklist.insert(I);
5354   };
5355 
5356   // Start with the conditional branch. If the branch condition is an
5357   // instruction contained in the loop that is only used by the branch, it is
5358   // uniform.
5359   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
5360   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
5361     addToWorklistIfAllowed(Cmp);
5362 
5363   auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
5364     InstWidening WideningDecision = getWideningDecision(I, VF);
5365     assert(WideningDecision != CM_Unknown &&
5366            "Widening decision should be ready at this moment");
5367 
5368     // A uniform memory op is itself uniform.  We exclude uniform stores
5369     // here as they demand the last lane, not the first one.
5370     if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) {
5371       assert(WideningDecision == CM_Scalarize);
5372       return true;
5373     }
5374 
5375     return (WideningDecision == CM_Widen ||
5376             WideningDecision == CM_Widen_Reverse ||
5377             WideningDecision == CM_Interleave);
5378   };
5379 
5380 
5381   // Returns true if Ptr is the pointer operand of a memory access instruction
5382   // I, and I is known to not require scalarization.
5383   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
5384     return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
5385   };
5386 
5387   // Holds a list of values which are known to have at least one uniform use.
5388   // Note that there may be other uses which aren't uniform.  A "uniform use"
5389   // here is something which only demands lane 0 of the unrolled iterations;
5390   // it does not imply that all lanes produce the same value (e.g. this is not
5391   // the usual meaning of uniform)
5392   SmallPtrSet<Value *, 8> HasUniformUse;
5393 
5394   // Scan the loop for instructions which are either a) known to have only
5395   // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
5396   for (auto *BB : TheLoop->blocks())
5397     for (auto &I : *BB) {
5398       // If there's no pointer operand, there's nothing to do.
5399       auto *Ptr = getLoadStorePointerOperand(&I);
5400       if (!Ptr)
5401         continue;
5402 
5403       // A uniform memory op is itself uniform.  We exclude uniform stores
5404       // here as they demand the last lane, not the first one.
5405       if (isa<LoadInst>(I) && Legal->isUniformMemOp(I))
5406         addToWorklistIfAllowed(&I);
5407 
5408       if (isUniformDecision(&I, VF)) {
5409         assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check");
5410         HasUniformUse.insert(Ptr);
5411       }
5412     }
5413 
5414   // Add to the worklist any operands which have *only* uniform (e.g. lane 0
5415   // demanding) users.  Since loops are assumed to be in LCSSA form, this
5416   // disallows uses outside the loop as well.
5417   for (auto *V : HasUniformUse) {
5418     if (isOutOfScope(V))
5419       continue;
5420     auto *I = cast<Instruction>(V);
5421     auto UsersAreMemAccesses =
5422       llvm::all_of(I->users(), [&](User *U) -> bool {
5423         return isVectorizedMemAccessUse(cast<Instruction>(U), V);
5424       });
5425     if (UsersAreMemAccesses)
5426       addToWorklistIfAllowed(I);
5427   }
5428 
5429   // Expand Worklist in topological order: whenever a new instruction
5430   // is added , its users should be already inside Worklist.  It ensures
5431   // a uniform instruction will only be used by uniform instructions.
5432   unsigned idx = 0;
5433   while (idx != Worklist.size()) {
5434     Instruction *I = Worklist[idx++];
5435 
5436     for (auto OV : I->operand_values()) {
5437       // isOutOfScope operands cannot be uniform instructions.
5438       if (isOutOfScope(OV))
5439         continue;
5440       // First order recurrence Phi's should typically be considered
5441       // non-uniform.
5442       auto *OP = dyn_cast<PHINode>(OV);
5443       if (OP && Legal->isFirstOrderRecurrence(OP))
5444         continue;
5445       // If all the users of the operand are uniform, then add the
5446       // operand into the uniform worklist.
5447       auto *OI = cast<Instruction>(OV);
5448       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
5449             auto *J = cast<Instruction>(U);
5450             return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
5451           }))
5452         addToWorklistIfAllowed(OI);
5453     }
5454   }
5455 
5456   // For an instruction to be added into Worklist above, all its users inside
5457   // the loop should also be in Worklist. However, this condition cannot be
5458   // true for phi nodes that form a cyclic dependence. We must process phi
5459   // nodes separately. An induction variable will remain uniform if all users
5460   // of the induction variable and induction variable update remain uniform.
5461   // The code below handles both pointer and non-pointer induction variables.
5462   for (auto &Induction : Legal->getInductionVars()) {
5463     auto *Ind = Induction.first;
5464     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5465 
5466     // Determine if all users of the induction variable are uniform after
5467     // vectorization.
5468     auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5469       auto *I = cast<Instruction>(U);
5470       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
5471              isVectorizedMemAccessUse(I, Ind);
5472     });
5473     if (!UniformInd)
5474       continue;
5475 
5476     // Determine if all users of the induction variable update instruction are
5477     // uniform after vectorization.
5478     auto UniformIndUpdate =
5479         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5480           auto *I = cast<Instruction>(U);
5481           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
5482                  isVectorizedMemAccessUse(I, IndUpdate);
5483         });
5484     if (!UniformIndUpdate)
5485       continue;
5486 
5487     // The induction variable and its update instruction will remain uniform.
5488     addToWorklistIfAllowed(Ind);
5489     addToWorklistIfAllowed(IndUpdate);
5490   }
5491 
5492   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
5493 }
5494 
5495 bool LoopVectorizationCostModel::runtimeChecksRequired() {
5496   LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
5497 
5498   if (Legal->getRuntimePointerChecking()->Need) {
5499     reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
5500         "runtime pointer checks needed. Enable vectorization of this "
5501         "loop with '#pragma clang loop vectorize(enable)' when "
5502         "compiling with -Os/-Oz",
5503         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5504     return true;
5505   }
5506 
5507   if (!PSE.getUnionPredicate().getPredicates().empty()) {
5508     reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
5509         "runtime SCEV checks needed. Enable vectorization of this "
5510         "loop with '#pragma clang loop vectorize(enable)' when "
5511         "compiling with -Os/-Oz",
5512         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5513     return true;
5514   }
5515 
5516   // FIXME: Avoid specializing for stride==1 instead of bailing out.
5517   if (!Legal->getLAI()->getSymbolicStrides().empty()) {
5518     reportVectorizationFailure("Runtime stride check for small trip count",
5519         "runtime stride == 1 checks needed. Enable vectorization of "
5520         "this loop without such check by compiling with -Os/-Oz",
5521         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5522     return true;
5523   }
5524 
5525   return false;
5526 }
5527 
5528 Optional<ElementCount>
5529 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
5530   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
5531     // TODO: It may by useful to do since it's still likely to be dynamically
5532     // uniform if the target can skip.
5533     reportVectorizationFailure(
5534         "Not inserting runtime ptr check for divergent target",
5535         "runtime pointer checks needed. Not enabled for divergent target",
5536         "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
5537     return None;
5538   }
5539 
5540   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5541   LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
5542   if (TC == 1) {
5543     reportVectorizationFailure("Single iteration (non) loop",
5544         "loop trip count is one, irrelevant for vectorization",
5545         "SingleIterationLoop", ORE, TheLoop);
5546     return None;
5547   }
5548 
5549   switch (ScalarEpilogueStatus) {
5550   case CM_ScalarEpilogueAllowed:
5551     return computeFeasibleMaxVF(TC, UserVF);
5552   case CM_ScalarEpilogueNotAllowedUsePredicate:
5553     LLVM_FALLTHROUGH;
5554   case CM_ScalarEpilogueNotNeededUsePredicate:
5555     LLVM_DEBUG(
5556         dbgs() << "LV: vector predicate hint/switch found.\n"
5557                << "LV: Not allowing scalar epilogue, creating predicated "
5558                << "vector loop.\n");
5559     break;
5560   case CM_ScalarEpilogueNotAllowedLowTripLoop:
5561     // fallthrough as a special case of OptForSize
5562   case CM_ScalarEpilogueNotAllowedOptSize:
5563     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
5564       LLVM_DEBUG(
5565           dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5566     else
5567       LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5568                         << "count.\n");
5569 
5570     // Bail if runtime checks are required, which are not good when optimising
5571     // for size.
5572     if (runtimeChecksRequired())
5573       return None;
5574 
5575     break;
5576   }
5577 
5578   // The only loops we can vectorize without a scalar epilogue, are loops with
5579   // a bottom-test and a single exiting block. We'd have to handle the fact
5580   // that not every instruction executes on the last iteration.  This will
5581   // require a lane mask which varies through the vector loop body.  (TODO)
5582   if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5583     // If there was a tail-folding hint/switch, but we can't fold the tail by
5584     // masking, fallback to a vectorization with a scalar epilogue.
5585     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5586       LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5587                            "scalar epilogue instead.\n");
5588       ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5589       return computeFeasibleMaxVF(TC, UserVF);
5590     }
5591     return None;
5592   }
5593 
5594   // Now try the tail folding
5595 
5596   // Invalidate interleave groups that require an epilogue if we can't mask
5597   // the interleave-group.
5598   if (!useMaskedInterleavedAccesses(TTI)) {
5599     assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5600            "No decisions should have been taken at this point");
5601     // Note: There is no need to invalidate any cost modeling decisions here, as
5602     // non where taken so far.
5603     InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5604   }
5605 
5606   ElementCount MaxVF = computeFeasibleMaxVF(TC, UserVF);
5607   assert(!MaxVF.isScalable() &&
5608          "Scalable vectors do not yet support tail folding");
5609   assert((UserVF.isNonZero() || isPowerOf2_32(MaxVF.getFixedValue())) &&
5610          "MaxVF must be a power of 2");
5611   unsigned MaxVFtimesIC =
5612       UserIC ? MaxVF.getFixedValue() * UserIC : MaxVF.getFixedValue();
5613   // Avoid tail folding if the trip count is known to be a multiple of any VF we
5614   // chose.
5615   ScalarEvolution *SE = PSE.getSE();
5616   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
5617   const SCEV *ExitCount = SE->getAddExpr(
5618       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
5619   const SCEV *Rem = SE->getURemExpr(
5620       SE->applyLoopGuards(ExitCount, TheLoop),
5621       SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
5622   if (Rem->isZero()) {
5623     // Accept MaxVF if we do not have a tail.
5624     LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
5625     return MaxVF;
5626   }
5627 
5628   // If we don't know the precise trip count, or if the trip count that we
5629   // found modulo the vectorization factor is not zero, try to fold the tail
5630   // by masking.
5631   // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
5632   if (Legal->prepareToFoldTailByMasking()) {
5633     FoldTailByMasking = true;
5634     return MaxVF;
5635   }
5636 
5637   // If there was a tail-folding hint/switch, but we can't fold the tail by
5638   // masking, fallback to a vectorization with a scalar epilogue.
5639   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5640     LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5641                          "scalar epilogue instead.\n");
5642     ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5643     return MaxVF;
5644   }
5645 
5646   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
5647     LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
5648     return None;
5649   }
5650 
5651   if (TC == 0) {
5652     reportVectorizationFailure(
5653         "Unable to calculate the loop count due to complex control flow",
5654         "unable to calculate the loop count due to complex control flow",
5655         "UnknownLoopCountComplexCFG", ORE, TheLoop);
5656     return None;
5657   }
5658 
5659   reportVectorizationFailure(
5660       "Cannot optimize for size and vectorize at the same time.",
5661       "cannot optimize for size and vectorize at the same time. "
5662       "Enable vectorization of this loop with '#pragma clang loop "
5663       "vectorize(enable)' when compiling with -Os/-Oz",
5664       "NoTailLoopWithOptForSize", ORE, TheLoop);
5665   return None;
5666 }
5667 
5668 ElementCount
5669 LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount,
5670                                                  ElementCount UserVF) {
5671   bool IgnoreScalableUserVF = UserVF.isScalable() &&
5672                               !TTI.supportsScalableVectors() &&
5673                               !ForceTargetSupportsScalableVectors;
5674   if (IgnoreScalableUserVF) {
5675     LLVM_DEBUG(
5676         dbgs() << "LV: Ignoring VF=" << UserVF
5677                << " because target does not support scalable vectors.\n");
5678     ORE->emit([&]() {
5679       return OptimizationRemarkAnalysis(DEBUG_TYPE, "IgnoreScalableUserVF",
5680                                         TheLoop->getStartLoc(),
5681                                         TheLoop->getHeader())
5682              << "Ignoring VF=" << ore::NV("UserVF", UserVF)
5683              << " because target does not support scalable vectors.";
5684     });
5685   }
5686 
5687   // Beyond this point two scenarios are handled. If UserVF isn't specified
5688   // then a suitable VF is chosen. If UserVF is specified and there are
5689   // dependencies, check if it's legal. However, if a UserVF is specified and
5690   // there are no dependencies, then there's nothing to do.
5691   if (UserVF.isNonZero() && !IgnoreScalableUserVF &&
5692       Legal->isSafeForAnyVectorWidth())
5693     return UserVF;
5694 
5695   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
5696   unsigned SmallestType, WidestType;
5697   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
5698   unsigned WidestRegister = TTI.getRegisterBitWidth(true);
5699 
5700   // Get the maximum safe dependence distance in bits computed by LAA.
5701   // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
5702   // the memory accesses that is most restrictive (involved in the smallest
5703   // dependence distance).
5704   unsigned MaxSafeVectorWidthInBits = Legal->getMaxSafeVectorWidthInBits();
5705 
5706   // If the user vectorization factor is legally unsafe, clamp it to a safe
5707   // value. Otherwise, return as is.
5708   if (UserVF.isNonZero() && !IgnoreScalableUserVF) {
5709     unsigned MaxSafeElements =
5710         PowerOf2Floor(MaxSafeVectorWidthInBits / WidestType);
5711     ElementCount MaxSafeVF = ElementCount::getFixed(MaxSafeElements);
5712 
5713     if (UserVF.isScalable()) {
5714       Optional<unsigned> MaxVScale = TTI.getMaxVScale();
5715 
5716       // Scale VF by vscale before checking if it's safe.
5717       MaxSafeVF = ElementCount::getScalable(
5718           MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);
5719 
5720       if (MaxSafeVF.isZero()) {
5721         // The dependence distance is too small to use scalable vectors,
5722         // fallback on fixed.
5723         LLVM_DEBUG(
5724             dbgs()
5725             << "LV: Max legal vector width too small, scalable vectorization "
5726                "unfeasible. Using fixed-width vectorization instead.\n");
5727         ORE->emit([&]() {
5728           return OptimizationRemarkAnalysis(DEBUG_TYPE, "ScalableVFUnfeasible",
5729                                             TheLoop->getStartLoc(),
5730                                             TheLoop->getHeader())
5731                  << "Max legal vector width too small, scalable vectorization "
5732                  << "unfeasible. Using fixed-width vectorization instead.";
5733         });
5734         return computeFeasibleMaxVF(
5735             ConstTripCount, ElementCount::getFixed(UserVF.getKnownMinValue()));
5736       }
5737     }
5738 
5739     LLVM_DEBUG(dbgs() << "LV: The max safe VF is: " << MaxSafeVF << ".\n");
5740 
5741     if (ElementCount::isKnownLE(UserVF, MaxSafeVF))
5742       return UserVF;
5743 
5744     LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5745                       << " is unsafe, clamping to max safe VF=" << MaxSafeVF
5746                       << ".\n");
5747     ORE->emit([&]() {
5748       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5749                                         TheLoop->getStartLoc(),
5750                                         TheLoop->getHeader())
5751              << "User-specified vectorization factor "
5752              << ore::NV("UserVectorizationFactor", UserVF)
5753              << " is unsafe, clamping to maximum safe vectorization factor "
5754              << ore::NV("VectorizationFactor", MaxSafeVF);
5755     });
5756     return MaxSafeVF;
5757   }
5758 
5759   WidestRegister = std::min(WidestRegister, MaxSafeVectorWidthInBits);
5760 
5761   // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
5762   // Note that both WidestRegister and WidestType may not be a powers of 2.
5763   unsigned MaxVectorSize = PowerOf2Floor(WidestRegister / WidestType);
5764 
5765   LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
5766                     << " / " << WidestType << " bits.\n");
5767   LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
5768                     << WidestRegister << " bits.\n");
5769 
5770   assert(MaxVectorSize <= WidestRegister &&
5771          "Did not expect to pack so many elements"
5772          " into one vector!");
5773   if (MaxVectorSize == 0) {
5774     LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n");
5775     MaxVectorSize = 1;
5776     return ElementCount::getFixed(MaxVectorSize);
5777   } else if (ConstTripCount && ConstTripCount < MaxVectorSize &&
5778              isPowerOf2_32(ConstTripCount)) {
5779     // We need to clamp the VF to be the ConstTripCount. There is no point in
5780     // choosing a higher viable VF as done in the loop below.
5781     LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
5782                       << ConstTripCount << "\n");
5783     MaxVectorSize = ConstTripCount;
5784     return ElementCount::getFixed(MaxVectorSize);
5785   }
5786 
5787   unsigned MaxVF = MaxVectorSize;
5788   if (TTI.shouldMaximizeVectorBandwidth(!isScalarEpilogueAllowed()) ||
5789       (MaximizeBandwidth && isScalarEpilogueAllowed())) {
5790     // Collect all viable vectorization factors larger than the default MaxVF
5791     // (i.e. MaxVectorSize).
5792     SmallVector<ElementCount, 8> VFs;
5793     unsigned NewMaxVectorSize = WidestRegister / SmallestType;
5794     for (unsigned VS = MaxVectorSize * 2; VS <= NewMaxVectorSize; VS *= 2)
5795       VFs.push_back(ElementCount::getFixed(VS));
5796 
5797     // For each VF calculate its register usage.
5798     auto RUs = calculateRegisterUsage(VFs);
5799 
5800     // Select the largest VF which doesn't require more registers than existing
5801     // ones.
5802     for (int i = RUs.size() - 1; i >= 0; --i) {
5803       bool Selected = true;
5804       for (auto& pair : RUs[i].MaxLocalUsers) {
5805         unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5806         if (pair.second > TargetNumRegisters)
5807           Selected = false;
5808       }
5809       if (Selected) {
5810         MaxVF = VFs[i].getKnownMinValue();
5811         break;
5812       }
5813     }
5814     if (unsigned MinVF = TTI.getMinimumVF(SmallestType)) {
5815       if (MaxVF < MinVF) {
5816         LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
5817                           << ") with target's minimum: " << MinVF << '\n');
5818         MaxVF = MinVF;
5819       }
5820     }
5821   }
5822   return ElementCount::getFixed(MaxVF);
5823 }
5824 
5825 VectorizationFactor
5826 LoopVectorizationCostModel::selectVectorizationFactor(ElementCount MaxVF) {
5827   // FIXME: This can be fixed for scalable vectors later, because at this stage
5828   // the LoopVectorizer will only consider vectorizing a loop with scalable
5829   // vectors when the loop has a hint to enable vectorization for a given VF.
5830   assert(!MaxVF.isScalable() && "scalable vectors not yet supported");
5831 
5832   InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first;
5833   LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
5834   assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
5835 
5836   unsigned Width = 1;
5837   const float ScalarCost = *ExpectedCost.getValue();
5838   float Cost = ScalarCost;
5839 
5840   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
5841   if (ForceVectorization && MaxVF.isVector()) {
5842     // Ignore scalar width, because the user explicitly wants vectorization.
5843     // Initialize cost to max so that VF = 2 is, at least, chosen during cost
5844     // evaluation.
5845     Cost = std::numeric_limits<float>::max();
5846   }
5847 
5848   for (unsigned i = 2; i <= MaxVF.getFixedValue(); i *= 2) {
5849     // Notice that the vector loop needs to be executed less times, so
5850     // we need to divide the cost of the vector loops by the width of
5851     // the vector elements.
5852     VectorizationCostTy C = expectedCost(ElementCount::getFixed(i));
5853     assert(C.first.isValid() && "Unexpected invalid cost for vector loop");
5854     float VectorCost = *C.first.getValue() / (float)i;
5855     LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
5856                       << " costs: " << (int)VectorCost << ".\n");
5857     if (!C.second && !ForceVectorization) {
5858       LLVM_DEBUG(
5859           dbgs() << "LV: Not considering vector loop of width " << i
5860                  << " because it will not generate any vector instructions.\n");
5861       continue;
5862     }
5863 
5864     // If profitable add it to ProfitableVF list.
5865     if (VectorCost < ScalarCost) {
5866       ProfitableVFs.push_back(VectorizationFactor(
5867           {ElementCount::getFixed(i), (unsigned)VectorCost}));
5868     }
5869 
5870     if (VectorCost < Cost) {
5871       Cost = VectorCost;
5872       Width = i;
5873     }
5874   }
5875 
5876   if (!EnableCondStoresVectorization && NumPredStores) {
5877     reportVectorizationFailure("There are conditional stores.",
5878         "store that is conditionally executed prevents vectorization",
5879         "ConditionalStore", ORE, TheLoop);
5880     Width = 1;
5881     Cost = ScalarCost;
5882   }
5883 
5884   LLVM_DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
5885              << "LV: Vectorization seems to be not beneficial, "
5886              << "but was forced by a user.\n");
5887   LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
5888   VectorizationFactor Factor = {ElementCount::getFixed(Width),
5889                                 (unsigned)(Width * Cost)};
5890   return Factor;
5891 }
5892 
5893 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization(
5894     const Loop &L, ElementCount VF) const {
5895   // Cross iteration phis such as reductions need special handling and are
5896   // currently unsupported.
5897   if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) {
5898         return Legal->isFirstOrderRecurrence(&Phi) ||
5899                Legal->isReductionVariable(&Phi);
5900       }))
5901     return false;
5902 
5903   // Phis with uses outside of the loop require special handling and are
5904   // currently unsupported.
5905   for (auto &Entry : Legal->getInductionVars()) {
5906     // Look for uses of the value of the induction at the last iteration.
5907     Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch());
5908     for (User *U : PostInc->users())
5909       if (!L.contains(cast<Instruction>(U)))
5910         return false;
5911     // Look for uses of penultimate value of the induction.
5912     for (User *U : Entry.first->users())
5913       if (!L.contains(cast<Instruction>(U)))
5914         return false;
5915   }
5916 
5917   // Induction variables that are widened require special handling that is
5918   // currently not supported.
5919   if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
5920         return !(this->isScalarAfterVectorization(Entry.first, VF) ||
5921                  this->isProfitableToScalarize(Entry.first, VF));
5922       }))
5923     return false;
5924 
5925   return true;
5926 }
5927 
5928 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
5929     const ElementCount VF) const {
5930   // FIXME: We need a much better cost-model to take different parameters such
5931   // as register pressure, code size increase and cost of extra branches into
5932   // account. For now we apply a very crude heuristic and only consider loops
5933   // with vectorization factors larger than a certain value.
5934   // We also consider epilogue vectorization unprofitable for targets that don't
5935   // consider interleaving beneficial (eg. MVE).
5936   if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
5937     return false;
5938   if (VF.getFixedValue() >= EpilogueVectorizationMinVF)
5939     return true;
5940   return false;
5941 }
5942 
5943 VectorizationFactor
5944 LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
5945     const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
5946   VectorizationFactor Result = VectorizationFactor::Disabled();
5947   if (!EnableEpilogueVectorization) {
5948     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
5949     return Result;
5950   }
5951 
5952   if (!isScalarEpilogueAllowed()) {
5953     LLVM_DEBUG(
5954         dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
5955                   "allowed.\n";);
5956     return Result;
5957   }
5958 
5959   // FIXME: This can be fixed for scalable vectors later, because at this stage
5960   // the LoopVectorizer will only consider vectorizing a loop with scalable
5961   // vectors when the loop has a hint to enable vectorization for a given VF.
5962   if (MainLoopVF.isScalable()) {
5963     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization for scalable vectors not "
5964                          "yet supported.\n");
5965     return Result;
5966   }
5967 
5968   // Not really a cost consideration, but check for unsupported cases here to
5969   // simplify the logic.
5970   if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
5971     LLVM_DEBUG(
5972         dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
5973                   "not a supported candidate.\n";);
5974     return Result;
5975   }
5976 
5977   if (EpilogueVectorizationForceVF > 1) {
5978     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
5979     if (LVP.hasPlanWithVFs(
5980             {MainLoopVF, ElementCount::getFixed(EpilogueVectorizationForceVF)}))
5981       return {ElementCount::getFixed(EpilogueVectorizationForceVF), 0};
5982     else {
5983       LLVM_DEBUG(
5984           dbgs()
5985               << "LEV: Epilogue vectorization forced factor is not viable.\n";);
5986       return Result;
5987     }
5988   }
5989 
5990   if (TheLoop->getHeader()->getParent()->hasOptSize() ||
5991       TheLoop->getHeader()->getParent()->hasMinSize()) {
5992     LLVM_DEBUG(
5993         dbgs()
5994             << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
5995     return Result;
5996   }
5997 
5998   if (!isEpilogueVectorizationProfitable(MainLoopVF))
5999     return Result;
6000 
6001   for (auto &NextVF : ProfitableVFs)
6002     if (ElementCount::isKnownLT(NextVF.Width, MainLoopVF) &&
6003         (Result.Width.getFixedValue() == 1 || NextVF.Cost < Result.Cost) &&
6004         LVP.hasPlanWithVFs({MainLoopVF, NextVF.Width}))
6005       Result = NextVF;
6006 
6007   if (Result != VectorizationFactor::Disabled())
6008     LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
6009                       << Result.Width.getFixedValue() << "\n";);
6010   return Result;
6011 }
6012 
6013 std::pair<unsigned, unsigned>
6014 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
6015   unsigned MinWidth = -1U;
6016   unsigned MaxWidth = 8;
6017   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
6018 
6019   // For each block.
6020   for (BasicBlock *BB : TheLoop->blocks()) {
6021     // For each instruction in the loop.
6022     for (Instruction &I : BB->instructionsWithoutDebug()) {
6023       Type *T = I.getType();
6024 
6025       // Skip ignored values.
6026       if (ValuesToIgnore.count(&I))
6027         continue;
6028 
6029       // Only examine Loads, Stores and PHINodes.
6030       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
6031         continue;
6032 
6033       // Examine PHI nodes that are reduction variables. Update the type to
6034       // account for the recurrence type.
6035       if (auto *PN = dyn_cast<PHINode>(&I)) {
6036         if (!Legal->isReductionVariable(PN))
6037           continue;
6038         RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[PN];
6039         if (PreferInLoopReductions ||
6040             TTI.preferInLoopReduction(RdxDesc.getOpcode(),
6041                                       RdxDesc.getRecurrenceType(),
6042                                       TargetTransformInfo::ReductionFlags()))
6043           continue;
6044         T = RdxDesc.getRecurrenceType();
6045       }
6046 
6047       // Examine the stored values.
6048       if (auto *ST = dyn_cast<StoreInst>(&I))
6049         T = ST->getValueOperand()->getType();
6050 
6051       // Ignore loaded pointer types and stored pointer types that are not
6052       // vectorizable.
6053       //
6054       // FIXME: The check here attempts to predict whether a load or store will
6055       //        be vectorized. We only know this for certain after a VF has
6056       //        been selected. Here, we assume that if an access can be
6057       //        vectorized, it will be. We should also look at extending this
6058       //        optimization to non-pointer types.
6059       //
6060       if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
6061           !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
6062         continue;
6063 
6064       MinWidth = std::min(MinWidth,
6065                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
6066       MaxWidth = std::max(MaxWidth,
6067                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
6068     }
6069   }
6070 
6071   return {MinWidth, MaxWidth};
6072 }
6073 
6074 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
6075                                                            unsigned LoopCost) {
6076   // -- The interleave heuristics --
6077   // We interleave the loop in order to expose ILP and reduce the loop overhead.
6078   // There are many micro-architectural considerations that we can't predict
6079   // at this level. For example, frontend pressure (on decode or fetch) due to
6080   // code size, or the number and capabilities of the execution ports.
6081   //
6082   // We use the following heuristics to select the interleave count:
6083   // 1. If the code has reductions, then we interleave to break the cross
6084   // iteration dependency.
6085   // 2. If the loop is really small, then we interleave to reduce the loop
6086   // overhead.
6087   // 3. We don't interleave if we think that we will spill registers to memory
6088   // due to the increased register pressure.
6089 
6090   if (!isScalarEpilogueAllowed())
6091     return 1;
6092 
6093   // We used the distance for the interleave count.
6094   if (Legal->getMaxSafeDepDistBytes() != -1U)
6095     return 1;
6096 
6097   auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
6098   const bool HasReductions = !Legal->getReductionVars().empty();
6099   // Do not interleave loops with a relatively small known or estimated trip
6100   // count. But we will interleave when InterleaveSmallLoopScalarReduction is
6101   // enabled, and the code has scalar reductions(HasReductions && VF = 1),
6102   // because with the above conditions interleaving can expose ILP and break
6103   // cross iteration dependences for reductions.
6104   if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
6105       !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
6106     return 1;
6107 
6108   RegisterUsage R = calculateRegisterUsage({VF})[0];
6109   // We divide by these constants so assume that we have at least one
6110   // instruction that uses at least one register.
6111   for (auto& pair : R.MaxLocalUsers) {
6112     pair.second = std::max(pair.second, 1U);
6113   }
6114 
6115   // We calculate the interleave count using the following formula.
6116   // Subtract the number of loop invariants from the number of available
6117   // registers. These registers are used by all of the interleaved instances.
6118   // Next, divide the remaining registers by the number of registers that is
6119   // required by the loop, in order to estimate how many parallel instances
6120   // fit without causing spills. All of this is rounded down if necessary to be
6121   // a power of two. We want power of two interleave count to simplify any
6122   // addressing operations or alignment considerations.
6123   // We also want power of two interleave counts to ensure that the induction
6124   // variable of the vector loop wraps to zero, when tail is folded by masking;
6125   // this currently happens when OptForSize, in which case IC is set to 1 above.
6126   unsigned IC = UINT_MAX;
6127 
6128   for (auto& pair : R.MaxLocalUsers) {
6129     unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
6130     LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
6131                       << " registers of "
6132                       << TTI.getRegisterClassName(pair.first) << " register class\n");
6133     if (VF.isScalar()) {
6134       if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
6135         TargetNumRegisters = ForceTargetNumScalarRegs;
6136     } else {
6137       if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
6138         TargetNumRegisters = ForceTargetNumVectorRegs;
6139     }
6140     unsigned MaxLocalUsers = pair.second;
6141     unsigned LoopInvariantRegs = 0;
6142     if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
6143       LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
6144 
6145     unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers);
6146     // Don't count the induction variable as interleaved.
6147     if (EnableIndVarRegisterHeur) {
6148       TmpIC =
6149           PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
6150                         std::max(1U, (MaxLocalUsers - 1)));
6151     }
6152 
6153     IC = std::min(IC, TmpIC);
6154   }
6155 
6156   // Clamp the interleave ranges to reasonable counts.
6157   unsigned MaxInterleaveCount =
6158       TTI.getMaxInterleaveFactor(VF.getKnownMinValue());
6159 
6160   // Check if the user has overridden the max.
6161   if (VF.isScalar()) {
6162     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
6163       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
6164   } else {
6165     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
6166       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
6167   }
6168 
6169   // If trip count is known or estimated compile time constant, limit the
6170   // interleave count to be less than the trip count divided by VF, provided it
6171   // is at least 1.
6172   //
6173   // For scalable vectors we can't know if interleaving is beneficial. It may
6174   // not be beneficial for small loops if none of the lanes in the second vector
6175   // iterations is enabled. However, for larger loops, there is likely to be a
6176   // similar benefit as for fixed-width vectors. For now, we choose to leave
6177   // the InterleaveCount as if vscale is '1', although if some information about
6178   // the vector is known (e.g. min vector size), we can make a better decision.
6179   if (BestKnownTC) {
6180     MaxInterleaveCount =
6181         std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
6182     // Make sure MaxInterleaveCount is greater than 0.
6183     MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
6184   }
6185 
6186   assert(MaxInterleaveCount > 0 &&
6187          "Maximum interleave count must be greater than 0");
6188 
6189   // Clamp the calculated IC to be between the 1 and the max interleave count
6190   // that the target and trip count allows.
6191   if (IC > MaxInterleaveCount)
6192     IC = MaxInterleaveCount;
6193   else
6194     // Make sure IC is greater than 0.
6195     IC = std::max(1u, IC);
6196 
6197   assert(IC > 0 && "Interleave count must be greater than 0.");
6198 
6199   // If we did not calculate the cost for VF (because the user selected the VF)
6200   // then we calculate the cost of VF here.
6201   if (LoopCost == 0) {
6202     assert(expectedCost(VF).first.isValid() && "Expected a valid cost");
6203     LoopCost = *expectedCost(VF).first.getValue();
6204   }
6205 
6206   assert(LoopCost && "Non-zero loop cost expected");
6207 
6208   // Interleave if we vectorized this loop and there is a reduction that could
6209   // benefit from interleaving.
6210   if (VF.isVector() && HasReductions) {
6211     LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
6212     return IC;
6213   }
6214 
6215   // Note that if we've already vectorized the loop we will have done the
6216   // runtime check and so interleaving won't require further checks.
6217   bool InterleavingRequiresRuntimePointerCheck =
6218       (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
6219 
6220   // We want to interleave small loops in order to reduce the loop overhead and
6221   // potentially expose ILP opportunities.
6222   LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
6223                     << "LV: IC is " << IC << '\n'
6224                     << "LV: VF is " << VF << '\n');
6225   const bool AggressivelyInterleaveReductions =
6226       TTI.enableAggressiveInterleaving(HasReductions);
6227   if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
6228     // We assume that the cost overhead is 1 and we use the cost model
6229     // to estimate the cost of the loop and interleave until the cost of the
6230     // loop overhead is about 5% of the cost of the loop.
6231     unsigned SmallIC =
6232         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
6233 
6234     // Interleave until store/load ports (estimated by max interleave count) are
6235     // saturated.
6236     unsigned NumStores = Legal->getNumStores();
6237     unsigned NumLoads = Legal->getNumLoads();
6238     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
6239     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
6240 
6241     // If we have a scalar reduction (vector reductions are already dealt with
6242     // by this point), we can increase the critical path length if the loop
6243     // we're interleaving is inside another loop. Limit, by default to 2, so the
6244     // critical path only gets increased by one reduction operation.
6245     if (HasReductions && TheLoop->getLoopDepth() > 1) {
6246       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
6247       SmallIC = std::min(SmallIC, F);
6248       StoresIC = std::min(StoresIC, F);
6249       LoadsIC = std::min(LoadsIC, F);
6250     }
6251 
6252     if (EnableLoadStoreRuntimeInterleave &&
6253         std::max(StoresIC, LoadsIC) > SmallIC) {
6254       LLVM_DEBUG(
6255           dbgs() << "LV: Interleaving to saturate store or load ports.\n");
6256       return std::max(StoresIC, LoadsIC);
6257     }
6258 
6259     // If there are scalar reductions and TTI has enabled aggressive
6260     // interleaving for reductions, we will interleave to expose ILP.
6261     if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
6262         AggressivelyInterleaveReductions) {
6263       LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6264       // Interleave no less than SmallIC but not as aggressive as the normal IC
6265       // to satisfy the rare situation when resources are too limited.
6266       return std::max(IC / 2, SmallIC);
6267     } else {
6268       LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
6269       return SmallIC;
6270     }
6271   }
6272 
6273   // Interleave if this is a large loop (small loops are already dealt with by
6274   // this point) that could benefit from interleaving.
6275   if (AggressivelyInterleaveReductions) {
6276     LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6277     return IC;
6278   }
6279 
6280   LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
6281   return 1;
6282 }
6283 
6284 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
6285 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
6286   // This function calculates the register usage by measuring the highest number
6287   // of values that are alive at a single location. Obviously, this is a very
6288   // rough estimation. We scan the loop in a topological order in order and
6289   // assign a number to each instruction. We use RPO to ensure that defs are
6290   // met before their users. We assume that each instruction that has in-loop
6291   // users starts an interval. We record every time that an in-loop value is
6292   // used, so we have a list of the first and last occurrences of each
6293   // instruction. Next, we transpose this data structure into a multi map that
6294   // holds the list of intervals that *end* at a specific location. This multi
6295   // map allows us to perform a linear search. We scan the instructions linearly
6296   // and record each time that a new interval starts, by placing it in a set.
6297   // If we find this value in the multi-map then we remove it from the set.
6298   // The max register usage is the maximum size of the set.
6299   // We also search for instructions that are defined outside the loop, but are
6300   // used inside the loop. We need this number separately from the max-interval
6301   // usage number because when we unroll, loop-invariant values do not take
6302   // more register.
6303   LoopBlocksDFS DFS(TheLoop);
6304   DFS.perform(LI);
6305 
6306   RegisterUsage RU;
6307 
6308   // Each 'key' in the map opens a new interval. The values
6309   // of the map are the index of the 'last seen' usage of the
6310   // instruction that is the key.
6311   using IntervalMap = DenseMap<Instruction *, unsigned>;
6312 
6313   // Maps instruction to its index.
6314   SmallVector<Instruction *, 64> IdxToInstr;
6315   // Marks the end of each interval.
6316   IntervalMap EndPoint;
6317   // Saves the list of instruction indices that are used in the loop.
6318   SmallPtrSet<Instruction *, 8> Ends;
6319   // Saves the list of values that are used in the loop but are
6320   // defined outside the loop, such as arguments and constants.
6321   SmallPtrSet<Value *, 8> LoopInvariants;
6322 
6323   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
6324     for (Instruction &I : BB->instructionsWithoutDebug()) {
6325       IdxToInstr.push_back(&I);
6326 
6327       // Save the end location of each USE.
6328       for (Value *U : I.operands()) {
6329         auto *Instr = dyn_cast<Instruction>(U);
6330 
6331         // Ignore non-instruction values such as arguments, constants, etc.
6332         if (!Instr)
6333           continue;
6334 
6335         // If this instruction is outside the loop then record it and continue.
6336         if (!TheLoop->contains(Instr)) {
6337           LoopInvariants.insert(Instr);
6338           continue;
6339         }
6340 
6341         // Overwrite previous end points.
6342         EndPoint[Instr] = IdxToInstr.size();
6343         Ends.insert(Instr);
6344       }
6345     }
6346   }
6347 
6348   // Saves the list of intervals that end with the index in 'key'.
6349   using InstrList = SmallVector<Instruction *, 2>;
6350   DenseMap<unsigned, InstrList> TransposeEnds;
6351 
6352   // Transpose the EndPoints to a list of values that end at each index.
6353   for (auto &Interval : EndPoint)
6354     TransposeEnds[Interval.second].push_back(Interval.first);
6355 
6356   SmallPtrSet<Instruction *, 8> OpenIntervals;
6357   SmallVector<RegisterUsage, 8> RUs(VFs.size());
6358   SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());
6359 
6360   LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
6361 
6362   // A lambda that gets the register usage for the given type and VF.
6363   const auto &TTICapture = TTI;
6364   auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) {
6365     if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty))
6366       return 0U;
6367     return TTICapture.getRegUsageForType(VectorType::get(Ty, VF));
6368   };
6369 
6370   for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
6371     Instruction *I = IdxToInstr[i];
6372 
6373     // Remove all of the instructions that end at this location.
6374     InstrList &List = TransposeEnds[i];
6375     for (Instruction *ToRemove : List)
6376       OpenIntervals.erase(ToRemove);
6377 
6378     // Ignore instructions that are never used within the loop.
6379     if (!Ends.count(I))
6380       continue;
6381 
6382     // Skip ignored values.
6383     if (ValuesToIgnore.count(I))
6384       continue;
6385 
6386     // For each VF find the maximum usage of registers.
6387     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
6388       // Count the number of live intervals.
6389       SmallMapVector<unsigned, unsigned, 4> RegUsage;
6390 
6391       if (VFs[j].isScalar()) {
6392         for (auto Inst : OpenIntervals) {
6393           unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
6394           if (RegUsage.find(ClassID) == RegUsage.end())
6395             RegUsage[ClassID] = 1;
6396           else
6397             RegUsage[ClassID] += 1;
6398         }
6399       } else {
6400         collectUniformsAndScalars(VFs[j]);
6401         for (auto Inst : OpenIntervals) {
6402           // Skip ignored values for VF > 1.
6403           if (VecValuesToIgnore.count(Inst))
6404             continue;
6405           if (isScalarAfterVectorization(Inst, VFs[j])) {
6406             unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
6407             if (RegUsage.find(ClassID) == RegUsage.end())
6408               RegUsage[ClassID] = 1;
6409             else
6410               RegUsage[ClassID] += 1;
6411           } else {
6412             unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType());
6413             if (RegUsage.find(ClassID) == RegUsage.end())
6414               RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]);
6415             else
6416               RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
6417           }
6418         }
6419       }
6420 
6421       for (auto& pair : RegUsage) {
6422         if (MaxUsages[j].find(pair.first) != MaxUsages[j].end())
6423           MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second);
6424         else
6425           MaxUsages[j][pair.first] = pair.second;
6426       }
6427     }
6428 
6429     LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
6430                       << OpenIntervals.size() << '\n');
6431 
6432     // Add the current instruction to the list of open intervals.
6433     OpenIntervals.insert(I);
6434   }
6435 
6436   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
6437     SmallMapVector<unsigned, unsigned, 4> Invariant;
6438 
6439     for (auto Inst : LoopInvariants) {
6440       unsigned Usage =
6441           VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
6442       unsigned ClassID =
6443           TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
6444       if (Invariant.find(ClassID) == Invariant.end())
6445         Invariant[ClassID] = Usage;
6446       else
6447         Invariant[ClassID] += Usage;
6448     }
6449 
6450     LLVM_DEBUG({
6451       dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
6452       dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
6453              << " item\n";
6454       for (const auto &pair : MaxUsages[i]) {
6455         dbgs() << "LV(REG): RegisterClass: "
6456                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6457                << " registers\n";
6458       }
6459       dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
6460              << " item\n";
6461       for (const auto &pair : Invariant) {
6462         dbgs() << "LV(REG): RegisterClass: "
6463                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6464                << " registers\n";
6465       }
6466     });
6467 
6468     RU.LoopInvariantRegs = Invariant;
6469     RU.MaxLocalUsers = MaxUsages[i];
6470     RUs[i] = RU;
6471   }
6472 
6473   return RUs;
6474 }
6475 
6476 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I){
6477   // TODO: Cost model for emulated masked load/store is completely
6478   // broken. This hack guides the cost model to use an artificially
6479   // high enough value to practically disable vectorization with such
6480   // operations, except where previously deployed legality hack allowed
6481   // using very low cost values. This is to avoid regressions coming simply
6482   // from moving "masked load/store" check from legality to cost model.
6483   // Masked Load/Gather emulation was previously never allowed.
6484   // Limited number of Masked Store/Scatter emulation was allowed.
6485   assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction");
6486   return isa<LoadInst>(I) ||
6487          (isa<StoreInst>(I) &&
6488           NumPredStores > NumberOfStoresToPredicate);
6489 }
6490 
6491 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
6492   // If we aren't vectorizing the loop, or if we've already collected the
6493   // instructions to scalarize, there's nothing to do. Collection may already
6494   // have occurred if we have a user-selected VF and are now computing the
6495   // expected cost for interleaving.
6496   if (VF.isScalar() || VF.isZero() ||
6497       InstsToScalarize.find(VF) != InstsToScalarize.end())
6498     return;
6499 
6500   // Initialize a mapping for VF in InstsToScalalarize. If we find that it's
6501   // not profitable to scalarize any instructions, the presence of VF in the
6502   // map will indicate that we've analyzed it already.
6503   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
6504 
6505   // Find all the instructions that are scalar with predication in the loop and
6506   // determine if it would be better to not if-convert the blocks they are in.
6507   // If so, we also record the instructions to scalarize.
6508   for (BasicBlock *BB : TheLoop->blocks()) {
6509     if (!blockNeedsPredication(BB))
6510       continue;
6511     for (Instruction &I : *BB)
6512       if (isScalarWithPredication(&I)) {
6513         ScalarCostsTy ScalarCosts;
6514         // Do not apply discount logic if hacked cost is needed
6515         // for emulated masked memrefs.
6516         if (!useEmulatedMaskMemRefHack(&I) &&
6517             computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
6518           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
6519         // Remember that BB will remain after vectorization.
6520         PredicatedBBsAfterVectorization.insert(BB);
6521       }
6522   }
6523 }
6524 
6525 int LoopVectorizationCostModel::computePredInstDiscount(
6526     Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
6527   assert(!isUniformAfterVectorization(PredInst, VF) &&
6528          "Instruction marked uniform-after-vectorization will be predicated");
6529 
6530   // Initialize the discount to zero, meaning that the scalar version and the
6531   // vector version cost the same.
6532   InstructionCost Discount = 0;
6533 
6534   // Holds instructions to analyze. The instructions we visit are mapped in
6535   // ScalarCosts. Those instructions are the ones that would be scalarized if
6536   // we find that the scalar version costs less.
6537   SmallVector<Instruction *, 8> Worklist;
6538 
6539   // Returns true if the given instruction can be scalarized.
6540   auto canBeScalarized = [&](Instruction *I) -> bool {
6541     // We only attempt to scalarize instructions forming a single-use chain
6542     // from the original predicated block that would otherwise be vectorized.
6543     // Although not strictly necessary, we give up on instructions we know will
6544     // already be scalar to avoid traversing chains that are unlikely to be
6545     // beneficial.
6546     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
6547         isScalarAfterVectorization(I, VF))
6548       return false;
6549 
6550     // If the instruction is scalar with predication, it will be analyzed
6551     // separately. We ignore it within the context of PredInst.
6552     if (isScalarWithPredication(I))
6553       return false;
6554 
6555     // If any of the instruction's operands are uniform after vectorization,
6556     // the instruction cannot be scalarized. This prevents, for example, a
6557     // masked load from being scalarized.
6558     //
6559     // We assume we will only emit a value for lane zero of an instruction
6560     // marked uniform after vectorization, rather than VF identical values.
6561     // Thus, if we scalarize an instruction that uses a uniform, we would
6562     // create uses of values corresponding to the lanes we aren't emitting code
6563     // for. This behavior can be changed by allowing getScalarValue to clone
6564     // the lane zero values for uniforms rather than asserting.
6565     for (Use &U : I->operands())
6566       if (auto *J = dyn_cast<Instruction>(U.get()))
6567         if (isUniformAfterVectorization(J, VF))
6568           return false;
6569 
6570     // Otherwise, we can scalarize the instruction.
6571     return true;
6572   };
6573 
6574   // Compute the expected cost discount from scalarizing the entire expression
6575   // feeding the predicated instruction. We currently only consider expressions
6576   // that are single-use instruction chains.
6577   Worklist.push_back(PredInst);
6578   while (!Worklist.empty()) {
6579     Instruction *I = Worklist.pop_back_val();
6580 
6581     // If we've already analyzed the instruction, there's nothing to do.
6582     if (ScalarCosts.find(I) != ScalarCosts.end())
6583       continue;
6584 
6585     // Compute the cost of the vector instruction. Note that this cost already
6586     // includes the scalarization overhead of the predicated instruction.
6587     InstructionCost VectorCost = getInstructionCost(I, VF).first;
6588 
6589     // Compute the cost of the scalarized instruction. This cost is the cost of
6590     // the instruction as if it wasn't if-converted and instead remained in the
6591     // predicated block. We will scale this cost by block probability after
6592     // computing the scalarization overhead.
6593     assert(!VF.isScalable() && "scalable vectors not yet supported.");
6594     InstructionCost ScalarCost =
6595         VF.getKnownMinValue() *
6596         getInstructionCost(I, ElementCount::getFixed(1)).first;
6597 
6598     // Compute the scalarization overhead of needed insertelement instructions
6599     // and phi nodes.
6600     if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
6601       ScalarCost += TTI.getScalarizationOverhead(
6602           cast<VectorType>(ToVectorTy(I->getType(), VF)),
6603           APInt::getAllOnesValue(VF.getKnownMinValue()), true, false);
6604       assert(!VF.isScalable() && "scalable vectors not yet supported.");
6605       ScalarCost +=
6606           VF.getKnownMinValue() *
6607           TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput);
6608     }
6609 
6610     // Compute the scalarization overhead of needed extractelement
6611     // instructions. For each of the instruction's operands, if the operand can
6612     // be scalarized, add it to the worklist; otherwise, account for the
6613     // overhead.
6614     for (Use &U : I->operands())
6615       if (auto *J = dyn_cast<Instruction>(U.get())) {
6616         assert(VectorType::isValidElementType(J->getType()) &&
6617                "Instruction has non-scalar type");
6618         if (canBeScalarized(J))
6619           Worklist.push_back(J);
6620         else if (needsExtract(J, VF)) {
6621           assert(!VF.isScalable() && "scalable vectors not yet supported.");
6622           ScalarCost += TTI.getScalarizationOverhead(
6623               cast<VectorType>(ToVectorTy(J->getType(), VF)),
6624               APInt::getAllOnesValue(VF.getKnownMinValue()), false, true);
6625         }
6626       }
6627 
6628     // Scale the total scalar cost by block probability.
6629     ScalarCost /= getReciprocalPredBlockProb();
6630 
6631     // Compute the discount. A non-negative discount means the vector version
6632     // of the instruction costs more, and scalarizing would be beneficial.
6633     Discount += VectorCost - ScalarCost;
6634     ScalarCosts[I] = ScalarCost;
6635   }
6636 
6637   return *Discount.getValue();
6638 }
6639 
6640 LoopVectorizationCostModel::VectorizationCostTy
6641 LoopVectorizationCostModel::expectedCost(ElementCount VF) {
6642   VectorizationCostTy Cost;
6643 
6644   // For each block.
6645   for (BasicBlock *BB : TheLoop->blocks()) {
6646     VectorizationCostTy BlockCost;
6647 
6648     // For each instruction in the old loop.
6649     for (Instruction &I : BB->instructionsWithoutDebug()) {
6650       // Skip ignored values.
6651       if (ValuesToIgnore.count(&I) ||
6652           (VF.isVector() && VecValuesToIgnore.count(&I)))
6653         continue;
6654 
6655       VectorizationCostTy C = getInstructionCost(&I, VF);
6656 
6657       // Check if we should override the cost.
6658       if (ForceTargetInstructionCost.getNumOccurrences() > 0)
6659         C.first = InstructionCost(ForceTargetInstructionCost);
6660 
6661       BlockCost.first += C.first;
6662       BlockCost.second |= C.second;
6663       LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
6664                         << " for VF " << VF << " For instruction: " << I
6665                         << '\n');
6666     }
6667 
6668     // If we are vectorizing a predicated block, it will have been
6669     // if-converted. This means that the block's instructions (aside from
6670     // stores and instructions that may divide by zero) will now be
6671     // unconditionally executed. For the scalar case, we may not always execute
6672     // the predicated block, if it is an if-else block. Thus, scale the block's
6673     // cost by the probability of executing it. blockNeedsPredication from
6674     // Legal is used so as to not include all blocks in tail folded loops.
6675     if (VF.isScalar() && Legal->blockNeedsPredication(BB))
6676       BlockCost.first /= getReciprocalPredBlockProb();
6677 
6678     Cost.first += BlockCost.first;
6679     Cost.second |= BlockCost.second;
6680   }
6681 
6682   return Cost;
6683 }
6684 
6685 /// Gets Address Access SCEV after verifying that the access pattern
6686 /// is loop invariant except the induction variable dependence.
6687 ///
6688 /// This SCEV can be sent to the Target in order to estimate the address
6689 /// calculation cost.
6690 static const SCEV *getAddressAccessSCEV(
6691               Value *Ptr,
6692               LoopVectorizationLegality *Legal,
6693               PredicatedScalarEvolution &PSE,
6694               const Loop *TheLoop) {
6695 
6696   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
6697   if (!Gep)
6698     return nullptr;
6699 
6700   // We are looking for a gep with all loop invariant indices except for one
6701   // which should be an induction variable.
6702   auto SE = PSE.getSE();
6703   unsigned NumOperands = Gep->getNumOperands();
6704   for (unsigned i = 1; i < NumOperands; ++i) {
6705     Value *Opd = Gep->getOperand(i);
6706     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
6707         !Legal->isInductionVariable(Opd))
6708       return nullptr;
6709   }
6710 
6711   // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV.
6712   return PSE.getSCEV(Ptr);
6713 }
6714 
6715 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
6716   return Legal->hasStride(I->getOperand(0)) ||
6717          Legal->hasStride(I->getOperand(1));
6718 }
6719 
6720 InstructionCost
6721 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
6722                                                         ElementCount VF) {
6723   assert(VF.isVector() &&
6724          "Scalarization cost of instruction implies vectorization.");
6725   assert(!VF.isScalable() && "scalable vectors not yet supported.");
6726   Type *ValTy = getMemInstValueType(I);
6727   auto SE = PSE.getSE();
6728 
6729   unsigned AS = getLoadStoreAddressSpace(I);
6730   Value *Ptr = getLoadStorePointerOperand(I);
6731   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
6732 
6733   // Figure out whether the access is strided and get the stride value
6734   // if it's known in compile time
6735   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
6736 
6737   // Get the cost of the scalar memory instruction and address computation.
6738   InstructionCost Cost =
6739       VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
6740 
6741   // Don't pass *I here, since it is scalar but will actually be part of a
6742   // vectorized loop where the user of it is a vectorized instruction.
6743   const Align Alignment = getLoadStoreAlignment(I);
6744   Cost += VF.getKnownMinValue() *
6745           TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
6746                               AS, TTI::TCK_RecipThroughput);
6747 
6748   // Get the overhead of the extractelement and insertelement instructions
6749   // we might create due to scalarization.
6750   Cost += getScalarizationOverhead(I, VF);
6751 
6752   // If we have a predicated store, it may not be executed for each vector
6753   // lane. Scale the cost by the probability of executing the predicated
6754   // block.
6755   if (isPredicatedInst(I)) {
6756     Cost /= getReciprocalPredBlockProb();
6757 
6758     if (useEmulatedMaskMemRefHack(I))
6759       // Artificially setting to a high enough value to practically disable
6760       // vectorization with such operations.
6761       Cost = 3000000;
6762   }
6763 
6764   return Cost;
6765 }
6766 
6767 InstructionCost
6768 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
6769                                                     ElementCount VF) {
6770   Type *ValTy = getMemInstValueType(I);
6771   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6772   Value *Ptr = getLoadStorePointerOperand(I);
6773   unsigned AS = getLoadStoreAddressSpace(I);
6774   int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
6775   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6776 
6777   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6778          "Stride should be 1 or -1 for consecutive memory access");
6779   const Align Alignment = getLoadStoreAlignment(I);
6780   InstructionCost Cost = 0;
6781   if (Legal->isMaskRequired(I))
6782     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6783                                       CostKind);
6784   else
6785     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6786                                 CostKind, I);
6787 
6788   bool Reverse = ConsecutiveStride < 0;
6789   if (Reverse)
6790     Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
6791   return Cost;
6792 }
6793 
6794 InstructionCost
6795 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
6796                                                 ElementCount VF) {
6797   assert(Legal->isUniformMemOp(*I));
6798 
6799   Type *ValTy = getMemInstValueType(I);
6800   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6801   const Align Alignment = getLoadStoreAlignment(I);
6802   unsigned AS = getLoadStoreAddressSpace(I);
6803   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6804   if (isa<LoadInst>(I)) {
6805     return TTI.getAddressComputationCost(ValTy) +
6806            TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
6807                                CostKind) +
6808            TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
6809   }
6810   StoreInst *SI = cast<StoreInst>(I);
6811 
6812   bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
6813   return TTI.getAddressComputationCost(ValTy) +
6814          TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
6815                              CostKind) +
6816          (isLoopInvariantStoreValue
6817               ? 0
6818               : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
6819                                        VF.getKnownMinValue() - 1));
6820 }
6821 
6822 InstructionCost
6823 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
6824                                                  ElementCount VF) {
6825   Type *ValTy = getMemInstValueType(I);
6826   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6827   const Align Alignment = getLoadStoreAlignment(I);
6828   const Value *Ptr = getLoadStorePointerOperand(I);
6829 
6830   return TTI.getAddressComputationCost(VectorTy) +
6831          TTI.getGatherScatterOpCost(
6832              I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
6833              TargetTransformInfo::TCK_RecipThroughput, I);
6834 }
6835 
6836 InstructionCost
6837 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
6838                                                    ElementCount VF) {
6839   // TODO: Once we have support for interleaving with scalable vectors
6840   // we can calculate the cost properly here.
6841   if (VF.isScalable())
6842     return InstructionCost::getInvalid();
6843 
6844   Type *ValTy = getMemInstValueType(I);
6845   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6846   unsigned AS = getLoadStoreAddressSpace(I);
6847 
6848   auto Group = getInterleavedAccessGroup(I);
6849   assert(Group && "Fail to get an interleaved access group.");
6850 
6851   unsigned InterleaveFactor = Group->getFactor();
6852   auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
6853 
6854   // Holds the indices of existing members in an interleaved load group.
6855   // An interleaved store group doesn't need this as it doesn't allow gaps.
6856   SmallVector<unsigned, 4> Indices;
6857   if (isa<LoadInst>(I)) {
6858     for (unsigned i = 0; i < InterleaveFactor; i++)
6859       if (Group->getMember(i))
6860         Indices.push_back(i);
6861   }
6862 
6863   // Calculate the cost of the whole interleaved group.
6864   bool UseMaskForGaps =
6865       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
6866   InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
6867       I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
6868       AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);
6869 
6870   if (Group->isReverse()) {
6871     // TODO: Add support for reversed masked interleaved access.
6872     assert(!Legal->isMaskRequired(I) &&
6873            "Reverse masked interleaved access not supported.");
6874     Cost += Group->getNumMembers() *
6875             TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
6876   }
6877   return Cost;
6878 }
6879 
6880 InstructionCost LoopVectorizationCostModel::getReductionPatternCost(
6881     Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
6882   // Early exit for no inloop reductions
6883   if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
6884     return InstructionCost::getInvalid();
6885   auto *VectorTy = cast<VectorType>(Ty);
6886 
6887   // We are looking for a pattern of, and finding the minimal acceptable cost:
6888   //  reduce(mul(ext(A), ext(B))) or
6889   //  reduce(mul(A, B)) or
6890   //  reduce(ext(A)) or
6891   //  reduce(A).
6892   // The basic idea is that we walk down the tree to do that, finding the root
6893   // reduction instruction in InLoopReductionImmediateChains. From there we find
6894   // the pattern of mul/ext and test the cost of the entire pattern vs the cost
6895   // of the components. If the reduction cost is lower then we return it for the
6896   // reduction instruction and 0 for the other instructions in the pattern. If
6897   // it is not we return an invalid cost specifying the orignal cost method
6898   // should be used.
6899   Instruction *RetI = I;
6900   if ((RetI->getOpcode() == Instruction::SExt ||
6901        RetI->getOpcode() == Instruction::ZExt)) {
6902     if (!RetI->hasOneUser())
6903       return InstructionCost::getInvalid();
6904     RetI = RetI->user_back();
6905   }
6906   if (RetI->getOpcode() == Instruction::Mul &&
6907       RetI->user_back()->getOpcode() == Instruction::Add) {
6908     if (!RetI->hasOneUser())
6909       return InstructionCost::getInvalid();
6910     RetI = RetI->user_back();
6911   }
6912 
6913   // Test if the found instruction is a reduction, and if not return an invalid
6914   // cost specifying the parent to use the original cost modelling.
6915   if (!InLoopReductionImmediateChains.count(RetI))
6916     return InstructionCost::getInvalid();
6917 
6918   // Find the reduction this chain is a part of and calculate the basic cost of
6919   // the reduction on its own.
6920   Instruction *LastChain = InLoopReductionImmediateChains[RetI];
6921   Instruction *ReductionPhi = LastChain;
6922   while (!isa<PHINode>(ReductionPhi))
6923     ReductionPhi = InLoopReductionImmediateChains[ReductionPhi];
6924 
6925   RecurrenceDescriptor RdxDesc =
6926       Legal->getReductionVars()[cast<PHINode>(ReductionPhi)];
6927   unsigned BaseCost = TTI.getArithmeticReductionCost(RdxDesc.getOpcode(),
6928                                                      VectorTy, false, CostKind);
6929 
6930   // Get the operand that was not the reduction chain and match it to one of the
6931   // patterns, returning the better cost if it is found.
6932   Instruction *RedOp = RetI->getOperand(1) == LastChain
6933                            ? dyn_cast<Instruction>(RetI->getOperand(0))
6934                            : dyn_cast<Instruction>(RetI->getOperand(1));
6935 
6936   VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
6937 
6938   if (RedOp && (isa<SExtInst>(RedOp) || isa<ZExtInst>(RedOp)) &&
6939       !TheLoop->isLoopInvariant(RedOp)) {
6940     bool IsUnsigned = isa<ZExtInst>(RedOp);
6941     auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
6942     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6943         /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6944         CostKind);
6945 
6946     unsigned ExtCost =
6947         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
6948                              TTI::CastContextHint::None, CostKind, RedOp);
6949     if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
6950       return I == RetI ? *RedCost.getValue() : 0;
6951   } else if (RedOp && RedOp->getOpcode() == Instruction::Mul) {
6952     Instruction *Mul = RedOp;
6953     Instruction *Op0 = dyn_cast<Instruction>(Mul->getOperand(0));
6954     Instruction *Op1 = dyn_cast<Instruction>(Mul->getOperand(1));
6955     if (Op0 && Op1 && (isa<SExtInst>(Op0) || isa<ZExtInst>(Op0)) &&
6956         Op0->getOpcode() == Op1->getOpcode() &&
6957         Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
6958         !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
6959       bool IsUnsigned = isa<ZExtInst>(Op0);
6960       auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
6961       // reduce(mul(ext, ext))
6962       unsigned ExtCost =
6963           TTI.getCastInstrCost(Op0->getOpcode(), VectorTy, ExtType,
6964                                TTI::CastContextHint::None, CostKind, Op0);
6965       unsigned MulCost =
6966           TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind);
6967 
6968       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6969           /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6970           CostKind);
6971 
6972       if (RedCost.isValid() && RedCost < ExtCost * 2 + MulCost + BaseCost)
6973         return I == RetI ? *RedCost.getValue() : 0;
6974     } else {
6975       unsigned MulCost =
6976           TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind);
6977 
6978       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6979           /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy,
6980           CostKind);
6981 
6982       if (RedCost.isValid() && RedCost < MulCost + BaseCost)
6983         return I == RetI ? *RedCost.getValue() : 0;
6984     }
6985   }
6986 
6987   return I == RetI ? BaseCost : InstructionCost::getInvalid();
6988 }
6989 
6990 InstructionCost
6991 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
6992                                                      ElementCount VF) {
6993   // Calculate scalar cost only. Vectorization cost should be ready at this
6994   // moment.
6995   if (VF.isScalar()) {
6996     Type *ValTy = getMemInstValueType(I);
6997     const Align Alignment = getLoadStoreAlignment(I);
6998     unsigned AS = getLoadStoreAddressSpace(I);
6999 
7000     return TTI.getAddressComputationCost(ValTy) +
7001            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
7002                                TTI::TCK_RecipThroughput, I);
7003   }
7004   return getWideningCost(I, VF);
7005 }
7006 
7007 LoopVectorizationCostModel::VectorizationCostTy
7008 LoopVectorizationCostModel::getInstructionCost(Instruction *I,
7009                                                ElementCount VF) {
7010   // If we know that this instruction will remain uniform, check the cost of
7011   // the scalar version.
7012   if (isUniformAfterVectorization(I, VF))
7013     VF = ElementCount::getFixed(1);
7014 
7015   if (VF.isVector() && isProfitableToScalarize(I, VF))
7016     return VectorizationCostTy(InstsToScalarize[VF][I], false);
7017 
7018   // Forced scalars do not have any scalarization overhead.
7019   auto ForcedScalar = ForcedScalars.find(VF);
7020   if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
7021     auto InstSet = ForcedScalar->second;
7022     if (InstSet.count(I))
7023       return VectorizationCostTy(
7024           (getInstructionCost(I, ElementCount::getFixed(1)).first *
7025            VF.getKnownMinValue()),
7026           false);
7027   }
7028 
7029   Type *VectorTy;
7030   InstructionCost C = getInstructionCost(I, VF, VectorTy);
7031 
7032   bool TypeNotScalarized =
7033       VF.isVector() && VectorTy->isVectorTy() &&
7034       TTI.getNumberOfParts(VectorTy) < VF.getKnownMinValue();
7035   return VectorizationCostTy(C, TypeNotScalarized);
7036 }
7037 
7038 InstructionCost
7039 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
7040                                                      ElementCount VF) {
7041 
7042   assert(!VF.isScalable() &&
7043          "cannot compute scalarization overhead for scalable vectorization");
7044   if (VF.isScalar())
7045     return 0;
7046 
7047   InstructionCost Cost = 0;
7048   Type *RetTy = ToVectorTy(I->getType(), VF);
7049   if (!RetTy->isVoidTy() &&
7050       (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
7051     Cost += TTI.getScalarizationOverhead(
7052         cast<VectorType>(RetTy), APInt::getAllOnesValue(VF.getKnownMinValue()),
7053         true, false);
7054 
7055   // Some targets keep addresses scalar.
7056   if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
7057     return Cost;
7058 
7059   // Some targets support efficient element stores.
7060   if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
7061     return Cost;
7062 
7063   // Collect operands to consider.
7064   CallInst *CI = dyn_cast<CallInst>(I);
7065   Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands();
7066 
7067   // Skip operands that do not require extraction/scalarization and do not incur
7068   // any overhead.
7069   return Cost + TTI.getOperandsScalarizationOverhead(
7070                     filterExtractingOperands(Ops, VF), VF.getKnownMinValue());
7071 }
7072 
7073 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
7074   if (VF.isScalar())
7075     return;
7076   NumPredStores = 0;
7077   for (BasicBlock *BB : TheLoop->blocks()) {
7078     // For each instruction in the old loop.
7079     for (Instruction &I : *BB) {
7080       Value *Ptr =  getLoadStorePointerOperand(&I);
7081       if (!Ptr)
7082         continue;
7083 
7084       // TODO: We should generate better code and update the cost model for
7085       // predicated uniform stores. Today they are treated as any other
7086       // predicated store (see added test cases in
7087       // invariant-store-vectorization.ll).
7088       if (isa<StoreInst>(&I) && isScalarWithPredication(&I))
7089         NumPredStores++;
7090 
7091       if (Legal->isUniformMemOp(I)) {
7092         // TODO: Avoid replicating loads and stores instead of
7093         // relying on instcombine to remove them.
7094         // Load: Scalar load + broadcast
7095         // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
7096         InstructionCost Cost = getUniformMemOpCost(&I, VF);
7097         setWideningDecision(&I, VF, CM_Scalarize, Cost);
7098         continue;
7099       }
7100 
7101       // We assume that widening is the best solution when possible.
7102       if (memoryInstructionCanBeWidened(&I, VF)) {
7103         InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
7104         int ConsecutiveStride =
7105                Legal->isConsecutivePtr(getLoadStorePointerOperand(&I));
7106         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
7107                "Expected consecutive stride.");
7108         InstWidening Decision =
7109             ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
7110         setWideningDecision(&I, VF, Decision, Cost);
7111         continue;
7112       }
7113 
7114       // Choose between Interleaving, Gather/Scatter or Scalarization.
7115       InstructionCost InterleaveCost = InstructionCost::getInvalid();
7116       unsigned NumAccesses = 1;
7117       if (isAccessInterleaved(&I)) {
7118         auto Group = getInterleavedAccessGroup(&I);
7119         assert(Group && "Fail to get an interleaved access group.");
7120 
7121         // Make one decision for the whole group.
7122         if (getWideningDecision(&I, VF) != CM_Unknown)
7123           continue;
7124 
7125         NumAccesses = Group->getNumMembers();
7126         if (interleavedAccessCanBeWidened(&I, VF))
7127           InterleaveCost = getInterleaveGroupCost(&I, VF);
7128       }
7129 
7130       InstructionCost GatherScatterCost =
7131           isLegalGatherOrScatter(&I)
7132               ? getGatherScatterCost(&I, VF) * NumAccesses
7133               : InstructionCost::getInvalid();
7134 
7135       InstructionCost ScalarizationCost =
7136           !VF.isScalable() ? getMemInstScalarizationCost(&I, VF) * NumAccesses
7137                            : InstructionCost::getInvalid();
7138 
7139       // Choose better solution for the current VF,
7140       // write down this decision and use it during vectorization.
7141       InstructionCost Cost;
7142       InstWidening Decision;
7143       if (InterleaveCost <= GatherScatterCost &&
7144           InterleaveCost < ScalarizationCost) {
7145         Decision = CM_Interleave;
7146         Cost = InterleaveCost;
7147       } else if (GatherScatterCost < ScalarizationCost) {
7148         Decision = CM_GatherScatter;
7149         Cost = GatherScatterCost;
7150       } else {
7151         assert(!VF.isScalable() &&
7152                "We cannot yet scalarise for scalable vectors");
7153         Decision = CM_Scalarize;
7154         Cost = ScalarizationCost;
7155       }
7156       // If the instructions belongs to an interleave group, the whole group
7157       // receives the same decision. The whole group receives the cost, but
7158       // the cost will actually be assigned to one instruction.
7159       if (auto Group = getInterleavedAccessGroup(&I))
7160         setWideningDecision(Group, VF, Decision, Cost);
7161       else
7162         setWideningDecision(&I, VF, Decision, Cost);
7163     }
7164   }
7165 
7166   // Make sure that any load of address and any other address computation
7167   // remains scalar unless there is gather/scatter support. This avoids
7168   // inevitable extracts into address registers, and also has the benefit of
7169   // activating LSR more, since that pass can't optimize vectorized
7170   // addresses.
7171   if (TTI.prefersVectorizedAddressing())
7172     return;
7173 
7174   // Start with all scalar pointer uses.
7175   SmallPtrSet<Instruction *, 8> AddrDefs;
7176   for (BasicBlock *BB : TheLoop->blocks())
7177     for (Instruction &I : *BB) {
7178       Instruction *PtrDef =
7179         dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
7180       if (PtrDef && TheLoop->contains(PtrDef) &&
7181           getWideningDecision(&I, VF) != CM_GatherScatter)
7182         AddrDefs.insert(PtrDef);
7183     }
7184 
7185   // Add all instructions used to generate the addresses.
7186   SmallVector<Instruction *, 4> Worklist;
7187   append_range(Worklist, AddrDefs);
7188   while (!Worklist.empty()) {
7189     Instruction *I = Worklist.pop_back_val();
7190     for (auto &Op : I->operands())
7191       if (auto *InstOp = dyn_cast<Instruction>(Op))
7192         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
7193             AddrDefs.insert(InstOp).second)
7194           Worklist.push_back(InstOp);
7195   }
7196 
7197   for (auto *I : AddrDefs) {
7198     if (isa<LoadInst>(I)) {
7199       // Setting the desired widening decision should ideally be handled in
7200       // by cost functions, but since this involves the task of finding out
7201       // if the loaded register is involved in an address computation, it is
7202       // instead changed here when we know this is the case.
7203       InstWidening Decision = getWideningDecision(I, VF);
7204       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
7205         // Scalarize a widened load of address.
7206         setWideningDecision(
7207             I, VF, CM_Scalarize,
7208             (VF.getKnownMinValue() *
7209              getMemoryInstructionCost(I, ElementCount::getFixed(1))));
7210       else if (auto Group = getInterleavedAccessGroup(I)) {
7211         // Scalarize an interleave group of address loads.
7212         for (unsigned I = 0; I < Group->getFactor(); ++I) {
7213           if (Instruction *Member = Group->getMember(I))
7214             setWideningDecision(
7215                 Member, VF, CM_Scalarize,
7216                 (VF.getKnownMinValue() *
7217                  getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
7218         }
7219       }
7220     } else
7221       // Make sure I gets scalarized and a cost estimate without
7222       // scalarization overhead.
7223       ForcedScalars[VF].insert(I);
7224   }
7225 }
7226 
7227 InstructionCost
7228 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
7229                                                Type *&VectorTy) {
7230   Type *RetTy = I->getType();
7231   if (canTruncateToMinimalBitwidth(I, VF))
7232     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
7233   VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF);
7234   auto SE = PSE.getSE();
7235   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
7236 
7237   // TODO: We need to estimate the cost of intrinsic calls.
7238   switch (I->getOpcode()) {
7239   case Instruction::GetElementPtr:
7240     // We mark this instruction as zero-cost because the cost of GEPs in
7241     // vectorized code depends on whether the corresponding memory instruction
7242     // is scalarized or not. Therefore, we handle GEPs with the memory
7243     // instruction cost.
7244     return 0;
7245   case Instruction::Br: {
7246     // In cases of scalarized and predicated instructions, there will be VF
7247     // predicated blocks in the vectorized loop. Each branch around these
7248     // blocks requires also an extract of its vector compare i1 element.
7249     bool ScalarPredicatedBB = false;
7250     BranchInst *BI = cast<BranchInst>(I);
7251     if (VF.isVector() && BI->isConditional() &&
7252         (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
7253          PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
7254       ScalarPredicatedBB = true;
7255 
7256     if (ScalarPredicatedBB) {
7257       // Return cost for branches around scalarized and predicated blocks.
7258       assert(!VF.isScalable() && "scalable vectors not yet supported.");
7259       auto *Vec_i1Ty =
7260           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
7261       return (TTI.getScalarizationOverhead(
7262                   Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()),
7263                   false, true) +
7264               (TTI.getCFInstrCost(Instruction::Br, CostKind) *
7265                VF.getKnownMinValue()));
7266     } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
7267       // The back-edge branch will remain, as will all scalar branches.
7268       return TTI.getCFInstrCost(Instruction::Br, CostKind);
7269     else
7270       // This branch will be eliminated by if-conversion.
7271       return 0;
7272     // Note: We currently assume zero cost for an unconditional branch inside
7273     // a predicated block since it will become a fall-through, although we
7274     // may decide in the future to call TTI for all branches.
7275   }
7276   case Instruction::PHI: {
7277     auto *Phi = cast<PHINode>(I);
7278 
7279     // First-order recurrences are replaced by vector shuffles inside the loop.
7280     // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
7281     if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi))
7282       return TTI.getShuffleCost(
7283           TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy),
7284           VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1));
7285 
7286     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
7287     // converted into select instructions. We require N - 1 selects per phi
7288     // node, where N is the number of incoming values.
7289     if (VF.isVector() && Phi->getParent() != TheLoop->getHeader())
7290       return (Phi->getNumIncomingValues() - 1) *
7291              TTI.getCmpSelInstrCost(
7292                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
7293                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
7294                  CmpInst::BAD_ICMP_PREDICATE, CostKind);
7295 
7296     return TTI.getCFInstrCost(Instruction::PHI, CostKind);
7297   }
7298   case Instruction::UDiv:
7299   case Instruction::SDiv:
7300   case Instruction::URem:
7301   case Instruction::SRem:
7302     // If we have a predicated instruction, it may not be executed for each
7303     // vector lane. Get the scalarization cost and scale this amount by the
7304     // probability of executing the predicated block. If the instruction is not
7305     // predicated, we fall through to the next case.
7306     if (VF.isVector() && isScalarWithPredication(I)) {
7307       InstructionCost Cost = 0;
7308 
7309       // These instructions have a non-void type, so account for the phi nodes
7310       // that we will create. This cost is likely to be zero. The phi node
7311       // cost, if any, should be scaled by the block probability because it
7312       // models a copy at the end of each predicated block.
7313       Cost += VF.getKnownMinValue() *
7314               TTI.getCFInstrCost(Instruction::PHI, CostKind);
7315 
7316       // The cost of the non-predicated instruction.
7317       Cost += VF.getKnownMinValue() *
7318               TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);
7319 
7320       // The cost of insertelement and extractelement instructions needed for
7321       // scalarization.
7322       Cost += getScalarizationOverhead(I, VF);
7323 
7324       // Scale the cost by the probability of executing the predicated blocks.
7325       // This assumes the predicated block for each vector lane is equally
7326       // likely.
7327       return Cost / getReciprocalPredBlockProb();
7328     }
7329     LLVM_FALLTHROUGH;
7330   case Instruction::Add:
7331   case Instruction::FAdd:
7332   case Instruction::Sub:
7333   case Instruction::FSub:
7334   case Instruction::Mul:
7335   case Instruction::FMul:
7336   case Instruction::FDiv:
7337   case Instruction::FRem:
7338   case Instruction::Shl:
7339   case Instruction::LShr:
7340   case Instruction::AShr:
7341   case Instruction::And:
7342   case Instruction::Or:
7343   case Instruction::Xor: {
7344     // Since we will replace the stride by 1 the multiplication should go away.
7345     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
7346       return 0;
7347 
7348     // Detect reduction patterns
7349     InstructionCost RedCost;
7350     if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7351             .isValid())
7352       return RedCost;
7353 
7354     // Certain instructions can be cheaper to vectorize if they have a constant
7355     // second vector operand. One example of this are shifts on x86.
7356     Value *Op2 = I->getOperand(1);
7357     TargetTransformInfo::OperandValueProperties Op2VP;
7358     TargetTransformInfo::OperandValueKind Op2VK =
7359         TTI.getOperandInfo(Op2, Op2VP);
7360     if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
7361       Op2VK = TargetTransformInfo::OK_UniformValue;
7362 
7363     SmallVector<const Value *, 4> Operands(I->operand_values());
7364     unsigned N = isScalarAfterVectorization(I, VF) ? VF.getKnownMinValue() : 1;
7365     return N * TTI.getArithmeticInstrCost(
7366                    I->getOpcode(), VectorTy, CostKind,
7367                    TargetTransformInfo::OK_AnyValue,
7368                    Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
7369   }
7370   case Instruction::FNeg: {
7371     assert(!VF.isScalable() && "VF is assumed to be non scalable.");
7372     unsigned N = isScalarAfterVectorization(I, VF) ? VF.getKnownMinValue() : 1;
7373     return N * TTI.getArithmeticInstrCost(
7374                    I->getOpcode(), VectorTy, CostKind,
7375                    TargetTransformInfo::OK_AnyValue,
7376                    TargetTransformInfo::OK_AnyValue,
7377                    TargetTransformInfo::OP_None, TargetTransformInfo::OP_None,
7378                    I->getOperand(0), I);
7379   }
7380   case Instruction::Select: {
7381     SelectInst *SI = cast<SelectInst>(I);
7382     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
7383     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
7384     Type *CondTy = SI->getCondition()->getType();
7385     if (!ScalarCond)
7386       CondTy = VectorType::get(CondTy, VF);
7387     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy,
7388                                   CmpInst::BAD_ICMP_PREDICATE, CostKind, I);
7389   }
7390   case Instruction::ICmp:
7391   case Instruction::FCmp: {
7392     Type *ValTy = I->getOperand(0)->getType();
7393     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
7394     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
7395       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
7396     VectorTy = ToVectorTy(ValTy, VF);
7397     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr,
7398                                   CmpInst::BAD_ICMP_PREDICATE, CostKind, I);
7399   }
7400   case Instruction::Store:
7401   case Instruction::Load: {
7402     ElementCount Width = VF;
7403     if (Width.isVector()) {
7404       InstWidening Decision = getWideningDecision(I, Width);
7405       assert(Decision != CM_Unknown &&
7406              "CM decision should be taken at this point");
7407       if (Decision == CM_Scalarize)
7408         Width = ElementCount::getFixed(1);
7409     }
7410     VectorTy = ToVectorTy(getMemInstValueType(I), Width);
7411     return getMemoryInstructionCost(I, VF);
7412   }
7413   case Instruction::ZExt:
7414   case Instruction::SExt:
7415   case Instruction::FPToUI:
7416   case Instruction::FPToSI:
7417   case Instruction::FPExt:
7418   case Instruction::PtrToInt:
7419   case Instruction::IntToPtr:
7420   case Instruction::SIToFP:
7421   case Instruction::UIToFP:
7422   case Instruction::Trunc:
7423   case Instruction::FPTrunc:
7424   case Instruction::BitCast: {
7425     // Computes the CastContextHint from a Load/Store instruction.
7426     auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
7427       assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
7428              "Expected a load or a store!");
7429 
7430       if (VF.isScalar() || !TheLoop->contains(I))
7431         return TTI::CastContextHint::Normal;
7432 
7433       switch (getWideningDecision(I, VF)) {
7434       case LoopVectorizationCostModel::CM_GatherScatter:
7435         return TTI::CastContextHint::GatherScatter;
7436       case LoopVectorizationCostModel::CM_Interleave:
7437         return TTI::CastContextHint::Interleave;
7438       case LoopVectorizationCostModel::CM_Scalarize:
7439       case LoopVectorizationCostModel::CM_Widen:
7440         return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked
7441                                         : TTI::CastContextHint::Normal;
7442       case LoopVectorizationCostModel::CM_Widen_Reverse:
7443         return TTI::CastContextHint::Reversed;
7444       case LoopVectorizationCostModel::CM_Unknown:
7445         llvm_unreachable("Instr did not go through cost modelling?");
7446       }
7447 
7448       llvm_unreachable("Unhandled case!");
7449     };
7450 
7451     unsigned Opcode = I->getOpcode();
7452     TTI::CastContextHint CCH = TTI::CastContextHint::None;
7453     // For Trunc, the context is the only user, which must be a StoreInst.
7454     if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
7455       if (I->hasOneUse())
7456         if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
7457           CCH = ComputeCCH(Store);
7458     }
7459     // For Z/Sext, the context is the operand, which must be a LoadInst.
7460     else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
7461              Opcode == Instruction::FPExt) {
7462       if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
7463         CCH = ComputeCCH(Load);
7464     }
7465 
7466     // We optimize the truncation of induction variables having constant
7467     // integer steps. The cost of these truncations is the same as the scalar
7468     // operation.
7469     if (isOptimizableIVTruncate(I, VF)) {
7470       auto *Trunc = cast<TruncInst>(I);
7471       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
7472                                   Trunc->getSrcTy(), CCH, CostKind, Trunc);
7473     }
7474 
7475     // Detect reduction patterns
7476     InstructionCost RedCost;
7477     if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7478             .isValid())
7479       return RedCost;
7480 
7481     Type *SrcScalarTy = I->getOperand(0)->getType();
7482     Type *SrcVecTy =
7483         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
7484     if (canTruncateToMinimalBitwidth(I, VF)) {
7485       // This cast is going to be shrunk. This may remove the cast or it might
7486       // turn it into slightly different cast. For example, if MinBW == 16,
7487       // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
7488       //
7489       // Calculate the modified src and dest types.
7490       Type *MinVecTy = VectorTy;
7491       if (Opcode == Instruction::Trunc) {
7492         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
7493         VectorTy =
7494             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7495       } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
7496         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
7497         VectorTy =
7498             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7499       }
7500     }
7501 
7502     unsigned N;
7503     if (isScalarAfterVectorization(I, VF)) {
7504       assert(!VF.isScalable() && "VF is assumed to be non scalable");
7505       N = VF.getKnownMinValue();
7506     } else
7507       N = 1;
7508     return N *
7509            TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
7510   }
7511   case Instruction::Call: {
7512     bool NeedToScalarize;
7513     CallInst *CI = cast<CallInst>(I);
7514     InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
7515     if (getVectorIntrinsicIDForCall(CI, TLI)) {
7516       InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF);
7517       return std::min(CallCost, IntrinsicCost);
7518     }
7519     return CallCost;
7520   }
7521   case Instruction::ExtractValue:
7522     return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput);
7523   default:
7524     // The cost of executing VF copies of the scalar instruction. This opcode
7525     // is unknown. Assume that it is the same as 'mul'.
7526     return VF.getKnownMinValue() * TTI.getArithmeticInstrCost(
7527                                        Instruction::Mul, VectorTy, CostKind) +
7528            getScalarizationOverhead(I, VF);
7529   } // end of switch.
7530 }
7531 
7532 char LoopVectorize::ID = 0;
7533 
7534 static const char lv_name[] = "Loop Vectorization";
7535 
7536 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
7537 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
7538 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
7539 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
7540 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
7541 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
7542 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
7543 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
7544 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
7545 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
7546 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
7547 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
7548 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
7549 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
7550 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
7551 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
7552 
7553 namespace llvm {
7554 
7555 Pass *createLoopVectorizePass() { return new LoopVectorize(); }
7556 
7557 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
7558                               bool VectorizeOnlyWhenForced) {
7559   return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
7560 }
7561 
7562 } // end namespace llvm
7563 
7564 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
7565   // Check if the pointer operand of a load or store instruction is
7566   // consecutive.
7567   if (auto *Ptr = getLoadStorePointerOperand(Inst))
7568     return Legal->isConsecutivePtr(Ptr);
7569   return false;
7570 }
7571 
7572 void LoopVectorizationCostModel::collectValuesToIgnore() {
7573   // Ignore ephemeral values.
7574   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
7575 
7576   // Ignore type-promoting instructions we identified during reduction
7577   // detection.
7578   for (auto &Reduction : Legal->getReductionVars()) {
7579     RecurrenceDescriptor &RedDes = Reduction.second;
7580     const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
7581     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7582   }
7583   // Ignore type-casting instructions we identified during induction
7584   // detection.
7585   for (auto &Induction : Legal->getInductionVars()) {
7586     InductionDescriptor &IndDes = Induction.second;
7587     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
7588     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7589   }
7590 }
7591 
7592 void LoopVectorizationCostModel::collectInLoopReductions() {
7593   for (auto &Reduction : Legal->getReductionVars()) {
7594     PHINode *Phi = Reduction.first;
7595     RecurrenceDescriptor &RdxDesc = Reduction.second;
7596 
7597     // We don't collect reductions that are type promoted (yet).
7598     if (RdxDesc.getRecurrenceType() != Phi->getType())
7599       continue;
7600 
7601     // If the target would prefer this reduction to happen "in-loop", then we
7602     // want to record it as such.
7603     unsigned Opcode = RdxDesc.getOpcode();
7604     if (!PreferInLoopReductions &&
7605         !TTI.preferInLoopReduction(Opcode, Phi->getType(),
7606                                    TargetTransformInfo::ReductionFlags()))
7607       continue;
7608 
7609     // Check that we can correctly put the reductions into the loop, by
7610     // finding the chain of operations that leads from the phi to the loop
7611     // exit value.
7612     SmallVector<Instruction *, 4> ReductionOperations =
7613         RdxDesc.getReductionOpChain(Phi, TheLoop);
7614     bool InLoop = !ReductionOperations.empty();
7615     if (InLoop) {
7616       InLoopReductionChains[Phi] = ReductionOperations;
7617       // Add the elements to InLoopReductionImmediateChains for cost modelling.
7618       Instruction *LastChain = Phi;
7619       for (auto *I : ReductionOperations) {
7620         InLoopReductionImmediateChains[I] = LastChain;
7621         LastChain = I;
7622       }
7623     }
7624     LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
7625                       << " reduction for phi: " << *Phi << "\n");
7626   }
7627 }
7628 
7629 // TODO: we could return a pair of values that specify the max VF and
7630 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
7631 // `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment
7632 // doesn't have a cost model that can choose which plan to execute if
7633 // more than one is generated.
7634 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
7635                                  LoopVectorizationCostModel &CM) {
7636   unsigned WidestType;
7637   std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
7638   return WidestVectorRegBits / WidestType;
7639 }
7640 
7641 VectorizationFactor
7642 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
7643   assert(!UserVF.isScalable() && "scalable vectors not yet supported");
7644   ElementCount VF = UserVF;
7645   // Outer loop handling: They may require CFG and instruction level
7646   // transformations before even evaluating whether vectorization is profitable.
7647   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
7648   // the vectorization pipeline.
7649   if (!OrigLoop->isInnermost()) {
7650     // If the user doesn't provide a vectorization factor, determine a
7651     // reasonable one.
7652     if (UserVF.isZero()) {
7653       VF = ElementCount::getFixed(
7654           determineVPlanVF(TTI->getRegisterBitWidth(true /* Vector*/), CM));
7655       LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
7656 
7657       // Make sure we have a VF > 1 for stress testing.
7658       if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
7659         LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
7660                           << "overriding computed VF.\n");
7661         VF = ElementCount::getFixed(4);
7662       }
7663     }
7664     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7665     assert(isPowerOf2_32(VF.getKnownMinValue()) &&
7666            "VF needs to be a power of two");
7667     LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
7668                       << "VF " << VF << " to build VPlans.\n");
7669     buildVPlans(VF, VF);
7670 
7671     // For VPlan build stress testing, we bail out after VPlan construction.
7672     if (VPlanBuildStressTest)
7673       return VectorizationFactor::Disabled();
7674 
7675     return {VF, 0 /*Cost*/};
7676   }
7677 
7678   LLVM_DEBUG(
7679       dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
7680                 "VPlan-native path.\n");
7681   return VectorizationFactor::Disabled();
7682 }
7683 
7684 Optional<VectorizationFactor>
7685 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
7686   assert(OrigLoop->isInnermost() && "Inner loop expected.");
7687   Optional<ElementCount> MaybeMaxVF = CM.computeMaxVF(UserVF, UserIC);
7688   if (!MaybeMaxVF) // Cases that should not to be vectorized nor interleaved.
7689     return None;
7690 
7691   // Invalidate interleave groups if all blocks of loop will be predicated.
7692   if (CM.blockNeedsPredication(OrigLoop->getHeader()) &&
7693       !useMaskedInterleavedAccesses(*TTI)) {
7694     LLVM_DEBUG(
7695         dbgs()
7696         << "LV: Invalidate all interleaved groups due to fold-tail by masking "
7697            "which requires masked-interleaved support.\n");
7698     if (CM.InterleaveInfo.invalidateGroups())
7699       // Invalidating interleave groups also requires invalidating all decisions
7700       // based on them, which includes widening decisions and uniform and scalar
7701       // values.
7702       CM.invalidateCostModelingDecisions();
7703   }
7704 
7705   ElementCount MaxVF = MaybeMaxVF.getValue();
7706   assert(MaxVF.isNonZero() && "MaxVF is zero.");
7707 
7708   bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxVF);
7709   if (!UserVF.isZero() &&
7710       (UserVFIsLegal || (UserVF.isScalable() && MaxVF.isScalable()))) {
7711     // FIXME: MaxVF is temporarily used inplace of UserVF for illegal scalable
7712     // VFs here, this should be reverted to only use legal UserVFs once the
7713     // loop below supports scalable VFs.
7714     ElementCount VF = UserVFIsLegal ? UserVF : MaxVF;
7715     LLVM_DEBUG(dbgs() << "LV: Using " << (UserVFIsLegal ? "user" : "max")
7716                       << " VF " << VF << ".\n");
7717     assert(isPowerOf2_32(VF.getKnownMinValue()) &&
7718            "VF needs to be a power of two");
7719     // Collect the instructions (and their associated costs) that will be more
7720     // profitable to scalarize.
7721     CM.selectUserVectorizationFactor(VF);
7722     CM.collectInLoopReductions();
7723     buildVPlansWithVPRecipes(VF, VF);
7724     LLVM_DEBUG(printPlans(dbgs()));
7725     return {{VF, 0}};
7726   }
7727 
7728   assert(!MaxVF.isScalable() &&
7729          "Scalable vectors not yet supported beyond this point");
7730 
7731   for (ElementCount VF = ElementCount::getFixed(1);
7732        ElementCount::isKnownLE(VF, MaxVF); VF *= 2) {
7733     // Collect Uniform and Scalar instructions after vectorization with VF.
7734     CM.collectUniformsAndScalars(VF);
7735 
7736     // Collect the instructions (and their associated costs) that will be more
7737     // profitable to scalarize.
7738     if (VF.isVector())
7739       CM.collectInstsToScalarize(VF);
7740   }
7741 
7742   CM.collectInLoopReductions();
7743 
7744   buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxVF);
7745   LLVM_DEBUG(printPlans(dbgs()));
7746   if (MaxVF.isScalar())
7747     return VectorizationFactor::Disabled();
7748 
7749   // Select the optimal vectorization factor.
7750   return CM.selectVectorizationFactor(MaxVF);
7751 }
7752 
7753 void LoopVectorizationPlanner::setBestPlan(ElementCount VF, unsigned UF) {
7754   LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF
7755                     << '\n');
7756   BestVF = VF;
7757   BestUF = UF;
7758 
7759   erase_if(VPlans, [VF](const VPlanPtr &Plan) {
7760     return !Plan->hasVF(VF);
7761   });
7762   assert(VPlans.size() == 1 && "Best VF has not a single VPlan.");
7763 }
7764 
7765 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV,
7766                                            DominatorTree *DT) {
7767   // Perform the actual loop transformation.
7768 
7769   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
7770   VPCallbackILV CallbackILV(ILV);
7771 
7772   assert(BestVF.hasValue() && "Vectorization Factor is missing");
7773   assert(VPlans.size() == 1 && "Not a single VPlan to execute.");
7774 
7775   VPTransformState State{*BestVF,     BestUF,
7776                          LI,          DT,
7777                          ILV.Builder, ILV.VectorLoopValueMap,
7778                          &ILV,        VPlans.front().get(),
7779                          CallbackILV};
7780   State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
7781   State.TripCount = ILV.getOrCreateTripCount(nullptr);
7782   State.CanonicalIV = ILV.Induction;
7783 
7784   ILV.printDebugTracesAtStart();
7785 
7786   //===------------------------------------------------===//
7787   //
7788   // Notice: any optimization or new instruction that go
7789   // into the code below should also be implemented in
7790   // the cost-model.
7791   //
7792   //===------------------------------------------------===//
7793 
7794   // 2. Copy and widen instructions from the old loop into the new loop.
7795   VPlans.front()->execute(&State);
7796 
7797   // 3. Fix the vectorized code: take care of header phi's, live-outs,
7798   //    predication, updating analyses.
7799   ILV.fixVectorizedLoop(State);
7800 
7801   ILV.printDebugTracesAtEnd();
7802 }
7803 
7804 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
7805     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
7806 
7807   // We create new control-flow for the vectorized loop, so the original exit
7808   // conditions will be dead after vectorization if it's only used by the
7809   // terminator
7810   SmallVector<BasicBlock*> ExitingBlocks;
7811   OrigLoop->getExitingBlocks(ExitingBlocks);
7812   for (auto *BB : ExitingBlocks) {
7813     auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
7814     if (!Cmp || !Cmp->hasOneUse())
7815       continue;
7816 
7817     // TODO: we should introduce a getUniqueExitingBlocks on Loop
7818     if (!DeadInstructions.insert(Cmp).second)
7819       continue;
7820 
7821     // The operands of the icmp is often a dead trunc, used by IndUpdate.
7822     // TODO: can recurse through operands in general
7823     for (Value *Op : Cmp->operands()) {
7824       if (isa<TruncInst>(Op) && Op->hasOneUse())
7825           DeadInstructions.insert(cast<Instruction>(Op));
7826     }
7827   }
7828 
7829   // We create new "steps" for induction variable updates to which the original
7830   // induction variables map. An original update instruction will be dead if
7831   // all its users except the induction variable are dead.
7832   auto *Latch = OrigLoop->getLoopLatch();
7833   for (auto &Induction : Legal->getInductionVars()) {
7834     PHINode *Ind = Induction.first;
7835     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
7836 
7837     // If the tail is to be folded by masking, the primary induction variable,
7838     // if exists, isn't dead: it will be used for masking. Don't kill it.
7839     if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction())
7840       continue;
7841 
7842     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
7843           return U == Ind || DeadInstructions.count(cast<Instruction>(U));
7844         }))
7845       DeadInstructions.insert(IndUpdate);
7846 
7847     // We record as "Dead" also the type-casting instructions we had identified
7848     // during induction analysis. We don't need any handling for them in the
7849     // vectorized loop because we have proven that, under a proper runtime
7850     // test guarding the vectorized loop, the value of the phi, and the casted
7851     // value of the phi, are the same. The last instruction in this casting chain
7852     // will get its scalar/vector/widened def from the scalar/vector/widened def
7853     // of the respective phi node. Any other casts in the induction def-use chain
7854     // have no other uses outside the phi update chain, and will be ignored.
7855     InductionDescriptor &IndDes = Induction.second;
7856     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
7857     DeadInstructions.insert(Casts.begin(), Casts.end());
7858   }
7859 }
7860 
7861 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }
7862 
7863 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
7864 
7865 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
7866                                         Instruction::BinaryOps BinOp) {
7867   // When unrolling and the VF is 1, we only need to add a simple scalar.
7868   Type *Ty = Val->getType();
7869   assert(!Ty->isVectorTy() && "Val must be a scalar");
7870 
7871   if (Ty->isFloatingPointTy()) {
7872     Constant *C = ConstantFP::get(Ty, (double)StartIdx);
7873 
7874     // Floating point operations had to be 'fast' to enable the unrolling.
7875     Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step));
7876     return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp));
7877   }
7878   Constant *C = ConstantInt::get(Ty, StartIdx);
7879   return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
7880 }
7881 
7882 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
7883   SmallVector<Metadata *, 4> MDs;
7884   // Reserve first location for self reference to the LoopID metadata node.
7885   MDs.push_back(nullptr);
7886   bool IsUnrollMetadata = false;
7887   MDNode *LoopID = L->getLoopID();
7888   if (LoopID) {
7889     // First find existing loop unrolling disable metadata.
7890     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
7891       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
7892       if (MD) {
7893         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
7894         IsUnrollMetadata =
7895             S && S->getString().startswith("llvm.loop.unroll.disable");
7896       }
7897       MDs.push_back(LoopID->getOperand(i));
7898     }
7899   }
7900 
7901   if (!IsUnrollMetadata) {
7902     // Add runtime unroll disable metadata.
7903     LLVMContext &Context = L->getHeader()->getContext();
7904     SmallVector<Metadata *, 1> DisableOperands;
7905     DisableOperands.push_back(
7906         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
7907     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
7908     MDs.push_back(DisableNode);
7909     MDNode *NewLoopID = MDNode::get(Context, MDs);
7910     // Set operand 0 to refer to the loop id itself.
7911     NewLoopID->replaceOperandWith(0, NewLoopID);
7912     L->setLoopID(NewLoopID);
7913   }
7914 }
7915 
7916 //===--------------------------------------------------------------------===//
7917 // EpilogueVectorizerMainLoop
7918 //===--------------------------------------------------------------------===//
7919 
7920 /// This function is partially responsible for generating the control flow
7921 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
7922 BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
7923   MDNode *OrigLoopID = OrigLoop->getLoopID();
7924   Loop *Lp = createVectorLoopSkeleton("");
7925 
7926   // Generate the code to check the minimum iteration count of the vector
7927   // epilogue (see below).
7928   EPI.EpilogueIterationCountCheck =
7929       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true);
7930   EPI.EpilogueIterationCountCheck->setName("iter.check");
7931 
7932   // Generate the code to check any assumptions that we've made for SCEV
7933   // expressions.
7934   BasicBlock *SavedPreHeader = LoopVectorPreHeader;
7935   emitSCEVChecks(Lp, LoopScalarPreHeader);
7936 
7937   // If a safety check was generated save it.
7938   if (SavedPreHeader != LoopVectorPreHeader)
7939     EPI.SCEVSafetyCheck = SavedPreHeader;
7940 
7941   // Generate the code that checks at runtime if arrays overlap. We put the
7942   // checks into a separate block to make the more common case of few elements
7943   // faster.
7944   SavedPreHeader = LoopVectorPreHeader;
7945   emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
7946 
7947   // If a safety check was generated save/overwite it.
7948   if (SavedPreHeader != LoopVectorPreHeader)
7949     EPI.MemSafetyCheck = SavedPreHeader;
7950 
7951   // Generate the iteration count check for the main loop, *after* the check
7952   // for the epilogue loop, so that the path-length is shorter for the case
7953   // that goes directly through the vector epilogue. The longer-path length for
7954   // the main loop is compensated for, by the gain from vectorizing the larger
7955   // trip count. Note: the branch will get updated later on when we vectorize
7956   // the epilogue.
7957   EPI.MainLoopIterationCountCheck =
7958       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false);
7959 
7960   // Generate the induction variable.
7961   OldInduction = Legal->getPrimaryInduction();
7962   Type *IdxTy = Legal->getWidestInductionType();
7963   Value *StartIdx = ConstantInt::get(IdxTy, 0);
7964   Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
7965   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
7966   EPI.VectorTripCount = CountRoundDown;
7967   Induction =
7968       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
7969                               getDebugLocFromInstOrOperands(OldInduction));
7970 
7971   // Skip induction resume value creation here because they will be created in
7972   // the second pass. If we created them here, they wouldn't be used anyway,
7973   // because the vplan in the second pass still contains the inductions from the
7974   // original loop.
7975 
7976   return completeLoopSkeleton(Lp, OrigLoopID);
7977 }
7978 
7979 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
7980   LLVM_DEBUG({
7981     dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
7982            << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue()
7983            << ", Main Loop UF:" << EPI.MainLoopUF
7984            << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue()
7985            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
7986   });
7987 }
7988 
7989 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
7990   DEBUG_WITH_TYPE(VerboseDebug, {
7991     dbgs() << "intermediate fn:\n" << *Induction->getFunction() << "\n";
7992   });
7993 }
7994 
7995 BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(
7996     Loop *L, BasicBlock *Bypass, bool ForEpilogue) {
7997   assert(L && "Expected valid Loop.");
7998   assert(Bypass && "Expected valid bypass basic block.");
7999   unsigned VFactor =
8000       ForEpilogue ? EPI.EpilogueVF.getKnownMinValue() : VF.getKnownMinValue();
8001   unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF;
8002   Value *Count = getOrCreateTripCount(L);
8003   // Reuse existing vector loop preheader for TC checks.
8004   // Note that new preheader block is generated for vector loop.
8005   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
8006   IRBuilder<> Builder(TCCheckBlock->getTerminator());
8007 
8008   // Generate code to check if the loop's trip count is less than VF * UF of the
8009   // main vector loop.
8010   auto P =
8011       Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
8012 
8013   Value *CheckMinIters = Builder.CreateICmp(
8014       P, Count, ConstantInt::get(Count->getType(), VFactor * UFactor),
8015       "min.iters.check");
8016 
8017   if (!ForEpilogue)
8018     TCCheckBlock->setName("vector.main.loop.iter.check");
8019 
8020   // Create new preheader for vector loop.
8021   LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
8022                                    DT, LI, nullptr, "vector.ph");
8023 
8024   if (ForEpilogue) {
8025     assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
8026                                  DT->getNode(Bypass)->getIDom()) &&
8027            "TC check is expected to dominate Bypass");
8028 
8029     // Update dominator for Bypass & LoopExit.
8030     DT->changeImmediateDominator(Bypass, TCCheckBlock);
8031     DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
8032 
8033     LoopBypassBlocks.push_back(TCCheckBlock);
8034 
8035     // Save the trip count so we don't have to regenerate it in the
8036     // vec.epilog.iter.check. This is safe to do because the trip count
8037     // generated here dominates the vector epilog iter check.
8038     EPI.TripCount = Count;
8039   }
8040 
8041   ReplaceInstWithInst(
8042       TCCheckBlock->getTerminator(),
8043       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8044 
8045   return TCCheckBlock;
8046 }
8047 
8048 //===--------------------------------------------------------------------===//
8049 // EpilogueVectorizerEpilogueLoop
8050 //===--------------------------------------------------------------------===//
8051 
8052 /// This function is partially responsible for generating the control flow
8053 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
8054 BasicBlock *
8055 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
8056   MDNode *OrigLoopID = OrigLoop->getLoopID();
8057   Loop *Lp = createVectorLoopSkeleton("vec.epilog.");
8058 
8059   // Now, compare the remaining count and if there aren't enough iterations to
8060   // execute the vectorized epilogue skip to the scalar part.
8061   BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader;
8062   VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check");
8063   LoopVectorPreHeader =
8064       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
8065                  LI, nullptr, "vec.epilog.ph");
8066   emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader,
8067                                           VecEpilogueIterationCountCheck);
8068 
8069   // Adjust the control flow taking the state info from the main loop
8070   // vectorization into account.
8071   assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck &&
8072          "expected this to be saved from the previous pass.");
8073   EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith(
8074       VecEpilogueIterationCountCheck, LoopVectorPreHeader);
8075 
8076   DT->changeImmediateDominator(LoopVectorPreHeader,
8077                                EPI.MainLoopIterationCountCheck);
8078 
8079   EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
8080       VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8081 
8082   if (EPI.SCEVSafetyCheck)
8083     EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith(
8084         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8085   if (EPI.MemSafetyCheck)
8086     EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith(
8087         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8088 
8089   DT->changeImmediateDominator(
8090       VecEpilogueIterationCountCheck,
8091       VecEpilogueIterationCountCheck->getSinglePredecessor());
8092 
8093   DT->changeImmediateDominator(LoopScalarPreHeader,
8094                                EPI.EpilogueIterationCountCheck);
8095   DT->changeImmediateDominator(LoopExitBlock, EPI.EpilogueIterationCountCheck);
8096 
8097   // Keep track of bypass blocks, as they feed start values to the induction
8098   // phis in the scalar loop preheader.
8099   if (EPI.SCEVSafetyCheck)
8100     LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck);
8101   if (EPI.MemSafetyCheck)
8102     LoopBypassBlocks.push_back(EPI.MemSafetyCheck);
8103   LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck);
8104 
8105   // Generate a resume induction for the vector epilogue and put it in the
8106   // vector epilogue preheader
8107   Type *IdxTy = Legal->getWidestInductionType();
8108   PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
8109                                          LoopVectorPreHeader->getFirstNonPHI());
8110   EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
8111   EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
8112                            EPI.MainLoopIterationCountCheck);
8113 
8114   // Generate the induction variable.
8115   OldInduction = Legal->getPrimaryInduction();
8116   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
8117   Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
8118   Value *StartIdx = EPResumeVal;
8119   Induction =
8120       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
8121                               getDebugLocFromInstOrOperands(OldInduction));
8122 
8123   // Generate induction resume values. These variables save the new starting
8124   // indexes for the scalar loop. They are used to test if there are any tail
8125   // iterations left once the vector loop has completed.
8126   // Note that when the vectorized epilogue is skipped due to iteration count
8127   // check, then the resume value for the induction variable comes from
8128   // the trip count of the main vector loop, hence passing the AdditionalBypass
8129   // argument.
8130   createInductionResumeValues(Lp, CountRoundDown,
8131                               {VecEpilogueIterationCountCheck,
8132                                EPI.VectorTripCount} /* AdditionalBypass */);
8133 
8134   AddRuntimeUnrollDisableMetaData(Lp);
8135   return completeLoopSkeleton(Lp, OrigLoopID);
8136 }
8137 
8138 BasicBlock *
8139 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
8140     Loop *L, BasicBlock *Bypass, BasicBlock *Insert) {
8141 
8142   assert(EPI.TripCount &&
8143          "Expected trip count to have been safed in the first pass.");
8144   assert(
8145       (!isa<Instruction>(EPI.TripCount) ||
8146        DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
8147       "saved trip count does not dominate insertion point.");
8148   Value *TC = EPI.TripCount;
8149   IRBuilder<> Builder(Insert->getTerminator());
8150   Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");
8151 
8152   // Generate code to check if the loop's trip count is less than VF * UF of the
8153   // vector epilogue loop.
8154   auto P =
8155       Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
8156 
8157   Value *CheckMinIters = Builder.CreateICmp(
8158       P, Count,
8159       ConstantInt::get(Count->getType(),
8160                        EPI.EpilogueVF.getKnownMinValue() * EPI.EpilogueUF),
8161       "min.epilog.iters.check");
8162 
8163   ReplaceInstWithInst(
8164       Insert->getTerminator(),
8165       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8166 
8167   LoopBypassBlocks.push_back(Insert);
8168   return Insert;
8169 }
8170 
8171 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
8172   LLVM_DEBUG({
8173     dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
8174            << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue()
8175            << ", Main Loop UF:" << EPI.MainLoopUF
8176            << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue()
8177            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8178   });
8179 }
8180 
8181 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
8182   DEBUG_WITH_TYPE(VerboseDebug, {
8183     dbgs() << "final fn:\n" << *Induction->getFunction() << "\n";
8184   });
8185 }
8186 
8187 bool LoopVectorizationPlanner::getDecisionAndClampRange(
8188     const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
8189   assert(!Range.isEmpty() && "Trying to test an empty VF range.");
8190   bool PredicateAtRangeStart = Predicate(Range.Start);
8191 
8192   for (ElementCount TmpVF = Range.Start * 2;
8193        ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
8194     if (Predicate(TmpVF) != PredicateAtRangeStart) {
8195       Range.End = TmpVF;
8196       break;
8197     }
8198 
8199   return PredicateAtRangeStart;
8200 }
8201 
8202 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
8203 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
8204 /// of VF's starting at a given VF and extending it as much as possible. Each
8205 /// vectorization decision can potentially shorten this sub-range during
8206 /// buildVPlan().
8207 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
8208                                            ElementCount MaxVF) {
8209   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8210   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8211     VFRange SubRange = {VF, MaxVFPlusOne};
8212     VPlans.push_back(buildVPlan(SubRange));
8213     VF = SubRange.End;
8214   }
8215 }
8216 
8217 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
8218                                          VPlanPtr &Plan) {
8219   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
8220 
8221   // Look for cached value.
8222   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
8223   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
8224   if (ECEntryIt != EdgeMaskCache.end())
8225     return ECEntryIt->second;
8226 
8227   VPValue *SrcMask = createBlockInMask(Src, Plan);
8228 
8229   // The terminator has to be a branch inst!
8230   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
8231   assert(BI && "Unexpected terminator found");
8232 
8233   if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
8234     return EdgeMaskCache[Edge] = SrcMask;
8235 
8236   // If source is an exiting block, we know the exit edge is dynamically dead
8237   // in the vector loop, and thus we don't need to restrict the mask.  Avoid
8238   // adding uses of an otherwise potentially dead instruction.
8239   if (OrigLoop->isLoopExiting(Src))
8240     return EdgeMaskCache[Edge] = SrcMask;
8241 
8242   VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition());
8243   assert(EdgeMask && "No Edge Mask found for condition");
8244 
8245   if (BI->getSuccessor(0) != Dst)
8246     EdgeMask = Builder.createNot(EdgeMask);
8247 
8248   if (SrcMask) // Otherwise block in-mask is all-one, no need to AND.
8249     EdgeMask = Builder.createAnd(EdgeMask, SrcMask);
8250 
8251   return EdgeMaskCache[Edge] = EdgeMask;
8252 }
8253 
8254 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
8255   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
8256 
8257   // Look for cached value.
8258   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
8259   if (BCEntryIt != BlockMaskCache.end())
8260     return BCEntryIt->second;
8261 
8262   // All-one mask is modelled as no-mask following the convention for masked
8263   // load/store/gather/scatter. Initialize BlockMask to no-mask.
8264   VPValue *BlockMask = nullptr;
8265 
8266   if (OrigLoop->getHeader() == BB) {
8267     if (!CM.blockNeedsPredication(BB))
8268       return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
8269 
8270     // Create the block in mask as the first non-phi instruction in the block.
8271     VPBuilder::InsertPointGuard Guard(Builder);
8272     auto NewInsertionPoint = Builder.getInsertBlock()->getFirstNonPhi();
8273     Builder.setInsertPoint(Builder.getInsertBlock(), NewInsertionPoint);
8274 
8275     // Introduce the early-exit compare IV <= BTC to form header block mask.
8276     // This is used instead of IV < TC because TC may wrap, unlike BTC.
8277     // Start by constructing the desired canonical IV.
8278     VPValue *IV = nullptr;
8279     if (Legal->getPrimaryInduction())
8280       IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction());
8281     else {
8282       auto IVRecipe = new VPWidenCanonicalIVRecipe();
8283       Builder.getInsertBlock()->insert(IVRecipe, NewInsertionPoint);
8284       IV = IVRecipe->getVPValue();
8285     }
8286     VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
8287     bool TailFolded = !CM.isScalarEpilogueAllowed();
8288 
8289     if (TailFolded && CM.TTI.emitGetActiveLaneMask()) {
8290       // While ActiveLaneMask is a binary op that consumes the loop tripcount
8291       // as a second argument, we only pass the IV here and extract the
8292       // tripcount from the transform state where codegen of the VP instructions
8293       // happen.
8294       BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV});
8295     } else {
8296       BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
8297     }
8298     return BlockMaskCache[BB] = BlockMask;
8299   }
8300 
8301   // This is the block mask. We OR all incoming edges.
8302   for (auto *Predecessor : predecessors(BB)) {
8303     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
8304     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
8305       return BlockMaskCache[BB] = EdgeMask;
8306 
8307     if (!BlockMask) { // BlockMask has its initialized nullptr value.
8308       BlockMask = EdgeMask;
8309       continue;
8310     }
8311 
8312     BlockMask = Builder.createOr(BlockMask, EdgeMask);
8313   }
8314 
8315   return BlockMaskCache[BB] = BlockMask;
8316 }
8317 
8318 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I, VFRange &Range,
8319                                                 VPlanPtr &Plan) {
8320   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
8321          "Must be called with either a load or store");
8322 
8323   auto willWiden = [&](ElementCount VF) -> bool {
8324     if (VF.isScalar())
8325       return false;
8326     LoopVectorizationCostModel::InstWidening Decision =
8327         CM.getWideningDecision(I, VF);
8328     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
8329            "CM decision should be taken at this point.");
8330     if (Decision == LoopVectorizationCostModel::CM_Interleave)
8331       return true;
8332     if (CM.isScalarAfterVectorization(I, VF) ||
8333         CM.isProfitableToScalarize(I, VF))
8334       return false;
8335     return Decision != LoopVectorizationCostModel::CM_Scalarize;
8336   };
8337 
8338   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8339     return nullptr;
8340 
8341   VPValue *Mask = nullptr;
8342   if (Legal->isMaskRequired(I))
8343     Mask = createBlockInMask(I->getParent(), Plan);
8344 
8345   VPValue *Addr = Plan->getOrAddVPValue(getLoadStorePointerOperand(I));
8346   if (LoadInst *Load = dyn_cast<LoadInst>(I))
8347     return new VPWidenMemoryInstructionRecipe(*Load, Addr, Mask);
8348 
8349   StoreInst *Store = cast<StoreInst>(I);
8350   VPValue *StoredValue = Plan->getOrAddVPValue(Store->getValueOperand());
8351   return new VPWidenMemoryInstructionRecipe(*Store, Addr, StoredValue, Mask);
8352 }
8353 
8354 VPWidenIntOrFpInductionRecipe *
8355 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi, VPlan &Plan) const {
8356   // Check if this is an integer or fp induction. If so, build the recipe that
8357   // produces its scalar and vector values.
8358   InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
8359   if (II.getKind() == InductionDescriptor::IK_IntInduction ||
8360       II.getKind() == InductionDescriptor::IK_FpInduction) {
8361     VPValue *Start = Plan.getOrAddVPValue(II.getStartValue());
8362     const SmallVectorImpl<Instruction *> &Casts = II.getCastInsts();
8363     return new VPWidenIntOrFpInductionRecipe(
8364         Phi, Start, Casts.empty() ? nullptr : Casts.front());
8365   }
8366 
8367   return nullptr;
8368 }
8369 
8370 VPWidenIntOrFpInductionRecipe *
8371 VPRecipeBuilder::tryToOptimizeInductionTruncate(TruncInst *I, VFRange &Range,
8372                                                 VPlan &Plan) const {
8373   // Optimize the special case where the source is a constant integer
8374   // induction variable. Notice that we can only optimize the 'trunc' case
8375   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
8376   // (c) other casts depend on pointer size.
8377 
8378   // Determine whether \p K is a truncation based on an induction variable that
8379   // can be optimized.
8380   auto isOptimizableIVTruncate =
8381       [&](Instruction *K) -> std::function<bool(ElementCount)> {
8382     return [=](ElementCount VF) -> bool {
8383       return CM.isOptimizableIVTruncate(K, VF);
8384     };
8385   };
8386 
8387   if (LoopVectorizationPlanner::getDecisionAndClampRange(
8388           isOptimizableIVTruncate(I), Range)) {
8389 
8390     InductionDescriptor II =
8391         Legal->getInductionVars().lookup(cast<PHINode>(I->getOperand(0)));
8392     VPValue *Start = Plan.getOrAddVPValue(II.getStartValue());
8393     return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)),
8394                                              Start, nullptr, I);
8395   }
8396   return nullptr;
8397 }
8398 
8399 VPBlendRecipe *VPRecipeBuilder::tryToBlend(PHINode *Phi, VPlanPtr &Plan) {
8400   // We know that all PHIs in non-header blocks are converted into selects, so
8401   // we don't have to worry about the insertion order and we can just use the
8402   // builder. At this point we generate the predication tree. There may be
8403   // duplications since this is a simple recursive scan, but future
8404   // optimizations will clean it up.
8405 
8406   SmallVector<VPValue *, 2> Operands;
8407   unsigned NumIncoming = Phi->getNumIncomingValues();
8408   for (unsigned In = 0; In < NumIncoming; In++) {
8409     VPValue *EdgeMask =
8410       createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
8411     assert((EdgeMask || NumIncoming == 1) &&
8412            "Multiple predecessors with one having a full mask");
8413     Operands.push_back(Plan->getOrAddVPValue(Phi->getIncomingValue(In)));
8414     if (EdgeMask)
8415       Operands.push_back(EdgeMask);
8416   }
8417   return new VPBlendRecipe(Phi, Operands);
8418 }
8419 
8420 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, VFRange &Range,
8421                                                    VPlan &Plan) const {
8422 
8423   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8424       [this, CI](ElementCount VF) {
8425         return CM.isScalarWithPredication(CI, VF);
8426       },
8427       Range);
8428 
8429   if (IsPredicated)
8430     return nullptr;
8431 
8432   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8433   if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
8434              ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
8435              ID == Intrinsic::pseudoprobe ||
8436              ID == Intrinsic::experimental_noalias_scope_decl))
8437     return nullptr;
8438 
8439   auto willWiden = [&](ElementCount VF) -> bool {
8440     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8441     // The following case may be scalarized depending on the VF.
8442     // The flag shows whether we use Intrinsic or a usual Call for vectorized
8443     // version of the instruction.
8444     // Is it beneficial to perform intrinsic call compared to lib call?
8445     bool NeedToScalarize = false;
8446     InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
8447     InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0;
8448     bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
8449     assert(IntrinsicCost.isValid() && CallCost.isValid() &&
8450            "Cannot have invalid costs while widening");
8451     return UseVectorIntrinsic || !NeedToScalarize;
8452   };
8453 
8454   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8455     return nullptr;
8456 
8457   return new VPWidenCallRecipe(*CI, Plan.mapToVPValues(CI->arg_operands()));
8458 }
8459 
8460 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
8461   assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
8462          !isa<StoreInst>(I) && "Instruction should have been handled earlier");
8463   // Instruction should be widened, unless it is scalar after vectorization,
8464   // scalarization is profitable or it is predicated.
8465   auto WillScalarize = [this, I](ElementCount VF) -> bool {
8466     return CM.isScalarAfterVectorization(I, VF) ||
8467            CM.isProfitableToScalarize(I, VF) ||
8468            CM.isScalarWithPredication(I, VF);
8469   };
8470   return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
8471                                                              Range);
8472 }
8473 
8474 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, VPlan &Plan) const {
8475   auto IsVectorizableOpcode = [](unsigned Opcode) {
8476     switch (Opcode) {
8477     case Instruction::Add:
8478     case Instruction::And:
8479     case Instruction::AShr:
8480     case Instruction::BitCast:
8481     case Instruction::FAdd:
8482     case Instruction::FCmp:
8483     case Instruction::FDiv:
8484     case Instruction::FMul:
8485     case Instruction::FNeg:
8486     case Instruction::FPExt:
8487     case Instruction::FPToSI:
8488     case Instruction::FPToUI:
8489     case Instruction::FPTrunc:
8490     case Instruction::FRem:
8491     case Instruction::FSub:
8492     case Instruction::ICmp:
8493     case Instruction::IntToPtr:
8494     case Instruction::LShr:
8495     case Instruction::Mul:
8496     case Instruction::Or:
8497     case Instruction::PtrToInt:
8498     case Instruction::SDiv:
8499     case Instruction::Select:
8500     case Instruction::SExt:
8501     case Instruction::Shl:
8502     case Instruction::SIToFP:
8503     case Instruction::SRem:
8504     case Instruction::Sub:
8505     case Instruction::Trunc:
8506     case Instruction::UDiv:
8507     case Instruction::UIToFP:
8508     case Instruction::URem:
8509     case Instruction::Xor:
8510     case Instruction::ZExt:
8511       return true;
8512     }
8513     return false;
8514   };
8515 
8516   if (!IsVectorizableOpcode(I->getOpcode()))
8517     return nullptr;
8518 
8519   // Success: widen this instruction.
8520   return new VPWidenRecipe(*I, Plan.mapToVPValues(I->operands()));
8521 }
8522 
8523 VPBasicBlock *VPRecipeBuilder::handleReplication(
8524     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
8525     DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe,
8526     VPlanPtr &Plan) {
8527   bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
8528       [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
8529       Range);
8530 
8531   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8532       [&](ElementCount VF) { return CM.isScalarWithPredication(I, VF); },
8533       Range);
8534 
8535   auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
8536                                        IsUniform, IsPredicated);
8537   setRecipe(I, Recipe);
8538   Plan->addVPValue(I, Recipe);
8539 
8540   // Find if I uses a predicated instruction. If so, it will use its scalar
8541   // value. Avoid hoisting the insert-element which packs the scalar value into
8542   // a vector value, as that happens iff all users use the vector value.
8543   for (auto &Op : I->operands())
8544     if (auto *PredInst = dyn_cast<Instruction>(Op))
8545       if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end())
8546         PredInst2Recipe[PredInst]->setAlsoPack(false);
8547 
8548   // Finalize the recipe for Instr, first if it is not predicated.
8549   if (!IsPredicated) {
8550     LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
8551     VPBB->appendRecipe(Recipe);
8552     return VPBB;
8553   }
8554   LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
8555   assert(VPBB->getSuccessors().empty() &&
8556          "VPBB has successors when handling predicated replication.");
8557   // Record predicated instructions for above packing optimizations.
8558   PredInst2Recipe[I] = Recipe;
8559   VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
8560   VPBlockUtils::insertBlockAfter(Region, VPBB);
8561   auto *RegSucc = new VPBasicBlock();
8562   VPBlockUtils::insertBlockAfter(RegSucc, Region);
8563   return RegSucc;
8564 }
8565 
8566 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
8567                                                       VPRecipeBase *PredRecipe,
8568                                                       VPlanPtr &Plan) {
8569   // Instructions marked for predication are replicated and placed under an
8570   // if-then construct to prevent side-effects.
8571 
8572   // Generate recipes to compute the block mask for this region.
8573   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
8574 
8575   // Build the triangular if-then region.
8576   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
8577   assert(Instr->getParent() && "Predicated instruction not in any basic block");
8578   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
8579   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
8580   auto *PHIRecipe = Instr->getType()->isVoidTy()
8581                         ? nullptr
8582                         : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr));
8583   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
8584   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
8585   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
8586 
8587   // Note: first set Entry as region entry and then connect successors starting
8588   // from it in order, to propagate the "parent" of each VPBasicBlock.
8589   VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
8590   VPBlockUtils::connectBlocks(Pred, Exit);
8591 
8592   return Region;
8593 }
8594 
8595 VPRecipeBase *VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
8596                                                       VFRange &Range,
8597                                                       VPlanPtr &Plan) {
8598   // First, check for specific widening recipes that deal with calls, memory
8599   // operations, inductions and Phi nodes.
8600   if (auto *CI = dyn_cast<CallInst>(Instr))
8601     return tryToWidenCall(CI, Range, *Plan);
8602 
8603   if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8604     return tryToWidenMemory(Instr, Range, Plan);
8605 
8606   VPRecipeBase *Recipe;
8607   if (auto Phi = dyn_cast<PHINode>(Instr)) {
8608     if (Phi->getParent() != OrigLoop->getHeader())
8609       return tryToBlend(Phi, Plan);
8610     if ((Recipe = tryToOptimizeInductionPHI(Phi, *Plan)))
8611       return Recipe;
8612 
8613     if (Legal->isReductionVariable(Phi)) {
8614       RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
8615       VPValue *StartV =
8616           Plan->getOrAddVPValue(RdxDesc.getRecurrenceStartValue());
8617       return new VPWidenPHIRecipe(Phi, RdxDesc, *StartV);
8618     }
8619 
8620     return new VPWidenPHIRecipe(Phi);
8621   }
8622 
8623   if (isa<TruncInst>(Instr) && (Recipe = tryToOptimizeInductionTruncate(
8624                                     cast<TruncInst>(Instr), Range, *Plan)))
8625     return Recipe;
8626 
8627   if (!shouldWiden(Instr, Range))
8628     return nullptr;
8629 
8630   if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
8631     return new VPWidenGEPRecipe(GEP, Plan->mapToVPValues(GEP->operands()),
8632                                 OrigLoop);
8633 
8634   if (auto *SI = dyn_cast<SelectInst>(Instr)) {
8635     bool InvariantCond =
8636         PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
8637     return new VPWidenSelectRecipe(*SI, Plan->mapToVPValues(SI->operands()),
8638                                    InvariantCond);
8639   }
8640 
8641   return tryToWiden(Instr, *Plan);
8642 }
8643 
8644 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8645                                                         ElementCount MaxVF) {
8646   assert(OrigLoop->isInnermost() && "Inner loop expected.");
8647 
8648   // Collect instructions from the original loop that will become trivially dead
8649   // in the vectorized loop. We don't need to vectorize these instructions. For
8650   // example, original induction update instructions can become dead because we
8651   // separately emit induction "steps" when generating code for the new loop.
8652   // Similarly, we create a new latch condition when setting up the structure
8653   // of the new loop, so the old one can become dead.
8654   SmallPtrSet<Instruction *, 4> DeadInstructions;
8655   collectTriviallyDeadInstructions(DeadInstructions);
8656 
8657   // Add assume instructions we need to drop to DeadInstructions, to prevent
8658   // them from being added to the VPlan.
8659   // TODO: We only need to drop assumes in blocks that get flattend. If the
8660   // control flow is preserved, we should keep them.
8661   auto &ConditionalAssumes = Legal->getConditionalAssumes();
8662   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
8663 
8664   DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
8665   // Dead instructions do not need sinking. Remove them from SinkAfter.
8666   for (Instruction *I : DeadInstructions)
8667     SinkAfter.erase(I);
8668 
8669   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8670   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8671     VFRange SubRange = {VF, MaxVFPlusOne};
8672     VPlans.push_back(
8673         buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
8674     VF = SubRange.End;
8675   }
8676 }
8677 
8678 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
8679     VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
8680     const DenseMap<Instruction *, Instruction *> &SinkAfter) {
8681 
8682   // Hold a mapping from predicated instructions to their recipes, in order to
8683   // fix their AlsoPack behavior if a user is determined to replicate and use a
8684   // scalar instead of vector value.
8685   DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe;
8686 
8687   SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
8688 
8689   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
8690 
8691   // ---------------------------------------------------------------------------
8692   // Pre-construction: record ingredients whose recipes we'll need to further
8693   // process after constructing the initial VPlan.
8694   // ---------------------------------------------------------------------------
8695 
8696   // Mark instructions we'll need to sink later and their targets as
8697   // ingredients whose recipe we'll need to record.
8698   for (auto &Entry : SinkAfter) {
8699     RecipeBuilder.recordRecipeOf(Entry.first);
8700     RecipeBuilder.recordRecipeOf(Entry.second);
8701   }
8702   for (auto &Reduction : CM.getInLoopReductionChains()) {
8703     PHINode *Phi = Reduction.first;
8704     RecurKind Kind = Legal->getReductionVars()[Phi].getRecurrenceKind();
8705     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
8706 
8707     RecipeBuilder.recordRecipeOf(Phi);
8708     for (auto &R : ReductionOperations) {
8709       RecipeBuilder.recordRecipeOf(R);
8710       // For min/max reducitons, where we have a pair of icmp/select, we also
8711       // need to record the ICmp recipe, so it can be removed later.
8712       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
8713         RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
8714     }
8715   }
8716 
8717   // For each interleave group which is relevant for this (possibly trimmed)
8718   // Range, add it to the set of groups to be later applied to the VPlan and add
8719   // placeholders for its members' Recipes which we'll be replacing with a
8720   // single VPInterleaveRecipe.
8721   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
8722     auto applyIG = [IG, this](ElementCount VF) -> bool {
8723       return (VF.isVector() && // Query is illegal for VF == 1
8724               CM.getWideningDecision(IG->getInsertPos(), VF) ==
8725                   LoopVectorizationCostModel::CM_Interleave);
8726     };
8727     if (!getDecisionAndClampRange(applyIG, Range))
8728       continue;
8729     InterleaveGroups.insert(IG);
8730     for (unsigned i = 0; i < IG->getFactor(); i++)
8731       if (Instruction *Member = IG->getMember(i))
8732         RecipeBuilder.recordRecipeOf(Member);
8733   };
8734 
8735   // ---------------------------------------------------------------------------
8736   // Build initial VPlan: Scan the body of the loop in a topological order to
8737   // visit each basic block after having visited its predecessor basic blocks.
8738   // ---------------------------------------------------------------------------
8739 
8740   // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
8741   auto Plan = std::make_unique<VPlan>();
8742   VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
8743   Plan->setEntry(VPBB);
8744 
8745   // Scan the body of the loop in a topological order to visit each basic block
8746   // after having visited its predecessor basic blocks.
8747   LoopBlocksDFS DFS(OrigLoop);
8748   DFS.perform(LI);
8749 
8750   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
8751     // Relevant instructions from basic block BB will be grouped into VPRecipe
8752     // ingredients and fill a new VPBasicBlock.
8753     unsigned VPBBsForBB = 0;
8754     auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
8755     VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
8756     VPBB = FirstVPBBForBB;
8757     Builder.setInsertPoint(VPBB);
8758 
8759     // Introduce each ingredient into VPlan.
8760     // TODO: Model and preserve debug instrinsics in VPlan.
8761     for (Instruction &I : BB->instructionsWithoutDebug()) {
8762       Instruction *Instr = &I;
8763 
8764       // First filter out irrelevant instructions, to ensure no recipes are
8765       // built for them.
8766       if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
8767         continue;
8768 
8769       if (auto Recipe =
8770               RecipeBuilder.tryToCreateWidenRecipe(Instr, Range, Plan)) {
8771         for (auto *Def : Recipe->definedValues()) {
8772           auto *UV = Def->getUnderlyingValue();
8773           Plan->addVPValue(UV, Def);
8774         }
8775 
8776         RecipeBuilder.setRecipe(Instr, Recipe);
8777         VPBB->appendRecipe(Recipe);
8778         continue;
8779       }
8780 
8781       // Otherwise, if all widening options failed, Instruction is to be
8782       // replicated. This may create a successor for VPBB.
8783       VPBasicBlock *NextVPBB = RecipeBuilder.handleReplication(
8784           Instr, Range, VPBB, PredInst2Recipe, Plan);
8785       if (NextVPBB != VPBB) {
8786         VPBB = NextVPBB;
8787         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
8788                                     : "");
8789       }
8790     }
8791   }
8792 
8793   // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks
8794   // may also be empty, such as the last one VPBB, reflecting original
8795   // basic-blocks with no recipes.
8796   VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
8797   assert(PreEntry->empty() && "Expecting empty pre-entry block.");
8798   VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
8799   VPBlockUtils::disconnectBlocks(PreEntry, Entry);
8800   delete PreEntry;
8801 
8802   // ---------------------------------------------------------------------------
8803   // Transform initial VPlan: Apply previously taken decisions, in order, to
8804   // bring the VPlan to its final state.
8805   // ---------------------------------------------------------------------------
8806 
8807   // Apply Sink-After legal constraints.
8808   for (auto &Entry : SinkAfter) {
8809     VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
8810     VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
8811     // If the target is in a replication region, make sure to move Sink to the
8812     // block after it, not into the replication region itself.
8813     if (auto *Region =
8814             dyn_cast_or_null<VPRegionBlock>(Target->getParent()->getParent())) {
8815       if (Region->isReplicator()) {
8816         assert(Region->getNumSuccessors() == 1 && "Expected SESE region!");
8817         VPBasicBlock *NextBlock =
8818             cast<VPBasicBlock>(Region->getSuccessors().front());
8819         Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi());
8820         continue;
8821       }
8822     }
8823     Sink->moveAfter(Target);
8824   }
8825 
8826   // Interleave memory: for each Interleave Group we marked earlier as relevant
8827   // for this VPlan, replace the Recipes widening its memory instructions with a
8828   // single VPInterleaveRecipe at its insertion point.
8829   for (auto IG : InterleaveGroups) {
8830     auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
8831         RecipeBuilder.getRecipe(IG->getInsertPos()));
8832     SmallVector<VPValue *, 4> StoredValues;
8833     for (unsigned i = 0; i < IG->getFactor(); ++i)
8834       if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i)))
8835         StoredValues.push_back(Plan->getOrAddVPValue(SI->getOperand(0)));
8836 
8837     auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues,
8838                                         Recipe->getMask());
8839     VPIG->insertBefore(Recipe);
8840     unsigned J = 0;
8841     for (unsigned i = 0; i < IG->getFactor(); ++i)
8842       if (Instruction *Member = IG->getMember(i)) {
8843         if (!Member->getType()->isVoidTy()) {
8844           VPValue *OriginalV = Plan->getVPValue(Member);
8845           Plan->removeVPValueFor(Member);
8846           Plan->addVPValue(Member, VPIG->getVPValue(J));
8847           OriginalV->replaceAllUsesWith(VPIG->getVPValue(J));
8848           J++;
8849         }
8850         RecipeBuilder.getRecipe(Member)->eraseFromParent();
8851       }
8852   }
8853 
8854   // Adjust the recipes for any inloop reductions.
8855   if (Range.Start.isVector())
8856     adjustRecipesForInLoopReductions(Plan, RecipeBuilder);
8857 
8858   // Finally, if tail is folded by masking, introduce selects between the phi
8859   // and the live-out instruction of each reduction, at the end of the latch.
8860   if (CM.foldTailByMasking() && !Legal->getReductionVars().empty()) {
8861     Builder.setInsertPoint(VPBB);
8862     auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
8863     for (auto &Reduction : Legal->getReductionVars()) {
8864       if (CM.isInLoopReduction(Reduction.first))
8865         continue;
8866       VPValue *Phi = Plan->getOrAddVPValue(Reduction.first);
8867       VPValue *Red = Plan->getOrAddVPValue(Reduction.second.getLoopExitInstr());
8868       Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi});
8869     }
8870   }
8871 
8872   std::string PlanName;
8873   raw_string_ostream RSO(PlanName);
8874   ElementCount VF = Range.Start;
8875   Plan->addVF(VF);
8876   RSO << "Initial VPlan for VF={" << VF;
8877   for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) {
8878     Plan->addVF(VF);
8879     RSO << "," << VF;
8880   }
8881   RSO << "},UF>=1";
8882   RSO.flush();
8883   Plan->setName(PlanName);
8884 
8885   return Plan;
8886 }
8887 
8888 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
8889   // Outer loop handling: They may require CFG and instruction level
8890   // transformations before even evaluating whether vectorization is profitable.
8891   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
8892   // the vectorization pipeline.
8893   assert(!OrigLoop->isInnermost());
8894   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
8895 
8896   // Create new empty VPlan
8897   auto Plan = std::make_unique<VPlan>();
8898 
8899   // Build hierarchical CFG
8900   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
8901   HCFGBuilder.buildHierarchicalCFG();
8902 
8903   for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
8904        VF *= 2)
8905     Plan->addVF(VF);
8906 
8907   if (EnableVPlanPredication) {
8908     VPlanPredicator VPP(*Plan);
8909     VPP.predicate();
8910 
8911     // Avoid running transformation to recipes until masked code generation in
8912     // VPlan-native path is in place.
8913     return Plan;
8914   }
8915 
8916   SmallPtrSet<Instruction *, 1> DeadInstructions;
8917   VPlanTransforms::VPInstructionsToVPRecipes(
8918       OrigLoop, Plan, Legal->getInductionVars(), DeadInstructions);
8919   return Plan;
8920 }
8921 
8922 // Adjust the recipes for any inloop reductions. The chain of instructions
8923 // leading from the loop exit instr to the phi need to be converted to
8924 // reductions, with one operand being vector and the other being the scalar
8925 // reduction chain.
8926 void LoopVectorizationPlanner::adjustRecipesForInLoopReductions(
8927     VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder) {
8928   for (auto &Reduction : CM.getInLoopReductionChains()) {
8929     PHINode *Phi = Reduction.first;
8930     RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
8931     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
8932 
8933     // ReductionOperations are orders top-down from the phi's use to the
8934     // LoopExitValue. We keep a track of the previous item (the Chain) to tell
8935     // which of the two operands will remain scalar and which will be reduced.
8936     // For minmax the chain will be the select instructions.
8937     Instruction *Chain = Phi;
8938     for (Instruction *R : ReductionOperations) {
8939       VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
8940       RecurKind Kind = RdxDesc.getRecurrenceKind();
8941 
8942       VPValue *ChainOp = Plan->getVPValue(Chain);
8943       unsigned FirstOpId;
8944       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
8945         assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
8946                "Expected to replace a VPWidenSelectSC");
8947         FirstOpId = 1;
8948       } else {
8949         assert(isa<VPWidenRecipe>(WidenRecipe) &&
8950                "Expected to replace a VPWidenSC");
8951         FirstOpId = 0;
8952       }
8953       unsigned VecOpId =
8954           R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
8955       VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));
8956 
8957       auto *CondOp = CM.foldTailByMasking()
8958                          ? RecipeBuilder.createBlockInMask(R->getParent(), Plan)
8959                          : nullptr;
8960       VPReductionRecipe *RedRecipe = new VPReductionRecipe(
8961           &RdxDesc, R, ChainOp, VecOp, CondOp, TTI);
8962       WidenRecipe->getVPValue()->replaceAllUsesWith(RedRecipe);
8963       Plan->removeVPValueFor(R);
8964       Plan->addVPValue(R, RedRecipe);
8965       WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator());
8966       WidenRecipe->getVPValue()->replaceAllUsesWith(RedRecipe);
8967       WidenRecipe->eraseFromParent();
8968 
8969       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
8970         VPRecipeBase *CompareRecipe =
8971             RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
8972         assert(isa<VPWidenRecipe>(CompareRecipe) &&
8973                "Expected to replace a VPWidenSC");
8974         assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 &&
8975                "Expected no remaining users");
8976         CompareRecipe->eraseFromParent();
8977       }
8978       Chain = R;
8979     }
8980   }
8981 }
8982 
8983 Value* LoopVectorizationPlanner::VPCallbackILV::
8984 getOrCreateVectorValues(Value *V, unsigned Part) {
8985       return ILV.getOrCreateVectorValue(V, Part);
8986 }
8987 
8988 Value *LoopVectorizationPlanner::VPCallbackILV::getOrCreateScalarValue(
8989     Value *V, const VPIteration &Instance) {
8990   return ILV.getOrCreateScalarValue(V, Instance);
8991 }
8992 
8993 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
8994                                VPSlotTracker &SlotTracker) const {
8995   O << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
8996   IG->getInsertPos()->printAsOperand(O, false);
8997   O << ", ";
8998   getAddr()->printAsOperand(O, SlotTracker);
8999   VPValue *Mask = getMask();
9000   if (Mask) {
9001     O << ", ";
9002     Mask->printAsOperand(O, SlotTracker);
9003   }
9004   for (unsigned i = 0; i < IG->getFactor(); ++i)
9005     if (Instruction *I = IG->getMember(i))
9006       O << "\\l\" +\n" << Indent << "\"  " << VPlanIngredient(I) << " " << i;
9007 }
9008 
9009 void VPWidenCallRecipe::execute(VPTransformState &State) {
9010   State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this,
9011                                   *this, State);
9012 }
9013 
9014 void VPWidenSelectRecipe::execute(VPTransformState &State) {
9015   State.ILV->widenSelectInstruction(*cast<SelectInst>(getUnderlyingInstr()),
9016                                     this, *this, InvariantCond, State);
9017 }
9018 
9019 void VPWidenRecipe::execute(VPTransformState &State) {
9020   State.ILV->widenInstruction(*getUnderlyingInstr(), this, *this, State);
9021 }
9022 
9023 void VPWidenGEPRecipe::execute(VPTransformState &State) {
9024   State.ILV->widenGEP(cast<GetElementPtrInst>(getUnderlyingInstr()), this,
9025                       *this, State.UF, State.VF, IsPtrLoopInvariant,
9026                       IsIndexLoopInvariant, State);
9027 }
9028 
9029 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
9030   assert(!State.Instance && "Int or FP induction being replicated.");
9031   State.ILV->widenIntOrFpInduction(IV, getStartValue()->getLiveInIRValue(),
9032                                    getTruncInst(), getVPValue(0),
9033                                    getCastValue(), State);
9034 }
9035 
9036 void VPWidenPHIRecipe::execute(VPTransformState &State) {
9037   Value *StartV =
9038       getStartValue() ? getStartValue()->getLiveInIRValue() : nullptr;
9039   State.ILV->widenPHIInstruction(Phi, RdxDesc, StartV, State.UF, State.VF);
9040 }
9041 
9042 void VPBlendRecipe::execute(VPTransformState &State) {
9043   State.ILV->setDebugLocFromInst(State.Builder, Phi);
9044   // We know that all PHIs in non-header blocks are converted into
9045   // selects, so we don't have to worry about the insertion order and we
9046   // can just use the builder.
9047   // At this point we generate the predication tree. There may be
9048   // duplications since this is a simple recursive scan, but future
9049   // optimizations will clean it up.
9050 
9051   unsigned NumIncoming = getNumIncomingValues();
9052 
9053   // Generate a sequence of selects of the form:
9054   // SELECT(Mask3, In3,
9055   //        SELECT(Mask2, In2,
9056   //               SELECT(Mask1, In1,
9057   //                      In0)))
9058   // Note that Mask0 is never used: lanes for which no path reaches this phi and
9059   // are essentially undef are taken from In0.
9060   InnerLoopVectorizer::VectorParts Entry(State.UF);
9061   for (unsigned In = 0; In < NumIncoming; ++In) {
9062     for (unsigned Part = 0; Part < State.UF; ++Part) {
9063       // We might have single edge PHIs (blocks) - use an identity
9064       // 'select' for the first PHI operand.
9065       Value *In0 = State.get(getIncomingValue(In), Part);
9066       if (In == 0)
9067         Entry[Part] = In0; // Initialize with the first incoming value.
9068       else {
9069         // Select between the current value and the previous incoming edge
9070         // based on the incoming mask.
9071         Value *Cond = State.get(getMask(In), Part);
9072         Entry[Part] =
9073             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
9074       }
9075     }
9076   }
9077   for (unsigned Part = 0; Part < State.UF; ++Part)
9078     State.ValueMap.setVectorValue(Phi, Part, Entry[Part]);
9079 }
9080 
9081 void VPInterleaveRecipe::execute(VPTransformState &State) {
9082   assert(!State.Instance && "Interleave group being replicated.");
9083   State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(),
9084                                       getStoredValues(), getMask());
9085 }
9086 
9087 void VPReductionRecipe::execute(VPTransformState &State) {
9088   assert(!State.Instance && "Reduction being replicated.");
9089   for (unsigned Part = 0; Part < State.UF; ++Part) {
9090     RecurKind Kind = RdxDesc->getRecurrenceKind();
9091     Value *NewVecOp = State.get(getVecOp(), Part);
9092     if (VPValue *Cond = getCondOp()) {
9093       Value *NewCond = State.get(Cond, Part);
9094       VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
9095       Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
9096           Kind, VecTy->getElementType());
9097       Constant *IdenVec =
9098           ConstantVector::getSplat(VecTy->getElementCount(), Iden);
9099       Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
9100       NewVecOp = Select;
9101     }
9102     Value *NewRed =
9103         createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
9104     Value *PrevInChain = State.get(getChainOp(), Part);
9105     Value *NextInChain;
9106     if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9107       NextInChain =
9108           createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
9109                          NewRed, PrevInChain);
9110     } else {
9111       NextInChain = State.Builder.CreateBinOp(
9112           (Instruction::BinaryOps)getUnderlyingInstr()->getOpcode(), NewRed,
9113           PrevInChain);
9114     }
9115     State.set(this, getUnderlyingInstr(), NextInChain, Part);
9116   }
9117 }
9118 
9119 void VPReplicateRecipe::execute(VPTransformState &State) {
9120   if (State.Instance) { // Generate a single instance.
9121     assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
9122     State.ILV->scalarizeInstruction(getUnderlyingInstr(), *this,
9123                                     *State.Instance, IsPredicated, State);
9124     // Insert scalar instance packing it into a vector.
9125     if (AlsoPack && State.VF.isVector()) {
9126       // If we're constructing lane 0, initialize to start from poison.
9127       if (State.Instance->Lane == 0) {
9128         assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
9129         Value *Poison = PoisonValue::get(
9130             VectorType::get(getUnderlyingValue()->getType(), State.VF));
9131         State.ValueMap.setVectorValue(getUnderlyingInstr(),
9132                                       State.Instance->Part, Poison);
9133       }
9134       State.ILV->packScalarIntoVectorValue(getUnderlyingInstr(),
9135                                            *State.Instance);
9136     }
9137     return;
9138   }
9139 
9140   // Generate scalar instances for all VF lanes of all UF parts, unless the
9141   // instruction is uniform inwhich case generate only the first lane for each
9142   // of the UF parts.
9143   unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
9144   assert((!State.VF.isScalable() || IsUniform) &&
9145          "Can't scalarize a scalable vector");
9146   for (unsigned Part = 0; Part < State.UF; ++Part)
9147     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
9148       State.ILV->scalarizeInstruction(getUnderlyingInstr(), *this,
9149                                       VPIteration(Part, Lane), IsPredicated,
9150                                       State);
9151 }
9152 
9153 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
9154   assert(State.Instance && "Branch on Mask works only on single instance.");
9155 
9156   unsigned Part = State.Instance->Part;
9157   unsigned Lane = State.Instance->Lane;
9158 
9159   Value *ConditionBit = nullptr;
9160   VPValue *BlockInMask = getMask();
9161   if (BlockInMask) {
9162     ConditionBit = State.get(BlockInMask, Part);
9163     if (ConditionBit->getType()->isVectorTy())
9164       ConditionBit = State.Builder.CreateExtractElement(
9165           ConditionBit, State.Builder.getInt32(Lane));
9166   } else // Block in mask is all-one.
9167     ConditionBit = State.Builder.getTrue();
9168 
9169   // Replace the temporary unreachable terminator with a new conditional branch,
9170   // whose two destinations will be set later when they are created.
9171   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
9172   assert(isa<UnreachableInst>(CurrentTerminator) &&
9173          "Expected to replace unreachable terminator with conditional branch.");
9174   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
9175   CondBr->setSuccessor(0, nullptr);
9176   ReplaceInstWithInst(CurrentTerminator, CondBr);
9177 }
9178 
9179 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
9180   assert(State.Instance && "Predicated instruction PHI works per instance.");
9181   Instruction *ScalarPredInst =
9182       cast<Instruction>(State.get(getOperand(0), *State.Instance));
9183   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
9184   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
9185   assert(PredicatingBB && "Predicated block has no single predecessor.");
9186 
9187   // By current pack/unpack logic we need to generate only a single phi node: if
9188   // a vector value for the predicated instruction exists at this point it means
9189   // the instruction has vector users only, and a phi for the vector value is
9190   // needed. In this case the recipe of the predicated instruction is marked to
9191   // also do that packing, thereby "hoisting" the insert-element sequence.
9192   // Otherwise, a phi node for the scalar value is needed.
9193   unsigned Part = State.Instance->Part;
9194   Instruction *PredInst =
9195       cast<Instruction>(getOperand(0)->getUnderlyingValue());
9196   if (State.ValueMap.hasVectorValue(PredInst, Part)) {
9197     Value *VectorValue = State.ValueMap.getVectorValue(PredInst, Part);
9198     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
9199     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
9200     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
9201     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
9202     State.ValueMap.resetVectorValue(PredInst, Part, VPhi); // Update cache.
9203   } else {
9204     Type *PredInstType = PredInst->getType();
9205     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
9206     Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()), PredicatingBB);
9207     Phi->addIncoming(ScalarPredInst, PredicatedBB);
9208     State.ValueMap.resetScalarValue(PredInst, *State.Instance, Phi);
9209   }
9210 }
9211 
9212 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
9213   VPValue *StoredValue = isStore() ? getStoredValue() : nullptr;
9214   State.ILV->vectorizeMemoryInstruction(&Ingredient, State,
9215                                         StoredValue ? nullptr : getVPValue(),
9216                                         getAddr(), StoredValue, getMask());
9217 }
9218 
9219 // Determine how to lower the scalar epilogue, which depends on 1) optimising
9220 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
9221 // predication, and 4) a TTI hook that analyses whether the loop is suitable
9222 // for predication.
9223 static ScalarEpilogueLowering getScalarEpilogueLowering(
9224     Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
9225     BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
9226     AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
9227     LoopVectorizationLegality &LVL) {
9228   // 1) OptSize takes precedence over all other options, i.e. if this is set,
9229   // don't look at hints or options, and don't request a scalar epilogue.
9230   // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
9231   // LoopAccessInfo (due to code dependency and not being able to reliably get
9232   // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
9233   // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
9234   // versioning when the vectorization is forced, unlike hasOptSize. So revert
9235   // back to the old way and vectorize with versioning when forced. See D81345.)
9236   if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
9237                                                       PGSOQueryType::IRPass) &&
9238                           Hints.getForce() != LoopVectorizeHints::FK_Enabled))
9239     return CM_ScalarEpilogueNotAllowedOptSize;
9240 
9241   // 2) If set, obey the directives
9242   if (PreferPredicateOverEpilogue.getNumOccurrences()) {
9243     switch (PreferPredicateOverEpilogue) {
9244     case PreferPredicateTy::ScalarEpilogue:
9245       return CM_ScalarEpilogueAllowed;
9246     case PreferPredicateTy::PredicateElseScalarEpilogue:
9247       return CM_ScalarEpilogueNotNeededUsePredicate;
9248     case PreferPredicateTy::PredicateOrDontVectorize:
9249       return CM_ScalarEpilogueNotAllowedUsePredicate;
9250     };
9251   }
9252 
9253   // 3) If set, obey the hints
9254   switch (Hints.getPredicate()) {
9255   case LoopVectorizeHints::FK_Enabled:
9256     return CM_ScalarEpilogueNotNeededUsePredicate;
9257   case LoopVectorizeHints::FK_Disabled:
9258     return CM_ScalarEpilogueAllowed;
9259   };
9260 
9261   // 4) if the TTI hook indicates this is profitable, request predication.
9262   if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
9263                                        LVL.getLAI()))
9264     return CM_ScalarEpilogueNotNeededUsePredicate;
9265 
9266   return CM_ScalarEpilogueAllowed;
9267 }
9268 
9269 void VPTransformState::set(VPValue *Def, Value *IRDef, Value *V,
9270                            const VPIteration &Instance) {
9271   set(Def, V, Instance);
9272   ILV->setScalarValue(IRDef, Instance, V);
9273 }
9274 
9275 void VPTransformState::set(VPValue *Def, Value *IRDef, Value *V,
9276                            unsigned Part) {
9277   set(Def, V, Part);
9278   ILV->setVectorValue(IRDef, Part, V);
9279 }
9280 
9281 void VPTransformState::reset(VPValue *Def, Value *IRDef, Value *V,
9282                              unsigned Part) {
9283   set(Def, V, Part);
9284   ILV->resetVectorValue(IRDef, Part, V);
9285 }
9286 
9287 Value *VPTransformState::get(VPValue *Def, unsigned Part) {
9288   // If Values have been set for this Def return the one relevant for \p Part.
9289   if (hasVectorValue(Def, Part))
9290     return Data.PerPartOutput[Def][Part];
9291 
9292   // TODO: Remove the callback once all scalar recipes are managed using
9293   // VPValues.
9294   if (!hasScalarValue(Def, {Part, 0}))
9295     return Callback.getOrCreateVectorValues(VPValue2Value[Def], Part);
9296 
9297   Value *ScalarValue = get(Def, {Part, 0});
9298   // If we aren't vectorizing, we can just copy the scalar map values over
9299   // to the vector map.
9300   if (VF.isScalar()) {
9301     set(Def, ScalarValue, Part);
9302     return ScalarValue;
9303   }
9304 
9305   auto *RepR = dyn_cast<VPReplicateRecipe>(Def);
9306   bool IsUniform = RepR && RepR->isUniform();
9307 
9308   unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
9309   auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
9310 
9311   // Set the insert point after the last scalarized instruction. This
9312   // ensures the insertelement sequence will directly follow the scalar
9313   // definitions.
9314   auto OldIP = Builder.saveIP();
9315   auto NewIP = std::next(BasicBlock::iterator(LastInst));
9316   Builder.SetInsertPoint(&*NewIP);
9317 
9318   // However, if we are vectorizing, we need to construct the vector values.
9319   // If the value is known to be uniform after vectorization, we can just
9320   // broadcast the scalar value corresponding to lane zero for each unroll
9321   // iteration. Otherwise, we construct the vector values using
9322   // insertelement instructions. Since the resulting vectors are stored in
9323   // VectorLoopValueMap, we will only generate the insertelements once.
9324   Value *VectorValue = nullptr;
9325   if (IsUniform) {
9326     VectorValue = ILV->getBroadcastInstrs(ScalarValue);
9327     set(Def, VectorValue, Part);
9328   } else {
9329     // Initialize packing with insertelements to start from undef.
9330     assert(!VF.isScalable() && "VF is assumed to be non scalable.");
9331     Value *Undef = UndefValue::get(VectorType::get(LastInst->getType(), VF));
9332     set(Def, Undef, Part);
9333     for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
9334       ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
9335     VectorValue = get(Def, Part);
9336   }
9337   Builder.restoreIP(OldIP);
9338   return VectorValue;
9339 }
9340 
9341 // Process the loop in the VPlan-native vectorization path. This path builds
9342 // VPlan upfront in the vectorization pipeline, which allows to apply
9343 // VPlan-to-VPlan transformations from the very beginning without modifying the
9344 // input LLVM IR.
9345 static bool processLoopInVPlanNativePath(
9346     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
9347     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
9348     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
9349     OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
9350     ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints) {
9351 
9352   if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
9353     LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
9354     return false;
9355   }
9356   assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
9357   Function *F = L->getHeader()->getParent();
9358   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
9359 
9360   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
9361       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
9362 
9363   LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
9364                                 &Hints, IAI);
9365   // Use the planner for outer loop vectorization.
9366   // TODO: CM is not used at this point inside the planner. Turn CM into an
9367   // optional argument if we don't need it in the future.
9368   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE);
9369 
9370   // Get user vectorization factor.
9371   ElementCount UserVF = Hints.getWidth();
9372 
9373   // Plan how to best vectorize, return the best VF and its cost.
9374   const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
9375 
9376   // If we are stress testing VPlan builds, do not attempt to generate vector
9377   // code. Masked vector code generation support will follow soon.
9378   // Also, do not attempt to vectorize if no vector code will be produced.
9379   if (VPlanBuildStressTest || EnableVPlanPredication ||
9380       VectorizationFactor::Disabled() == VF)
9381     return false;
9382 
9383   LVP.setBestPlan(VF.Width, 1);
9384 
9385   InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
9386                          &CM, BFI, PSI);
9387   LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
9388                     << L->getHeader()->getParent()->getName() << "\"\n");
9389   LVP.executePlan(LB, DT);
9390 
9391   // Mark the loop as already vectorized to avoid vectorizing again.
9392   Hints.setAlreadyVectorized();
9393 
9394   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
9395   return true;
9396 }
9397 
9398 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
9399     : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
9400                                !EnableLoopInterleaving),
9401       VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
9402                               !EnableLoopVectorization) {}
9403 
9404 bool LoopVectorizePass::processLoop(Loop *L) {
9405   assert((EnableVPlanNativePath || L->isInnermost()) &&
9406          "VPlan-native path is not enabled. Only process inner loops.");
9407 
9408 #ifndef NDEBUG
9409   const std::string DebugLocStr = getDebugLocString(L);
9410 #endif /* NDEBUG */
9411 
9412   LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
9413                     << L->getHeader()->getParent()->getName() << "\" from "
9414                     << DebugLocStr << "\n");
9415 
9416   LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE);
9417 
9418   LLVM_DEBUG(
9419       dbgs() << "LV: Loop hints:"
9420              << " force="
9421              << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
9422                      ? "disabled"
9423                      : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
9424                             ? "enabled"
9425                             : "?"))
9426              << " width=" << Hints.getWidth()
9427              << " unroll=" << Hints.getInterleave() << "\n");
9428 
9429   // Function containing loop
9430   Function *F = L->getHeader()->getParent();
9431 
9432   // Looking at the diagnostic output is the only way to determine if a loop
9433   // was vectorized (other than looking at the IR or machine code), so it
9434   // is important to generate an optimization remark for each loop. Most of
9435   // these messages are generated as OptimizationRemarkAnalysis. Remarks
9436   // generated as OptimizationRemark and OptimizationRemarkMissed are
9437   // less verbose reporting vectorized loops and unvectorized loops that may
9438   // benefit from vectorization, respectively.
9439 
9440   if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
9441     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
9442     return false;
9443   }
9444 
9445   PredicatedScalarEvolution PSE(*SE, *L);
9446 
9447   // Check if it is legal to vectorize the loop.
9448   LoopVectorizationRequirements Requirements(*ORE);
9449   LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
9450                                 &Requirements, &Hints, DB, AC, BFI, PSI);
9451   if (!LVL.canVectorize(EnableVPlanNativePath)) {
9452     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
9453     Hints.emitRemarkWithHints();
9454     return false;
9455   }
9456 
9457   // Check the function attributes and profiles to find out if this function
9458   // should be optimized for size.
9459   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
9460       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);
9461 
9462   // Entrance to the VPlan-native vectorization path. Outer loops are processed
9463   // here. They may require CFG and instruction level transformations before
9464   // even evaluating whether vectorization is profitable. Since we cannot modify
9465   // the incoming IR, we need to build VPlan upfront in the vectorization
9466   // pipeline.
9467   if (!L->isInnermost())
9468     return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
9469                                         ORE, BFI, PSI, Hints);
9470 
9471   assert(L->isInnermost() && "Inner loop expected.");
9472 
9473   // Check the loop for a trip count threshold: vectorize loops with a tiny trip
9474   // count by optimizing for size, to minimize overheads.
9475   auto ExpectedTC = getSmallBestKnownTC(*SE, L);
9476   if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
9477     LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
9478                       << "This loop is worth vectorizing only if no scalar "
9479                       << "iteration overheads are incurred.");
9480     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
9481       LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
9482     else {
9483       LLVM_DEBUG(dbgs() << "\n");
9484       SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
9485     }
9486   }
9487 
9488   // Check the function attributes to see if implicit floats are allowed.
9489   // FIXME: This check doesn't seem possibly correct -- what if the loop is
9490   // an integer loop and the vector instructions selected are purely integer
9491   // vector instructions?
9492   if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
9493     reportVectorizationFailure(
9494         "Can't vectorize when the NoImplicitFloat attribute is used",
9495         "loop not vectorized due to NoImplicitFloat attribute",
9496         "NoImplicitFloat", ORE, L);
9497     Hints.emitRemarkWithHints();
9498     return false;
9499   }
9500 
9501   // Check if the target supports potentially unsafe FP vectorization.
9502   // FIXME: Add a check for the type of safety issue (denormal, signaling)
9503   // for the target we're vectorizing for, to make sure none of the
9504   // additional fp-math flags can help.
9505   if (Hints.isPotentiallyUnsafe() &&
9506       TTI->isFPVectorizationPotentiallyUnsafe()) {
9507     reportVectorizationFailure(
9508         "Potentially unsafe FP op prevents vectorization",
9509         "loop not vectorized due to unsafe FP support.",
9510         "UnsafeFP", ORE, L);
9511     Hints.emitRemarkWithHints();
9512     return false;
9513   }
9514 
9515   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
9516   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
9517 
9518   // If an override option has been passed in for interleaved accesses, use it.
9519   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
9520     UseInterleaved = EnableInterleavedMemAccesses;
9521 
9522   // Analyze interleaved memory accesses.
9523   if (UseInterleaved) {
9524     IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
9525   }
9526 
9527   // Use the cost model.
9528   LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
9529                                 F, &Hints, IAI);
9530   CM.collectValuesToIgnore();
9531 
9532   // Use the planner for vectorization.
9533   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE);
9534 
9535   // Get user vectorization factor and interleave count.
9536   ElementCount UserVF = Hints.getWidth();
9537   unsigned UserIC = Hints.getInterleave();
9538 
9539   // Plan how to best vectorize, return the best VF and its cost.
9540   Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
9541 
9542   VectorizationFactor VF = VectorizationFactor::Disabled();
9543   unsigned IC = 1;
9544 
9545   if (MaybeVF) {
9546     VF = *MaybeVF;
9547     // Select the interleave count.
9548     IC = CM.selectInterleaveCount(VF.Width, VF.Cost);
9549   }
9550 
9551   // Identify the diagnostic messages that should be produced.
9552   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
9553   bool VectorizeLoop = true, InterleaveLoop = true;
9554   if (Requirements.doesNotMeet(F, L, Hints)) {
9555     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
9556                          "requirements.\n");
9557     Hints.emitRemarkWithHints();
9558     return false;
9559   }
9560 
9561   if (VF.Width.isScalar()) {
9562     LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
9563     VecDiagMsg = std::make_pair(
9564         "VectorizationNotBeneficial",
9565         "the cost-model indicates that vectorization is not beneficial");
9566     VectorizeLoop = false;
9567   }
9568 
9569   if (!MaybeVF && UserIC > 1) {
9570     // Tell the user interleaving was avoided up-front, despite being explicitly
9571     // requested.
9572     LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
9573                          "interleaving should be avoided up front\n");
9574     IntDiagMsg = std::make_pair(
9575         "InterleavingAvoided",
9576         "Ignoring UserIC, because interleaving was avoided up front");
9577     InterleaveLoop = false;
9578   } else if (IC == 1 && UserIC <= 1) {
9579     // Tell the user interleaving is not beneficial.
9580     LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
9581     IntDiagMsg = std::make_pair(
9582         "InterleavingNotBeneficial",
9583         "the cost-model indicates that interleaving is not beneficial");
9584     InterleaveLoop = false;
9585     if (UserIC == 1) {
9586       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
9587       IntDiagMsg.second +=
9588           " and is explicitly disabled or interleave count is set to 1";
9589     }
9590   } else if (IC > 1 && UserIC == 1) {
9591     // Tell the user interleaving is beneficial, but it explicitly disabled.
9592     LLVM_DEBUG(
9593         dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
9594     IntDiagMsg = std::make_pair(
9595         "InterleavingBeneficialButDisabled",
9596         "the cost-model indicates that interleaving is beneficial "
9597         "but is explicitly disabled or interleave count is set to 1");
9598     InterleaveLoop = false;
9599   }
9600 
9601   // Override IC if user provided an interleave count.
9602   IC = UserIC > 0 ? UserIC : IC;
9603 
9604   // Emit diagnostic messages, if any.
9605   const char *VAPassName = Hints.vectorizeAnalysisPassName();
9606   if (!VectorizeLoop && !InterleaveLoop) {
9607     // Do not vectorize or interleaving the loop.
9608     ORE->emit([&]() {
9609       return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
9610                                       L->getStartLoc(), L->getHeader())
9611              << VecDiagMsg.second;
9612     });
9613     ORE->emit([&]() {
9614       return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
9615                                       L->getStartLoc(), L->getHeader())
9616              << IntDiagMsg.second;
9617     });
9618     return false;
9619   } else if (!VectorizeLoop && InterleaveLoop) {
9620     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
9621     ORE->emit([&]() {
9622       return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
9623                                         L->getStartLoc(), L->getHeader())
9624              << VecDiagMsg.second;
9625     });
9626   } else if (VectorizeLoop && !InterleaveLoop) {
9627     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
9628                       << ") in " << DebugLocStr << '\n');
9629     ORE->emit([&]() {
9630       return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
9631                                         L->getStartLoc(), L->getHeader())
9632              << IntDiagMsg.second;
9633     });
9634   } else if (VectorizeLoop && InterleaveLoop) {
9635     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
9636                       << ") in " << DebugLocStr << '\n');
9637     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
9638   }
9639 
9640   LVP.setBestPlan(VF.Width, IC);
9641 
9642   using namespace ore;
9643   bool DisableRuntimeUnroll = false;
9644   MDNode *OrigLoopID = L->getLoopID();
9645 
9646   if (!VectorizeLoop) {
9647     assert(IC > 1 && "interleave count should not be 1 or 0");
9648     // If we decided that it is not legal to vectorize the loop, then
9649     // interleave it.
9650     InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL, &CM,
9651                                BFI, PSI);
9652     LVP.executePlan(Unroller, DT);
9653 
9654     ORE->emit([&]() {
9655       return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
9656                                 L->getHeader())
9657              << "interleaved loop (interleaved count: "
9658              << NV("InterleaveCount", IC) << ")";
9659     });
9660   } else {
9661     // If we decided that it is *legal* to vectorize the loop, then do it.
9662 
9663     // Consider vectorizing the epilogue too if it's profitable.
9664     VectorizationFactor EpilogueVF =
9665       CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
9666     if (EpilogueVF.Width.isVector()) {
9667 
9668       // The first pass vectorizes the main loop and creates a scalar epilogue
9669       // to be vectorized by executing the plan (potentially with a different
9670       // factor) again shortly afterwards.
9671       EpilogueLoopVectorizationInfo EPI(VF.Width.getKnownMinValue(), IC,
9672                                         EpilogueVF.Width.getKnownMinValue(), 1);
9673       EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE, EPI,
9674                                          &LVL, &CM, BFI, PSI);
9675 
9676       LVP.setBestPlan(EPI.MainLoopVF, EPI.MainLoopUF);
9677       LVP.executePlan(MainILV, DT);
9678       ++LoopsVectorized;
9679 
9680       simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
9681       formLCSSARecursively(*L, *DT, LI, SE);
9682 
9683       // Second pass vectorizes the epilogue and adjusts the control flow
9684       // edges from the first pass.
9685       LVP.setBestPlan(EPI.EpilogueVF, EPI.EpilogueUF);
9686       EPI.MainLoopVF = EPI.EpilogueVF;
9687       EPI.MainLoopUF = EPI.EpilogueUF;
9688       EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
9689                                                ORE, EPI, &LVL, &CM, BFI, PSI);
9690       LVP.executePlan(EpilogILV, DT);
9691       ++LoopsEpilogueVectorized;
9692 
9693       if (!MainILV.areSafetyChecksAdded())
9694         DisableRuntimeUnroll = true;
9695     } else {
9696       InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
9697                              &LVL, &CM, BFI, PSI);
9698       LVP.executePlan(LB, DT);
9699       ++LoopsVectorized;
9700 
9701       // Add metadata to disable runtime unrolling a scalar loop when there are
9702       // no runtime checks about strides and memory. A scalar loop that is
9703       // rarely used is not worth unrolling.
9704       if (!LB.areSafetyChecksAdded())
9705         DisableRuntimeUnroll = true;
9706     }
9707 
9708     // Report the vectorization decision.
9709     ORE->emit([&]() {
9710       return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
9711                                 L->getHeader())
9712              << "vectorized loop (vectorization width: "
9713              << NV("VectorizationFactor", VF.Width)
9714              << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
9715     });
9716   }
9717 
9718   Optional<MDNode *> RemainderLoopID =
9719       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
9720                                       LLVMLoopVectorizeFollowupEpilogue});
9721   if (RemainderLoopID.hasValue()) {
9722     L->setLoopID(RemainderLoopID.getValue());
9723   } else {
9724     if (DisableRuntimeUnroll)
9725       AddRuntimeUnrollDisableMetaData(L);
9726 
9727     // Mark the loop as already vectorized to avoid vectorizing again.
9728     Hints.setAlreadyVectorized();
9729   }
9730 
9731   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
9732   return true;
9733 }
9734 
9735 LoopVectorizeResult LoopVectorizePass::runImpl(
9736     Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
9737     DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
9738     DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
9739     std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
9740     OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
9741   SE = &SE_;
9742   LI = &LI_;
9743   TTI = &TTI_;
9744   DT = &DT_;
9745   BFI = &BFI_;
9746   TLI = TLI_;
9747   AA = &AA_;
9748   AC = &AC_;
9749   GetLAA = &GetLAA_;
9750   DB = &DB_;
9751   ORE = &ORE_;
9752   PSI = PSI_;
9753 
9754   // Don't attempt if
9755   // 1. the target claims to have no vector registers, and
9756   // 2. interleaving won't help ILP.
9757   //
9758   // The second condition is necessary because, even if the target has no
9759   // vector registers, loop vectorization may still enable scalar
9760   // interleaving.
9761   if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
9762       TTI->getMaxInterleaveFactor(1) < 2)
9763     return LoopVectorizeResult(false, false);
9764 
9765   bool Changed = false, CFGChanged = false;
9766 
9767   // The vectorizer requires loops to be in simplified form.
9768   // Since simplification may add new inner loops, it has to run before the
9769   // legality and profitability checks. This means running the loop vectorizer
9770   // will simplify all loops, regardless of whether anything end up being
9771   // vectorized.
9772   for (auto &L : *LI)
9773     Changed |= CFGChanged |=
9774         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
9775 
9776   // Build up a worklist of inner-loops to vectorize. This is necessary as
9777   // the act of vectorizing or partially unrolling a loop creates new loops
9778   // and can invalidate iterators across the loops.
9779   SmallVector<Loop *, 8> Worklist;
9780 
9781   for (Loop *L : *LI)
9782     collectSupportedLoops(*L, LI, ORE, Worklist);
9783 
9784   LoopsAnalyzed += Worklist.size();
9785 
9786   // Now walk the identified inner loops.
9787   while (!Worklist.empty()) {
9788     Loop *L = Worklist.pop_back_val();
9789 
9790     // For the inner loops we actually process, form LCSSA to simplify the
9791     // transform.
9792     Changed |= formLCSSARecursively(*L, *DT, LI, SE);
9793 
9794     Changed |= CFGChanged |= processLoop(L);
9795   }
9796 
9797   // Process each loop nest in the function.
9798   return LoopVectorizeResult(Changed, CFGChanged);
9799 }
9800 
9801 PreservedAnalyses LoopVectorizePass::run(Function &F,
9802                                          FunctionAnalysisManager &AM) {
9803     auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
9804     auto &LI = AM.getResult<LoopAnalysis>(F);
9805     auto &TTI = AM.getResult<TargetIRAnalysis>(F);
9806     auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
9807     auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
9808     auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
9809     auto &AA = AM.getResult<AAManager>(F);
9810     auto &AC = AM.getResult<AssumptionAnalysis>(F);
9811     auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
9812     auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
9813     MemorySSA *MSSA = EnableMSSALoopDependency
9814                           ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA()
9815                           : nullptr;
9816 
9817     auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
9818     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
9819         [&](Loop &L) -> const LoopAccessInfo & {
9820       LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,  SE,
9821                                         TLI, TTI, nullptr, MSSA};
9822       return LAM.getResult<LoopAccessAnalysis>(L, AR);
9823     };
9824     auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
9825     ProfileSummaryInfo *PSI =
9826         MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
9827     LoopVectorizeResult Result =
9828         runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
9829     if (!Result.MadeAnyChange)
9830       return PreservedAnalyses::all();
9831     PreservedAnalyses PA;
9832 
9833     // We currently do not preserve loopinfo/dominator analyses with outer loop
9834     // vectorization. Until this is addressed, mark these analyses as preserved
9835     // only for non-VPlan-native path.
9836     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
9837     if (!EnableVPlanNativePath) {
9838       PA.preserve<LoopAnalysis>();
9839       PA.preserve<DominatorTreeAnalysis>();
9840     }
9841     PA.preserve<BasicAA>();
9842     PA.preserve<GlobalsAA>();
9843     if (!Result.MadeCFGChange)
9844       PA.preserveSet<CFGAnalyses>();
9845     return PA;
9846 }
9847