//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
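//
// As an illustrative sketch (not code from this file), a scalar loop such as
//
//   for (i = 0; i < n; i += 1)
//     A[i] = B[i] + C[i];
//
// is conceptually rewritten for a vectorization factor (VF) of 4 into
//
//   for (i = 0; i < n; i += 4)
//     A[i:i+3] = B[i:i+3] + C[i:i+3]; // one wide load, add and store
//
// with any remaining iterations handled by a scalar epilogue loop.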
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

#ifndef NDEBUG
const char VerboseDebug[] = DEBUG_TYPE "-verbose";
#endif

/// @{
/// Metadata attribute names
const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
const char LLVMLoopVectorizeFollowupVectorized[] =
    "llvm.loop.vectorize.followup_vectorized";
const char LLVMLoopVectorizeFollowupEpilogue[] =
    "llvm.loop.vectorize.followup_epilogue";
/// @}

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");

static cl::opt<bool> EnableEpilogueVectorization(
    "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
    cl::desc("Enable vectorization of epilogue loops."));

static cl::opt<unsigned> EpilogueVectorizationForceVF(
    "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
    cl::desc("When epilogue vectorization is enabled, and a value greater than "
             "1 is specified, forces the given VF for all applicable epilogue "
             "loops."));

static cl::opt<unsigned> EpilogueVectorizationMinVF(
    "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
    cl::desc("Only loops with vectorization factor equal to or larger than "
             "the specified value are considered for epilogue vectorization."));

/// Loops with a known constant trip count below this number are vectorized only
/// if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

// The option prefer-predicate-over-epilogue indicates that an epilogue is
// undesired, that predication is preferred, and the enum below lists the
// available choices. That is, the vectorizer will try to fold the tail loop
// (epilogue) into the vector body and predicate the instructions accordingly.
// If tail-folding fails, there are different fallback strategies depending on
// these values:
namespace PreferPredicateTy {
  enum Option {
    ScalarEpilogue = 0,
    PredicateElseScalarEpilogue,
    PredicateOrDontVectorize
  };
} // namespace PreferPredicateTy
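// As an illustrative sketch of what tail-folding means (not code from this
// file): with VF = 4 and trip count n, instead of a main vector loop over
// n / 4 iterations followed by a scalar remainder loop, the vectorizer emits
// ceil(n / 4) masked vector iterations:
//
//   for (i = 0; i < n; i += 4)
//     mask = (i, i+1, i+2, i+3) < (n, n, n, n)
//     A[i:i+3] = ... // loads/stores predicated on 'mask'
//
// so that no scalar epilogue loop is needed.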

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "Prefer tail-folding, create scalar epilogue if "
                          "tail-folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "Prefer tail-folding, don't attempt vectorization if "
                          "tail-folding fails.")));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in "
             "a loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<bool> ForceTargetSupportsScalableVectors(
    "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
    cl::desc(
        "Pretend that scalable vectors are supported, even if the target does "
        "not support them. This flag should only be used for testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// Interleave small loops with scalar reductions.
static cl::opt<bool> InterleaveSmallLoopScalarReduction(
    "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
    cl::desc("Enable interleaving for loops with small iteration counts that "
             "contain scalar reductions to expose ILP."));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<bool>
    PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
                           cl::Hidden,
                           cl::desc("Prefer in-loop vector reductions, "
                                    "overriding the target's preference."));

static cl::opt<bool> PreferPredicatedReductionSelect(
    "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
    cl::desc(
        "Prefer predicating a reduction operation over an after loop select."));

cl::opt<bool> EnableVPlanNativePath(
    "enable-vplan-native-path", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

// FIXME: Remove this switch once we have divergence analysis. Currently we
// assume divergent non-backedge branches when this switch is true.
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

/// A helper function that returns the type of a loaded or stored value.
static Type *getMemInstValueType(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getType();
  return cast<StoreInst>(I)->getValueOperand()->getType();
}

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type at the given vectorization factor.
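/// For example, with a typical data layout, i1 is irregular: an array of four
/// i1 values occupies four bytes (one allocated byte per element), while a
/// <4 x i1> vector has a store size of just one byte, so the two are not
/// bitcast-compatible. i32, whose allocated and store sizes match, is regular.
/// (Illustrative; the exact answer depends on the target's DataLayout.)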
static bool hasIrregularType(Type *Ty, const DataLayout &DL, ElementCount VF) {
  // Determine if an array of VF elements of type Ty is "bitcast compatible"
  // with a <VF x Ty> vector.
  if (VF.isVector()) {
    auto *VectorTy = VectorType::get(Ty, VF);
    return TypeSize::get(VF.getKnownMinValue() *
                             DL.getTypeAllocSize(Ty).getFixedValue(),
                         VF.isScalable()) != DL.getTypeStoreSize(VectorTy);
  }

  // If the vectorization factor is one, we just check if an array of type Ty
  // requires padding between elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
///       we always assume predicated blocks have a 50% chance of executing.
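///
/// For example, with the current return value of 2, an instruction of cost C
/// inside a predicated block contributes C / 2 to the expected per-iteration
/// cost of the loop.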
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns the "best known" trip count for the specified loop \p L, as defined
/// by the following procedure:
///   1) Returns the exact trip count if it is known.
///   2) Returns the expected trip count according to profile data, if any.
///   3) Returns an upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

// Forward declare GeneratedRTChecks.
class GeneratedRTChecks;

namespace llvm {

/// InnerLoopVectorizer vectorizes loops that contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found, for a given vectorization factor.
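///
/// A simplified sketch of the control flow generated around the loops
/// (illustrative; assumes a scalar epilogue is kept):
///
///   iteration-count and runtime checks --(bypass)--> scalar preheader
///                  |
///   vector preheader -> vector loop body -> middle block --> exit
///                                                 |
///                                          scalar preheader
///                                                 |
///                                    scalar (epilogue) loop --> exit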
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
        PSI(PSI), RTChecks(RTChecks) {
    // Query this against the original loop and save it here because the profile
    // of the original loop header may change as the transformation happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop.
  /// In the case of epilogue vectorization, this function is overridden to
  /// handle the more complex control flow around the loops.
  virtual BasicBlock *createVectorizedLoopSkeleton();

  /// Widen a single instruction within the innermost loop.
  void widenInstruction(Instruction &I, VPValue *Def, VPUser &Operands,
                        VPTransformState &State);

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Widen a single select instruction within the innermost loop.
  void widenSelectInstruction(SelectInst &I, VPValue *VPDef, VPUser &Operands,
                              bool InvariantCond, VPTransformState &State);

  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
  void fixVectorizedLoop(VPTransformState &State);

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
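  /// For example, with UF = 2 and VF = 4, a single i32 value from the
  /// original loop is represented by two <4 x i32> values in the new loop.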
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single GetElementPtrInst based on information gathered and
  /// decisions taken during planning.
  void widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, VPUser &Indices,
                unsigned UF, ElementCount VF, bool IsPtrLoopInvariant,
                SmallBitVector &IsIndexLoopInvariant, VPTransformState &State);

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, RecurrenceDescriptor *RdxDesc,
                           VPValue *StartV, VPValue *Def,
                           VPTransformState &State);

  /// A helper function to scalarize a single Instruction in the innermost loop.
  /// Generates a sequence of scalar instances for each lane between \p MinLane
  /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
  /// inclusive. Uses the VPValue operands from \p Operands instead of \p
  /// Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPValue *Def, VPUser &Operands,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, Value *Start, TruncInst *Trunc,
                             VPValue *Def, VPValue *CastDef,
                             VPTransformState &State);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
                                 VPTransformState &State);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                ArrayRef<VPValue *> VPDefs,
                                VPTransformState &State, VPValue *Addr,
                                ArrayRef<VPValue *> StoredValues,
                                VPValue *BlockInMask = nullptr);

  /// Vectorize Load and Store instructions with the base address given in \p
  /// Addr, optionally masking the vector operations if \p BlockInMask is
  /// non-null. Use \p State to translate given VPValues to IR values in the
  /// vectorized loop.
  void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
                                  VPValue *Def, VPValue *Addr,
                                  VPValue *StoredValue, VPValue *BlockInMask);

  /// Set the debug location in the builder using the debug location in
  /// the instruction.
  void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(VPTransformState &State);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...;
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs(VPTransformState &State);

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi, VPTransformState &State);

  /// Fix a reduction cross-iteration phi. This is the second phase of
  /// vectorizing this phi node.
  void fixReduction(PHINode *Phi, VPTransformState &State);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc,
                               VPTransformState &State);

  /// Fixup the LCSSA phi nodes in the unique exit block.  This simply
  /// means we need to add the appropriate incoming value from the middle
  /// block as exiting edges from the scalar epilogue loop (if present) are
  /// already in place, and we exit the vector loop exclusively to the middle
  /// block.
  void fixLCSSAPHIs(VPTransformState &State);

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths(VPTransformState &State);

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIndex.
  /// \p Opcode is relevant for FP induction variables.
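  /// For example (illustrative), with StartIdx = 0, Step = 2 and VF = 4, this
  /// adds <0, 2, 4, 6> to the lanes of \p Val.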
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                               Instruction::BinaryOpsEnd);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable - it
  /// can also be a truncate instruction.
  void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
                        const InductionDescriptor &ID, VPValue *Def,
                        VPValue *CastDef, VPTransformState &State);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Value *Start,
                                       Instruction *EntryVal, VPValue *Def,
                                       VPValue *CastDef,
                                       VPTransformState &State);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// If there is a cast involved in the induction variable \p ID, which should
  /// be ignored in the vectorized loop body, this function records the
  /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
  /// cast. We had already proved that the casted Phi is equal to the uncasted
  /// Phi in the vectorized loop (under a runtime guard), and therefore
  /// there is no need to vectorize the cast - the same value can be used in the
  /// vector loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified;
  /// otherwise, \p VectorLoopValue is a widened/vectorized value.
  ///
  /// \p EntryVal is the value from the original loop that maps to the vector
  /// phi node and is used to distinguish what is the IV currently being
  /// processed - original one (if \p EntryVal is a phi corresponding to the
  /// original IV) or the "newly-created" one based on the proof mentioned above
  /// (see also buildScalarSteps() and createVectorIntOrFpInductionPHI()). In the
  /// latter case \p EntryVal is a TruncInst and we must not record anything for
  /// that IV, but it's error-prone to expect callers of this routine to care
  /// about that, hence this explicit parameter.
  void recordVectorLoopValueForInductionCast(
      const InductionDescriptor &ID, const Instruction *EntryVal,
      Value *VectorLoopValue, VPValue *CastDef, VPTransformState &State,
      unsigned Part, unsigned Lane = UINT_MAX);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct. Returns the block containing the checks or
  /// nullptr if no checks have been added.
  BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  /// Returns the block containing the checks or nullptr if no checks have been
  /// added.
  BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Compute the transformed value of Index at offset StartValue using step
  /// StepValue.
  /// For integer induction, returns StartValue + Index * StepValue.
  /// For pointer induction, returns StartValue[Index * StepValue].
  /// FIXME: The newly created binary instructions should contain nsw/nuw
  /// flags, which can be found from the original scalar operations.
  Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
                              const DataLayout &DL,
                              const InductionDescriptor &ID) const;

  /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  /// vector loop preheader, middle block and scalar preheader. Also
  /// allocate a loop object for the new vector loop and return it.
  Loop *createVectorLoopSkeleton(StringRef Prefix);

  /// Create new phi nodes for the induction variables to resume iteration count
  /// in the scalar epilogue, from where the vectorized loop left off (given by
  /// \p VectorTripCount).
  /// In cases where the loop skeleton is more complicated (e.g. epilogue
  /// vectorization) and the resume values can come from an additional bypass
  /// block, the \p AdditionalBypass pair provides information about the bypass
  /// block and the end value on the edge from bypass to this loop.
  void createInductionResumeValues(
      Loop *L, Value *VectorTripCount,
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Take in the vector loop \p L as argument, and return
  /// the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks.  Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata).  Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart() {}
  virtual void printDebugTracesAtEnd() {}

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning.  It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks.  The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The (unique) ExitBlock of the scalar loop.  Note that
  /// there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction = nullptr;

  /// The induction variable of the old basic block.
  PHINode *OldInduction = nullptr;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile guided size
  // optimizations.
  bool OptForSizeBasedOnProfile;

  /// Structure to hold information about generated runtime checks, responsible
  /// for cleaning the checks, if vectorization turns out unprofitable.
  GeneratedRTChecks &RTChecks;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI, Check) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                       Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  BasicBlock *SCEVSafetyCheck = nullptr;
  BasicBlock *MemSafetyCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;

  EpilogueLoopVectorizationInfo(unsigned MVF, unsigned MUF, unsigned EVF,
                                unsigned EUF)
      : MainLoopVF(ElementCount::getFixed(MVF)), MainLoopUF(MUF),
        EpilogueVF(ElementCount::getFixed(EVF)), EpilogueUF(EUF) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};

/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, first to set up the
/// skeleton and vectorize the main loop, and second to complete the skeleton
/// from the first step and vectorize the epilogue.  This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
                            Checks),
        EPI(EPI) {}

  // Override this function to handle the more complex control flow around the
  // three loops.
  BasicBlock *createVectorizedLoopSkeleton() final override {
    return createEpilogueVectorizedLoopSkeleton();
  }

  /// The interface for creating a vectorized skeleton using one of two
  /// different strategies, each corresponding to one execution of the vplan
  /// as described above.
  virtual BasicBlock *createEpilogueVectorizedLoopSkeleton() = 0;

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and their
/// epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Check)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Check) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e. the first pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
                                             bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

// A specialized derived class of inner loop vectorizer that performs
// vectorization of *epilogue* loops in the process of vectorizing loops and
// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Checks) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e. the second pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
                                                      BasicBlock *Bypass,
                                                      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (Use &Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
    const DILocation *DIL = Inst->getDebugLoc();
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst)) {
      assert(!VF.isScalable() && "scalable vectors not yet supported.");
      auto NewDIL =
          DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
      if (NewDIL)
        B.SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs()
                   << "Failed to create new discriminator: "
                   << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B.SetCurrentDebugLocation(DIL);
  } else
    B.SetCurrentDebugLocation(DebugLoc());
}

/// Write a record \p DebugMsg about vectorization failure to the debug
/// output stream. If \p I is passed, it is an instruction that prevents
/// vectorization.
#ifndef NDEBUG
static void debugVectorizationFailure(const StringRef DebugMsg,
    Instruction *I) {
  dbgs() << "LV: Not vectorizing: " << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed.
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint).  \p
/// RemarkName is the identifier for the remark.  If \p I is passed it is an
/// instruction that prevents vectorization.  Otherwise \p TheLoop is used for
/// the location of the remark.  \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
    StringRef RemarkName, Loop *TheLoop, Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
  R << "loop not vectorized: ";
  return R;
}

/// Return a value for Step multiplied by VF.
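/// For example (illustrative), for a fixed VF = 4 and Step = 2 this returns
/// the constant 8, while for a scalable VF = <vscale x 4> it emits a runtime
/// 8 * vscale value.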
1102 static Value *createStepForVF(IRBuilder<> &B, Constant *Step, ElementCount VF) {
1103   assert(isa<ConstantInt>(Step) && "Expected an integer step");
1104   Constant *StepVal = ConstantInt::get(
1105       Step->getType(),
1106       cast<ConstantInt>(Step)->getSExtValue() * VF.getKnownMinValue());
1107   return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
1108 }
1109 
1110 namespace llvm {
1111 
1112 /// Return the runtime value for VF.
1113 Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF) {
1114   Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
1115   return VF.isScalable() ? B.CreateVScale(EC) : EC;
1116 }
1117 
1118 void reportVectorizationFailure(const StringRef DebugMsg,
1119     const StringRef OREMsg, const StringRef ORETag,
1120     OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I) {
1121   LLVM_DEBUG(debugVectorizationFailure(DebugMsg, I));
1122   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1123   ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(),
1124                 ORETag, TheLoop, I) << OREMsg);
1125 }
1126 
1127 } // end namespace llvm
1128 
1129 #ifndef NDEBUG
1130 /// \return string containing a file name and a line # for the given loop.
1131 static std::string getDebugLocString(const Loop *L) {
1132   std::string Result;
1133   if (L) {
1134     raw_string_ostream OS(Result);
1135     if (const DebugLoc LoopDbgLoc = L->getStartLoc())
1136       LoopDbgLoc.print(OS);
1137     else
1138       // Just print the module name.
1139       OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
1140     OS.flush();
1141   }
1142   return Result;
1143 }
1144 #endif
1145 
1146 void InnerLoopVectorizer::addNewMetadata(Instruction *To,
1147                                          const Instruction *Orig) {
1148   // If the loop was versioned with memchecks, add the corresponding no-alias
1149   // metadata.
1150   if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
1151     LVer->annotateInstWithNoAlias(To, Orig);
1152 }
1153 
1154 void InnerLoopVectorizer::addMetadata(Instruction *To,
1155                                       Instruction *From) {
1156   propagateMetadata(To, From);
1157   addNewMetadata(To, From);
1158 }
1159 
1160 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
1161                                       Instruction *From) {
1162   for (Value *V : To) {
1163     if (Instruction *I = dyn_cast<Instruction>(V))
1164       addMetadata(I, From);
1165   }
1166 }
1167 
1168 namespace llvm {
1169 
1170 // Loop vectorization cost-model hints how the scalar epilogue loop should be
1171 // lowered.
1172 enum ScalarEpilogueLowering {
1173 
1174   // The default: allowing scalar epilogues.
1175   CM_ScalarEpilogueAllowed,
1176 
1177   // Vectorization with OptForSize: don't allow epilogues.
1178   CM_ScalarEpilogueNotAllowedOptSize,
1179 
1180   // A special case of vectorisation with OptForSize: loops with a very small
1181   // trip count are considered for vectorization under OptForSize, thereby
1182   // making sure the cost of their loop body is dominant, free of runtime
1183   // guards and scalar iteration overheads.
1184   CM_ScalarEpilogueNotAllowedLowTripLoop,
1185 
1186   // Loop hint predicate indicating an epilogue is undesired.
1187   CM_ScalarEpilogueNotNeededUsePredicate,
1188 
  // Directive indicating we must either tail fold or not vectorize.
1190   CM_ScalarEpilogueNotAllowedUsePredicate
1191 };
1192 
1193 /// LoopVectorizationCostModel - estimates the expected speedups due to
1194 /// vectorization.
/// In many cases vectorization is not profitable. This can happen for a
/// number of reasons. In this class we mainly attempt to predict the
1197 /// expected speedup/slowdowns due to the supported instruction set. We use the
1198 /// TargetTransformInfo to query the different backends for the cost of
1199 /// different operations.
1200 class LoopVectorizationCostModel {
1201 public:
1202   LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
1203                              PredicatedScalarEvolution &PSE, LoopInfo *LI,
1204                              LoopVectorizationLegality *Legal,
1205                              const TargetTransformInfo &TTI,
1206                              const TargetLibraryInfo *TLI, DemandedBits *DB,
1207                              AssumptionCache *AC,
1208                              OptimizationRemarkEmitter *ORE, const Function *F,
1209                              const LoopVectorizeHints *Hints,
1210                              InterleavedAccessInfo &IAI)
1211       : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
1212         TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
1213         Hints(Hints), InterleaveInfo(IAI) {}
1214 
1215   /// \return An upper bound for the vectorization factor, or None if
1216   /// vectorization and interleaving should be avoided up front.
1217   Optional<ElementCount> computeMaxVF(ElementCount UserVF, unsigned UserIC);
1218 
1219   /// \return True if runtime checks are required for vectorization, and false
1220   /// otherwise.
1221   bool runtimeChecksRequired();
1222 
1223   /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every power of two up to MaxVF. If UserVF is not zero,
  /// that vectorization factor will be selected, provided vectorization is
  /// possible at all.
1227   VectorizationFactor selectVectorizationFactor(ElementCount MaxVF);
1228   VectorizationFactor
1229   selectEpilogueVectorizationFactor(const ElementCount MaxVF,
1230                                     const LoopVectorizationPlanner &LVP);
1231 
1232   /// Setup cost-based decisions for user vectorization factor.
1233   void selectUserVectorizationFactor(ElementCount UserVF) {
1234     collectUniformsAndScalars(UserVF);
1235     collectInstsToScalarize(UserVF);
1236   }
1237 
1238   /// \return The size (in bits) of the smallest and widest types in the code
1239   /// that needs to be vectorized. We ignore values that remain scalar such as
1240   /// 64 bit loop indices.
1241   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1242 
1243   /// \return The desired interleave count.
1244   /// If interleave count has been specified by metadata it will be returned.
1245   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1246   /// are the selected vectorization factor and the cost of the selected VF.
1247   unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);
1248 
  /// A memory access instruction may be vectorized in more than one way; the
  /// form it takes after vectorization depends on its cost. This function
  /// makes cost-based decisions for Load/Store instructions and collects them
  /// in a map. This decision map is used when building the lists of
  /// loop-uniform and loop-scalar instructions. The calculated cost is saved
  /// with the widening decision in order to avoid redundant calculations.
1256   void setCostBasedWideningDecision(ElementCount VF);
1257 
1258   /// A struct that represents some properties of the register usage
1259   /// of a loop.
1260   struct RegisterUsage {
1261     /// Holds the number of loop invariant values that are used in the loop.
1262     /// The key is ClassID of target-provided register class.
1263     SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
1264     /// Holds the maximum number of concurrent live intervals in the loop.
1265     /// The key is ClassID of target-provided register class.
1266     SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
1267   };
1268 
  /// \return Information about the register usage of the loop for the given
  /// vectorization factors.
1271   SmallVector<RegisterUsage, 8>
1272   calculateRegisterUsage(ArrayRef<ElementCount> VFs);
1273 
1274   /// Collect values we want to ignore in the cost model.
1275   void collectValuesToIgnore();
1276 
  /// Split reductions into those that happen in the loop, and those that
  /// happen outside. In-loop reductions are collected into
  /// InLoopReductionChains.
1279   void collectInLoopReductions();
1280 
1281   /// \returns The smallest bitwidth each instruction can be represented with.
1282   /// The vector equivalents of these instructions should be truncated to this
1283   /// type.
1284   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1285     return MinBWs;
1286   }
1287 
1288   /// \returns True if it is more profitable to scalarize instruction \p I for
1289   /// vectorization factor \p VF.
1290   bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
1291     assert(VF.isVector() &&
1292            "Profitable to scalarize relevant only for VF > 1.");
1293 
1294     // Cost model is not run in the VPlan-native path - return conservative
1295     // result until this changes.
1296     if (EnableVPlanNativePath)
1297       return false;
1298 
1299     auto Scalars = InstsToScalarize.find(VF);
1300     assert(Scalars != InstsToScalarize.end() &&
1301            "VF not yet analyzed for scalarization profitability");
1302     return Scalars->second.find(I) != Scalars->second.end();
1303   }
1304 
1305   /// Returns true if \p I is known to be uniform after vectorization.
1306   bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
1307     if (VF.isScalar())
1308       return true;
1309 
1310     // Cost model is not run in the VPlan-native path - return conservative
1311     // result until this changes.
1312     if (EnableVPlanNativePath)
1313       return false;
1314 
1315     auto UniformsPerVF = Uniforms.find(VF);
1316     assert(UniformsPerVF != Uniforms.end() &&
1317            "VF not yet analyzed for uniformity");
1318     return UniformsPerVF->second.count(I);
1319   }
1320 
1321   /// Returns true if \p I is known to be scalar after vectorization.
1322   bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
1323     if (VF.isScalar())
1324       return true;
1325 
1326     // Cost model is not run in the VPlan-native path - return conservative
1327     // result until this changes.
1328     if (EnableVPlanNativePath)
1329       return false;
1330 
1331     auto ScalarsPerVF = Scalars.find(VF);
1332     assert(ScalarsPerVF != Scalars.end() &&
1333            "Scalar values are not calculated for VF");
1334     return ScalarsPerVF->second.count(I);
1335   }
1336 
1337   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1338   /// for vectorization factor \p VF.
1339   bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
1340     return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
1341            !isProfitableToScalarize(I, VF) &&
1342            !isScalarAfterVectorization(I, VF);
1343   }
1344 
  /// Decision that was taken during cost calculation for a memory instruction.
1346   enum InstWidening {
1347     CM_Unknown,
1348     CM_Widen,         // For consecutive accesses with stride +1.
1349     CM_Widen_Reverse, // For consecutive accesses with stride -1.
    CM_Interleave,    // For accesses vectorized via an interleave group.
    CM_GatherScatter, // For accesses lowered to a gather or scatter.
    CM_Scalarize      // For accesses replicated as scalar accesses.
1353   };
1354 
1355   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1356   /// instruction \p I and vector width \p VF.
1357   void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
1358                            InstructionCost Cost) {
1359     assert(VF.isVector() && "Expected VF >=2");
1360     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1361   }
1362 
1363   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1364   /// interleaving group \p Grp and vector width \p VF.
1365   void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
1366                            ElementCount VF, InstWidening W,
1367                            InstructionCost Cost) {
1368     assert(VF.isVector() && "Expected VF >=2");
    // Broadcast this decision to all instructions inside the group; the cost
    // will be assigned to one instruction only (the insert position).
1371     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1372       if (auto *I = Grp->getMember(i)) {
1373         if (Grp->getInsertPos() == I)
1374           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1375         else
1376           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1377       }
1378     }
1379   }
1380 
1381   /// Return the cost model decision for the given instruction \p I and vector
1382   /// width \p VF. Return CM_Unknown if this instruction did not pass
1383   /// through the cost modeling.
1384   InstWidening getWideningDecision(Instruction *I, ElementCount VF) {
1385     assert(VF.isVector() && "Expected VF to be a vector VF");
1386     // Cost model is not run in the VPlan-native path - return conservative
1387     // result until this changes.
1388     if (EnableVPlanNativePath)
1389       return CM_GatherScatter;
1390 
1391     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1392     auto Itr = WideningDecisions.find(InstOnVF);
1393     if (Itr == WideningDecisions.end())
1394       return CM_Unknown;
1395     return Itr->second.first;
1396   }
1397 
1398   /// Return the vectorization cost for the given instruction \p I and vector
1399   /// width \p VF.
1400   InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
1401     assert(VF.isVector() && "Expected VF >=2");
1402     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1403     assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1404            "The cost is not calculated");
1405     return WideningDecisions[InstOnVF].second;
1406   }
1407 
1408   /// Return True if instruction \p I is an optimizable truncate whose operand
1409   /// is an induction variable. Such a truncate will be removed by adding a new
1410   /// induction variable with the destination type.
1411   bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
1412     // If the instruction is not a truncate, return false.
1413     auto *Trunc = dyn_cast<TruncInst>(I);
1414     if (!Trunc)
1415       return false;
1416 
1417     // Get the source and destination types of the truncate.
1418     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1419     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1420 
1421     // If the truncate is free for the given types, return false. Replacing a
1422     // free truncate with an induction variable would add an induction variable
1423     // update instruction to each iteration of the loop. We exclude from this
1424     // check the primary induction variable since it will need an update
1425     // instruction regardless.
1426     Value *Op = Trunc->getOperand(0);
1427     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1428       return false;
1429 
1430     // If the truncated value is not an induction variable, return false.
1431     return Legal->isInductionPhi(Op);
1432   }
1433 
1434   /// Collects the instructions to scalarize for each predicated instruction in
1435   /// the loop.
1436   void collectInstsToScalarize(ElementCount VF);
1437 
1438   /// Collect Uniform and Scalar values for the given \p VF.
  /// The sets depend on the CM decisions for Load/Store instructions, which
  /// may be vectorized as interleaved accesses, gather/scatter operations, or
  /// scalarized.
1441   void collectUniformsAndScalars(ElementCount VF) {
1442     // Do the analysis once.
1443     if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
1444       return;
1445     setCostBasedWideningDecision(VF);
1446     collectLoopUniforms(VF);
1447     collectLoopScalars(VF);
1448   }
1449 
1450   /// Returns true if the target machine supports masked store operation
1451   /// for the given \p DataType and kind of access to \p Ptr.
1452   bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) {
1453     return Legal->isConsecutivePtr(Ptr) &&
1454            TTI.isLegalMaskedStore(DataType, Alignment);
1455   }
1456 
1457   /// Returns true if the target machine supports masked load operation
1458   /// for the given \p DataType and kind of access to \p Ptr.
1459   bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) {
1460     return Legal->isConsecutivePtr(Ptr) &&
1461            TTI.isLegalMaskedLoad(DataType, Alignment);
1462   }
1463 
1464   /// Returns true if the target machine supports masked scatter operation
1465   /// for the given \p DataType.
1466   bool isLegalMaskedScatter(Type *DataType, Align Alignment) {
1467     return TTI.isLegalMaskedScatter(DataType, Alignment);
1468   }
1469 
1470   /// Returns true if the target machine supports masked gather operation
1471   /// for the given \p DataType.
1472   bool isLegalMaskedGather(Type *DataType, Align Alignment) {
1473     return TTI.isLegalMaskedGather(DataType, Alignment);
1474   }
1475 
1476   /// Returns true if the target machine can represent \p V as a masked gather
1477   /// or scatter operation.
1478   bool isLegalGatherOrScatter(Value *V) {
1479     bool LI = isa<LoadInst>(V);
1480     bool SI = isa<StoreInst>(V);
1481     if (!LI && !SI)
1482       return false;
1483     auto *Ty = getMemInstValueType(V);
1484     Align Align = getLoadStoreAlignment(V);
1485     return (LI && isLegalMaskedGather(Ty, Align)) ||
1486            (SI && isLegalMaskedScatter(Ty, Align));
1487   }
1488 
1489   /// Returns true if the target machine supports all of the reduction
1490   /// variables found for the given VF.
1491   bool canVectorizeReductions(ElementCount VF) {
1492     return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
1493       RecurrenceDescriptor RdxDesc = Reduction.second;
1494       return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
1495     }));
1496   }
1497 
1498   /// Returns true if \p I is an instruction that will be scalarized with
1499   /// predication. Such instructions include conditional stores and
1500   /// instructions that may divide by zero.
  /// If a non-zero VF has been calculated, we check if \p I will be scalarized
  /// with predication for that VF.
1503   bool isScalarWithPredication(Instruction *I,
1504                                ElementCount VF = ElementCount::getFixed(1));
1505 
  /// Returns true if \p I is an instruction that will be predicated either
  /// through scalar predication or masked load/store or masked gather/scatter.
  /// Superset of instructions that return true for isScalarWithPredication.
1509   bool isPredicatedInst(Instruction *I) {
1510     if (!blockNeedsPredication(I->getParent()))
1511       return false;
1512     // Loads and stores that need some form of masked operation are predicated
1513     // instructions.
1514     if (isa<LoadInst>(I) || isa<StoreInst>(I))
1515       return Legal->isMaskRequired(I);
1516     return isScalarWithPredication(I);
1517   }
1518 
1519   /// Returns true if \p I is a memory instruction with consecutive memory
1520   /// access that can be widened.
1521   bool
1522   memoryInstructionCanBeWidened(Instruction *I,
1523                                 ElementCount VF = ElementCount::getFixed(1));
1524 
1525   /// Returns true if \p I is a memory instruction in an interleaved-group
1526   /// of memory accesses that can be vectorized with wide vector loads/stores
1527   /// and shuffles.
1528   bool
1529   interleavedAccessCanBeWidened(Instruction *I,
1530                                 ElementCount VF = ElementCount::getFixed(1));
1531 
1532   /// Check if \p Instr belongs to any interleaved access group.
1533   bool isAccessInterleaved(Instruction *Instr) {
1534     return InterleaveInfo.isInterleaved(Instr);
1535   }
1536 
1537   /// Get the interleaved access group that \p Instr belongs to.
1538   const InterleaveGroup<Instruction> *
1539   getInterleavedAccessGroup(Instruction *Instr) {
1540     return InterleaveInfo.getInterleaveGroup(Instr);
1541   }
1542 
1543   /// Returns true if we're required to use a scalar epilogue for at least
1544   /// the final iteration of the original loop.
1545   bool requiresScalarEpilogue() const {
1546     if (!isScalarEpilogueAllowed())
1547       return false;
    // If we might exit from anywhere but the latch, we must run the exiting
    // iteration in scalar form.
1550     if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
1551       return true;
1552     return InterleaveInfo.requiresScalarEpilogue();
1553   }
1554 
  /// Returns true if a scalar epilogue is allowed, i.e. it has not been
  /// disallowed due to optsize or a loop hint annotation.
1557   bool isScalarEpilogueAllowed() const {
1558     return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1559   }
1560 
  /// Returns true if all loop blocks should be masked in order to fold the
  /// loop tail.
1562   bool foldTailByMasking() const { return FoldTailByMasking; }
1563 
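  /// Returns true if the instructions in \p BB will be predicated, i.e. the
  /// block requires predication in the original loop, or every block does
  /// because the tail is folded by masking.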
1564   bool blockNeedsPredication(BasicBlock *BB) {
1565     return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1566   }
1567 
1568   /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
1569   /// nodes to the chain of instructions representing the reductions. Uses a
1570   /// MapVector to ensure deterministic iteration order.
1571   using ReductionChainMap =
1572       SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;
1573 
1574   /// Return the chain of instructions representing an inloop reduction.
1575   const ReductionChainMap &getInLoopReductionChains() const {
1576     return InLoopReductionChains;
1577   }
1578 
1579   /// Returns true if the Phi is part of an inloop reduction.
1580   bool isInLoopReduction(PHINode *Phi) const {
1581     return InLoopReductionChains.count(Phi);
1582   }
1583 
1584   /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1585   /// with factor VF.  Return the cost of the instruction, including
1586   /// scalarization overhead if it's needed.
1587   InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF);
1588 
1589   /// Estimate cost of a call instruction CI if it were vectorized with factor
1590   /// VF. Return the cost of the instruction, including scalarization overhead
  /// if it's needed. The flag NeedToScalarize shows if the call needs to be
  /// scalarized, i.e. either a vector version isn't available or it is too
  /// expensive.
1594   InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
1595                                     bool &NeedToScalarize);
1596 
1597   /// Invalidates decisions already taken by the cost model.
1598   void invalidateCostModelingDecisions() {
1599     WideningDecisions.clear();
1600     Uniforms.clear();
1601     Scalars.clear();
1602   }
1603 
1604 private:
1605   unsigned NumPredStores = 0;
1606 
1607   /// \return An upper bound for the vectorization factor, a power-of-2 larger
1608   /// than zero. One is returned if vectorization should best be avoided due
1609   /// to cost.
1610   ElementCount computeFeasibleMaxVF(unsigned ConstTripCount,
1611                                     ElementCount UserVF);
1612 
  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
1620   using VectorizationCostTy = std::pair<InstructionCost, bool>;
1621 
1622   /// Returns the expected execution cost. The unit of the cost does
1623   /// not matter because we use the 'cost' units to compare different
1624   /// vector widths. The cost that is returned is *not* normalized by
1625   /// the factor width.
1626   VectorizationCostTy expectedCost(ElementCount VF);
1627 
1628   /// Returns the execution time cost of an instruction for a given vector
1629   /// width. Vector width of one means scalar.
1630   VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1631 
1632   /// The cost-computation logic from getInstructionCost which provides
1633   /// the vector type as an output parameter.
1634   InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
1635                                      Type *&VectorTy);
1636 
1637   /// Return the cost of instructions in an inloop reduction pattern, if I is
1638   /// part of that pattern.
1639   InstructionCost getReductionPatternCost(Instruction *I, ElementCount VF,
1640                                           Type *VectorTy,
1641                                           TTI::TargetCostKind CostKind);
1642 
1643   /// Calculate vectorization cost of memory instruction \p I.
1644   InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1645 
1646   /// The cost computation for scalarized memory instruction.
1647   InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1648 
1649   /// The cost computation for interleaving group of memory instructions.
1650   InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1651 
1652   /// The cost computation for Gather/Scatter instruction.
1653   InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1654 
1655   /// The cost computation for widening instruction \p I with consecutive
1656   /// memory access.
1657   InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1658 
  /// The cost calculation for Load/Store instruction \p I with uniform
  /// pointer - Load: scalar load + broadcast.
  /// Store: scalar store + (loop invariant value stored? 0 : extract of last
  /// element)
1663   InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1664 
1665   /// Estimate the overhead of scalarizing an instruction. This is a
1666   /// convenience wrapper for the type-based getScalarizationOverhead API.
1667   InstructionCost getScalarizationOverhead(Instruction *I, ElementCount VF);
1668 
  /// Returns whether the instruction is a load or store that will be emitted
  /// as a vector operation.
1671   bool isConsecutiveLoadOrStore(Instruction *I);
1672 
1673   /// Returns true if an artificially high cost for emulated masked memrefs
1674   /// should be used.
1675   bool useEmulatedMaskMemRefHack(Instruction *I);
1676 
1677   /// Map of scalar integer values to the smallest bitwidth they can be legally
1678   /// represented as. The vector equivalents of these values should be truncated
1679   /// to this type.
1680   MapVector<Instruction *, uint64_t> MinBWs;
1681 
1682   /// A type representing the costs for instructions if they were to be
1683   /// scalarized rather than vectorized. The entries are Instruction-Cost
1684   /// pairs.
1685   using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1686 
  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
1689   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1690 
1691   /// Records whether it is allowed to have the original scalar loop execute at
1692   /// least once. This may be needed as a fallback loop in case runtime
1693   /// aliasing/dependence checks fail, or to handle the tail/remainder
  /// iterations when the trip count is unknown or not divisible by the VF,
1695   /// or as a peel-loop to handle gaps in interleave-groups.
1696   /// Under optsize and when the trip count is very small we don't allow any
1697   /// iterations to execute in the scalar loop.
1698   ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1699 
  /// All blocks of the loop are to be masked in order to fold the tail of the
  /// scalar iterations.
1701   bool FoldTailByMasking = false;
1702 
1703   /// A map holding scalar costs for different vectorization factors. The
1704   /// presence of a cost for an instruction in the mapping indicates that the
1705   /// instruction will be scalarized when vectorizing with the associated
1706   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1707   DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1708 
1709   /// Holds the instructions known to be uniform after vectorization.
1710   /// The data is collected per VF.
1711   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1712 
1713   /// Holds the instructions known to be scalar after vectorization.
1714   /// The data is collected per VF.
1715   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1716 
1717   /// Holds the instructions (address computations) that are forced to be
1718   /// scalarized.
1719   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1720 
  /// PHINodes of the reductions that should be expanded in-loop, along with
  /// their associated chains of reduction operations, in program order from
  /// top (PHI) to bottom.
1724   ReductionChainMap InLoopReductionChains;
1725 
  /// A map from in-loop reduction operations to their immediate chain operand.
1727   /// FIXME: This can be removed once reductions can be costed correctly in
1728   /// vplan. This was added to allow quick lookup to the inloop operations,
1729   /// without having to loop through InLoopReductionChains.
1730   DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1731 
1732   /// Returns the expected difference in cost from scalarizing the expression
1733   /// feeding a predicated instruction \p PredInst. The instructions to
1734   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1735   /// non-negative return value implies the expression will be scalarized.
1736   /// Currently, only single-use chains are considered for scalarization.
1737   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1738                               ElementCount VF);
1739 
1740   /// Collect the instructions that are uniform after vectorization. An
1741   /// instruction is uniform if we represent it with a single scalar value in
1742   /// the vectorized loop corresponding to each vector iteration. Examples of
1743   /// uniform instructions include pointer operands of consecutive or
1744   /// interleaved memory accesses. Note that although uniformity implies an
1745   /// instruction will be scalar, the reverse is not true. In general, a
1746   /// scalarized instruction will be represented by VF scalar values in the
1747   /// vectorized loop, each corresponding to an iteration of the original
1748   /// scalar loop.
1749   void collectLoopUniforms(ElementCount VF);
1750 
1751   /// Collect the instructions that are scalar after vectorization. An
1752   /// instruction is scalar if it is known to be uniform or will be scalarized
1753   /// during vectorization. Non-uniform scalarized instructions will be
1754   /// represented by VF values in the vectorized loop, each corresponding to an
1755   /// iteration of the original scalar loop.
1756   void collectLoopScalars(ElementCount VF);
1757 
1758   /// Keeps cost model vectorization decision and cost for instructions.
1759   /// Right now it is used for memory instructions only.
1760   using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1761                                 std::pair<InstWidening, InstructionCost>>;
1762 
1763   DecisionList WideningDecisions;
1764 
1765   /// Returns true if \p V is expected to be vectorized and it needs to be
1766   /// extracted.
1767   bool needsExtract(Value *V, ElementCount VF) const {
1768     Instruction *I = dyn_cast<Instruction>(V);
1769     if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1770         TheLoop->isLoopInvariant(I))
1771       return false;
1772 
1773     // Assume we can vectorize V (and hence we need extraction) if the
1774     // scalars are not computed yet. This can happen, because it is called
1775     // via getScalarizationOverhead from setCostBasedWideningDecision, before
1776     // the scalars are collected. That should be a safe assumption in most
1777     // cases, because we check if the operands have vectorizable types
1778     // beforehand in LoopVectorizationLegality.
1779     return Scalars.find(VF) == Scalars.end() ||
1780            !isScalarAfterVectorization(I, VF);
1781   };
1782 
1783   /// Returns a range containing only operands needing to be extracted.
1784   SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1785                                                    ElementCount VF) {
1786     return SmallVector<Value *, 4>(make_filter_range(
1787         Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
1788   }
1789 
1790   /// Determines if we have the infrastructure to vectorize loop \p L and its
1791   /// epilogue, assuming the main loop is vectorized by \p VF.
1792   bool isCandidateForEpilogueVectorization(const Loop &L,
1793                                            const ElementCount VF) const;
1794 
1795   /// Returns true if epilogue vectorization is considered profitable, and
1796   /// false otherwise.
1797   /// \p VF is the vectorization factor chosen for the original loop.
1798   bool isEpilogueVectorizationProfitable(const ElementCount VF) const;
1799 
1800 public:
1801   /// The loop that we evaluate.
1802   Loop *TheLoop;
1803 
1804   /// Predicated scalar evolution analysis.
1805   PredicatedScalarEvolution &PSE;
1806 
1807   /// Loop Info analysis.
1808   LoopInfo *LI;
1809 
1810   /// Vectorization legality.
1811   LoopVectorizationLegality *Legal;
1812 
1813   /// Vector target information.
1814   const TargetTransformInfo &TTI;
1815 
1816   /// Target Library Info.
1817   const TargetLibraryInfo *TLI;
1818 
1819   /// Demanded bits analysis.
1820   DemandedBits *DB;
1821 
1822   /// Assumption cache.
1823   AssumptionCache *AC;
1824 
1825   /// Interface to emit optimization remarks.
1826   OptimizationRemarkEmitter *ORE;
1827 
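  /// The function that contains the loop to be vectorized.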
1828   const Function *TheFunction;
1829 
1830   /// Loop Vectorize Hint.
1831   const LoopVectorizeHints *Hints;
1832 
  /// The interleave access information contains groups of interleaved accesses
  /// that have the same stride and are close to each other.
1835   InterleavedAccessInfo &InterleaveInfo;
1836 
1837   /// Values to ignore in the cost model.
1838   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1839 
1840   /// Values to ignore in the cost model when VF > 1.
1841   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1842 
1843   /// Profitable vector factors.
1844   SmallVector<VectorizationFactor, 8> ProfitableVFs;
1845 };
1846 } // end namespace llvm
1847 
1848 /// Helper struct to manage generating runtime checks for vectorization.
1849 ///
/// The runtime checks are created up-front in temporary blocks, un-linked from
/// the existing IR, to allow their cost to be estimated more accurately. After
/// deciding to vectorize, the check blocks are linked back in. If we decide
/// not to vectorize, the temporary blocks are removed completely.
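///
/// Intended use (a simplified sketch; the surrounding code drives this from
/// the legality and cost decisions):
/// \code
///   GeneratedRTChecks Checks(SE, DT, LI, DL);
///   Checks.Create(L, LAI, UnionPred); // Build checks in temporary blocks.
///   // ... estimate cost; if vectorizing, link the blocks back in:
///   Checks.emitSCEVChecks(L, Bypass, LoopVectorPreHeader, LoopExitBlock);
///   Checks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader);
///   // Otherwise, the destructor removes the unused temporary blocks.
/// \endcode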
1854 class GeneratedRTChecks {
1855   /// Basic block which contains the generated SCEV checks, if any.
1856   BasicBlock *SCEVCheckBlock = nullptr;
1857 
1858   /// The value representing the result of the generated SCEV checks. If it is
1859   /// nullptr, either no SCEV checks have been generated or they have been used.
1860   Value *SCEVCheckCond = nullptr;
1861 
1862   /// Basic block which contains the generated memory runtime checks, if any.
1863   BasicBlock *MemCheckBlock = nullptr;
1864 
1865   /// The value representing the result of the generated memory runtime checks.
1866   /// If it is nullptr, either no memory runtime checks have been generated or
1867   /// they have been used.
1868   Instruction *MemRuntimeCheckCond = nullptr;
1869 
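  /// Analyses kept up to date while the temporary check blocks are created
  /// and removed.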
1870   DominatorTree *DT;
1871   LoopInfo *LI;
1872 
1873   SCEVExpander SCEVExp;
1874   SCEVExpander MemCheckExp;
1875 
1876 public:
1877   GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI,
1878                     const DataLayout &DL)
1879       : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"),
1880         MemCheckExp(SE, DL, "scev.check") {}
1881 
1882   /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
1883   /// accurately estimate the cost of the runtime checks. The blocks are
  /// un-linked from the IR and are added back during vector code generation. If
1885   /// there is no vector code generation, the check blocks are removed
1886   /// completely.
1887   void Create(Loop *L, const LoopAccessInfo &LAI,
1888               const SCEVUnionPredicate &UnionPred) {
1889 
1890     BasicBlock *LoopHeader = L->getHeader();
1891     BasicBlock *Preheader = L->getLoopPreheader();
1892 
1893     // Use SplitBlock to create blocks for SCEV & memory runtime checks to
1894     // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
1895     // may be used by SCEVExpander. The blocks will be un-linked from their
1896     // predecessors and removed from LI & DT at the end of the function.
1897     if (!UnionPred.isAlwaysTrue()) {
1898       SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
1899                                   nullptr, "vector.scevcheck");
1900 
1901       SCEVCheckCond = SCEVExp.expandCodeForPredicate(
1902           &UnionPred, SCEVCheckBlock->getTerminator());
1903     }
1904 
1905     const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
1906     if (RtPtrChecking.Need) {
1907       auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
1908       MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
1909                                  "vector.memcheck");
1910 
1911       std::tie(std::ignore, MemRuntimeCheckCond) =
1912           addRuntimeChecks(MemCheckBlock->getTerminator(), L,
1913                            RtPtrChecking.getChecks(), MemCheckExp);
1914       assert(MemRuntimeCheckCond &&
1915              "no RT checks generated although RtPtrChecking "
1916              "claimed checks are required");
1917     }
1918 
1919     if (!MemCheckBlock && !SCEVCheckBlock)
1920       return;
1921 
    // Unhook the temporary blocks containing the checks and update the
    // various places accordingly.
1924     if (SCEVCheckBlock)
1925       SCEVCheckBlock->replaceAllUsesWith(Preheader);
1926     if (MemCheckBlock)
1927       MemCheckBlock->replaceAllUsesWith(Preheader);
1928 
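    // Restore the preheader's original terminator: each check block ends with
    // a branch to the block split off after it, so moving the terminators over
    // in order leaves the preheader terminated by the original branch. The
    // check blocks are capped with 'unreachable' until they are re-linked by
    // emitSCEVChecks/emitMemRuntimeChecks.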
1929     if (SCEVCheckBlock) {
1930       SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
1931       new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
1932       Preheader->getTerminator()->eraseFromParent();
1933     }
1934     if (MemCheckBlock) {
1935       MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
1936       new UnreachableInst(Preheader->getContext(), MemCheckBlock);
1937       Preheader->getTerminator()->eraseFromParent();
1938     }
1939 
1940     DT->changeImmediateDominator(LoopHeader, Preheader);
1941     if (MemCheckBlock) {
1942       DT->eraseNode(MemCheckBlock);
1943       LI->removeBlock(MemCheckBlock);
1944     }
1945     if (SCEVCheckBlock) {
1946       DT->eraseNode(SCEVCheckBlock);
1947       LI->removeBlock(SCEVCheckBlock);
1948     }
1949   }
1950 
1951   /// Remove the created SCEV & memory runtime check blocks & instructions, if
1952   /// unused.
1953   ~GeneratedRTChecks() {
1954     SCEVExpanderCleaner SCEVCleaner(SCEVExp, *DT);
1955     SCEVExpanderCleaner MemCheckCleaner(MemCheckExp, *DT);
1956     if (!SCEVCheckCond)
1957       SCEVCleaner.markResultUsed();
1958 
1959     if (!MemRuntimeCheckCond)
1960       MemCheckCleaner.markResultUsed();
1961 
1962     if (MemRuntimeCheckCond) {
1963       auto &SE = *MemCheckExp.getSE();
1964       // Memory runtime check generation creates compares that use expanded
1965       // values. Remove them before running the SCEVExpanderCleaners.
1966       for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
1967         if (MemCheckExp.isInsertedInstruction(&I))
1968           continue;
1969         SE.forgetValue(&I);
1970         SE.eraseValueFromMap(&I);
1971         I.eraseFromParent();
1972       }
1973     }
1974     MemCheckCleaner.cleanup();
1975     SCEVCleaner.cleanup();
1976 
1977     if (SCEVCheckCond)
1978       SCEVCheckBlock->eraseFromParent();
1979     if (MemRuntimeCheckCond)
1980       MemCheckBlock->eraseFromParent();
1981   }
1982 
1983   /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and
1984   /// adjusts the branches to branch to the vector preheader or \p Bypass,
1985   /// depending on the generated condition.
1986   BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass,
1987                              BasicBlock *LoopVectorPreHeader,
1988                              BasicBlock *LoopExitBlock) {
1989     if (!SCEVCheckCond)
1990       return nullptr;
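    // If the check condition folded to a constant false, the predicate is
    // trivially satisfied and no check block needs to be emitted.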
1991     if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond))
1992       if (C->isZero())
1993         return nullptr;
1994 
1995     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
1996 
1997     BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock);
    // If the vector loop preheader is nested inside a loop, the new check
    // block belongs to that loop as well.
1999     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2000       PL->addBasicBlockToLoop(SCEVCheckBlock, *LI);
2001 
2002     SCEVCheckBlock->getTerminator()->eraseFromParent();
2003     SCEVCheckBlock->moveBefore(LoopVectorPreHeader);
2004     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2005                                                 SCEVCheckBlock);
2006 
2007     DT->addNewBlock(SCEVCheckBlock, Pred);
2008     DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock);
2009 
2010     ReplaceInstWithInst(
2011         SCEVCheckBlock->getTerminator(),
2012         BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond));
2013     // Mark the check as used, to prevent it from being removed during cleanup.
2014     SCEVCheckCond = nullptr;
2015     return SCEVCheckBlock;
2016   }
2017 
2018   /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts
2019   /// the branches to branch to the vector preheader or \p Bypass, depending on
2020   /// the generated condition.
2021   BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass,
2022                                    BasicBlock *LoopVectorPreHeader) {
    // Check if we generated code that checks at runtime whether arrays overlap.
2024     if (!MemRuntimeCheckCond)
2025       return nullptr;
2026 
2027     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2028     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2029                                                 MemCheckBlock);
2030 
2031     DT->addNewBlock(MemCheckBlock, Pred);
2032     DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock);
2033     MemCheckBlock->moveBefore(LoopVectorPreHeader);
2034 
2035     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2036       PL->addBasicBlockToLoop(MemCheckBlock, *LI);
2037 
2038     ReplaceInstWithInst(
2039         MemCheckBlock->getTerminator(),
2040         BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond));
2041     MemCheckBlock->getTerminator()->setDebugLoc(
2042         Pred->getTerminator()->getDebugLoc());
2043 
2044     // Mark the check as used, to prevent it from being removed during cleanup.
2045     MemRuntimeCheckCond = nullptr;
2046     return MemCheckBlock;
2047   }
2048 };
2049 
2050 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
2051 // vectorization. The loop needs to be annotated with #pragma omp simd
// simdlen(#) or #pragma clang loop vectorize(enable) vectorize_width(#). If
// the vector length information is not provided, vectorization is not
// considered explicit. Interleave hints are not allowed either. These
// limitations will be relaxed in the future.
// Please note that we are currently forced to abuse the pragma 'clang
2057 // vectorize' semantics. This pragma provides *auto-vectorization hints*
2058 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
2059 // provides *explicit vectorization hints* (LV can bypass legal checks and
2060 // assume that vectorization is legal). However, both hints are implemented
2061 // using the same metadata (llvm.loop.vectorize, processed by
2062 // LoopVectorizeHints). This will be fixed in the future when the native IR
2063 // representation for pragma 'omp simd' is introduced.
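//
// For example, an outer loop annotated at source level as follows (a sketch;
// the annotation is what matters, not the loop body) is considered for
// explicit vectorization:
//
//   #pragma clang loop vectorize(enable) vectorize_width(4)
//   for (int i = 0; i < N; ++i)
//     for (int j = 0; j < M; ++j)
//       A[i][j] = 0;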
2064 static bool isExplicitVecOuterLoop(Loop *OuterLp,
2065                                    OptimizationRemarkEmitter *ORE) {
2066   assert(!OuterLp->isInnermost() && "This is not an outer loop");
2067   LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
2068 
2069   // Only outer loops with an explicit vectorization hint are supported.
2070   // Unannotated outer loops are ignored.
2071   if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
2072     return false;
2073 
2074   Function *Fn = OuterLp->getHeader()->getParent();
2075   if (!Hints.allowVectorization(Fn, OuterLp,
2076                                 true /*VectorizeOnlyWhenForced*/)) {
2077     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
2078     return false;
2079   }
2080 
2081   if (Hints.getInterleave() > 1) {
2082     // TODO: Interleave support is future work.
2083     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
2084                          "outer loops.\n");
2085     Hints.emitRemarkWithHints();
2086     return false;
2087   }
2088 
2089   return true;
2090 }
2091 
2092 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
2093                                   OptimizationRemarkEmitter *ORE,
2094                                   SmallVectorImpl<Loop *> &V) {
2095   // Collect inner loops and outer loops without irreducible control flow. For
2096   // now, only collect outer loops that have explicit vectorization hints. If we
2097   // are stress testing the VPlan H-CFG construction, we collect the outermost
2098   // loop of every loop nest.
2099   if (L.isInnermost() || VPlanBuildStressTest ||
2100       (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
2101     LoopBlocksRPO RPOT(&L);
2102     RPOT.perform(LI);
2103     if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
2104       V.push_back(&L);
2105       // TODO: Collect inner loops inside marked outer loops in case
2106       // vectorization fails for the outer loop. Do not invoke
2107       // 'containsIrreducibleCFG' again for inner loops when the outer loop is
2108       // already known to be reducible. We can use an inherited attribute for
2109       // that.
2110       return;
2111     }
2112   }
2113   for (Loop *InnerL : L)
2114     collectSupportedLoops(*InnerL, LI, ORE, V);
2115 }
2116 
2117 namespace {
2118 
2119 /// The LoopVectorize Pass.
2120 struct LoopVectorize : public FunctionPass {
2121   /// Pass identification, replacement for typeid
2122   static char ID;
2123 
2124   LoopVectorizePass Impl;
2125 
2126   explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
2127                          bool VectorizeOnlyWhenForced = false)
2128       : FunctionPass(ID),
2129         Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
2130     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
2131   }
2132 
2133   bool runOnFunction(Function &F) override {
2134     if (skipFunction(F))
2135       return false;
2136 
2137     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2138     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2139     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
2140     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2141     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
2142     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
2143     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
2144     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2145     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2146     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
2147     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
2148     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2149     auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
2150 
2151     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
2152         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
2153 
2154     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
2155                         GetLAA, *ORE, PSI).MadeAnyChange;
2156   }
2157 
2158   void getAnalysisUsage(AnalysisUsage &AU) const override {
2159     AU.addRequired<AssumptionCacheTracker>();
2160     AU.addRequired<BlockFrequencyInfoWrapperPass>();
2161     AU.addRequired<DominatorTreeWrapperPass>();
2162     AU.addRequired<LoopInfoWrapperPass>();
2163     AU.addRequired<ScalarEvolutionWrapperPass>();
2164     AU.addRequired<TargetTransformInfoWrapperPass>();
2165     AU.addRequired<AAResultsWrapperPass>();
2166     AU.addRequired<LoopAccessLegacyAnalysis>();
2167     AU.addRequired<DemandedBitsWrapperPass>();
2168     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2169     AU.addRequired<InjectTLIMappingsLegacy>();
2170 
2171     // We currently do not preserve loopinfo/dominator analyses with outer loop
2172     // vectorization. Until this is addressed, mark these analyses as preserved
2173     // only for non-VPlan-native path.
2174     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
2175     if (!EnableVPlanNativePath) {
2176       AU.addPreserved<LoopInfoWrapperPass>();
2177       AU.addPreserved<DominatorTreeWrapperPass>();
2178     }
2179 
2180     AU.addPreserved<BasicAAWrapperPass>();
2181     AU.addPreserved<GlobalsAAWrapperPass>();
2182     AU.addRequired<ProfileSummaryInfoWrapperPass>();
2183   }
2184 };
2185 
2186 } // end anonymous namespace
2187 
2188 //===----------------------------------------------------------------------===//
// Implementation of LoopVectorizationLegality, InnerLoopVectorizer,
// LoopVectorizationCostModel and LoopVectorizationPlanner.
2191 //===----------------------------------------------------------------------===//
2192 
2193 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
  // We need to place the broadcast of invariant variables outside the loop,
  // but only if it's proven safe to do so. Otherwise, the broadcast will be
  // placed inside the vector loop body.
2197   Instruction *Instr = dyn_cast<Instruction>(V);
2198   bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
2199                      (!Instr ||
2200                       DT->dominates(Instr->getParent(), LoopVectorPreHeader));
2201   // Place the code for broadcasting invariant variables in the new preheader.
2202   IRBuilder<>::InsertPointGuard Guard(Builder);
2203   if (SafeToHoist)
2204     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2205 
2206   // Broadcast the scalar into all locations in the vector.
2207   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
2208 
2209   return Shuf;
2210 }
2211 
2212 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
2213     const InductionDescriptor &II, Value *Step, Value *Start,
2214     Instruction *EntryVal, VPValue *Def, VPValue *CastDef,
2215     VPTransformState &State) {
2216   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
2217          "Expected either an induction phi-node or a truncate of it!");
2218 
  // Construct the initial value of the vector IV in the vector loop preheader.
2220   auto CurrIP = Builder.saveIP();
2221   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2222   if (isa<TruncInst>(EntryVal)) {
2223     assert(Start->getType()->isIntegerTy() &&
2224            "Truncation requires an integer type");
2225     auto *TruncType = cast<IntegerType>(EntryVal->getType());
2226     Step = Builder.CreateTrunc(Step, TruncType);
2227     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
2228   }
2229   Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
2230   Value *SteppedStart =
2231       getStepVector(SplatStart, 0, Step, II.getInductionOpcode());
2232 
2233   // We create vector phi nodes for both integer and floating-point induction
2234   // variables. Here, we determine the kind of arithmetic we will perform.
2235   Instruction::BinaryOps AddOp;
2236   Instruction::BinaryOps MulOp;
2237   if (Step->getType()->isIntegerTy()) {
2238     AddOp = Instruction::Add;
2239     MulOp = Instruction::Mul;
2240   } else {
2241     AddOp = II.getInductionOpcode();
2242     MulOp = Instruction::FMul;
2243   }
2244 
2245   // Multiply the vectorization factor by the step using integer or
2246   // floating-point arithmetic as appropriate.
2247   Value *ConstVF =
2248       getSignedIntOrFpConstant(Step->getType(), VF.getKnownMinValue());
2249   Value *Mul = Builder.CreateBinOp(MulOp, Step, ConstVF);
2250 
2251   // Create a vector splat to use in the induction update.
2252   //
2253   // FIXME: If the step is non-constant, we create the vector splat with
2254   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
2255   //        handle a constant vector splat.
2256   assert(!VF.isScalable() && "scalable vectors not yet supported.");
2257   Value *SplatVF = isa<Constant>(Mul)
2258                        ? ConstantVector::getSplat(VF, cast<Constant>(Mul))
2259                        : Builder.CreateVectorSplat(VF, Mul);
2260   Builder.restoreIP(CurrIP);
2261 
2262   // We may need to add the step a number of times, depending on the unroll
2263   // factor. The last of those goes into the PHI.
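  // For example, with UF = 2 and a fixed VF of 4, the generated sequence is:
  //   vec.ind      = phi [ SteppedStart, preheader ], [ vec.ind.next, latch ]
  //   step.add     = vec.ind + splat(4 * Step)   ; value for part 1
  //   vec.ind.next = step.add + splat(4 * Step)  ; moved to the latch below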
2264   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
2265                                     &*LoopVectorBody->getFirstInsertionPt());
2266   VecInd->setDebugLoc(EntryVal->getDebugLoc());
2267   Instruction *LastInduction = VecInd;
2268   for (unsigned Part = 0; Part < UF; ++Part) {
2269     State.set(Def, LastInduction, Part);
2270 
2271     if (isa<TruncInst>(EntryVal))
2272       addMetadata(LastInduction, EntryVal);
2273     recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, CastDef,
2274                                           State, Part);
2275 
2276     LastInduction = cast<Instruction>(
2277         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"));
2278     LastInduction->setDebugLoc(EntryVal->getDebugLoc());
2279   }
2280 
2281   // Move the last step to the end of the latch block. This ensures consistent
2282   // placement of all induction updates.
2283   auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
2284   auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
2285   auto *ICmp = cast<Instruction>(Br->getCondition());
2286   LastInduction->moveBefore(ICmp);
2287   LastInduction->setName("vec.ind.next");
2288 
2289   VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
2290   VecInd->addIncoming(LastInduction, LoopVectorLatch);
2291 }
2292 
2293 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
2294   return Cost->isScalarAfterVectorization(I, VF) ||
2295          Cost->isProfitableToScalarize(I, VF);
2296 }
2297 
2298 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
2299   if (shouldScalarizeInstruction(IV))
2300     return true;
2301   auto isScalarInst = [&](User *U) -> bool {
2302     auto *I = cast<Instruction>(U);
2303     return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
2304   };
2305   return llvm::any_of(IV->users(), isScalarInst);
2306 }
2307 
2308 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast(
2309     const InductionDescriptor &ID, const Instruction *EntryVal,
2310     Value *VectorLoopVal, VPValue *CastDef, VPTransformState &State,
2311     unsigned Part, unsigned Lane) {
2312   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
2313          "Expected either an induction phi-node or a truncate of it!");
2314 
  // This induction variable is not the phi from the original loop but the
  // newly-created IV, based on the proof that the casted Phi is equal to the
  // uncasted Phi in the vectorized loop (possibly under a runtime guard). It
  // re-uses the same InductionDescriptor as the original IV, but we don't
  // have to do any recording in this case - that is done when the original IV
  // is processed.
2321   if (isa<TruncInst>(EntryVal))
2322     return;
2323 
2324   const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
2325   if (Casts.empty())
2326     return;
  // Only the first Cast instruction in the Casts vector is of interest. The
  // rest of the Casts (if any exist) have no uses outside the induction
  // update chain itself.
2330   if (Lane < UINT_MAX)
2331     State.set(CastDef, VectorLoopVal, VPIteration(Part, Lane));
2332   else
2333     State.set(CastDef, VectorLoopVal, Part);
2334 }
2335 
2336 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, Value *Start,
2337                                                 TruncInst *Trunc, VPValue *Def,
2338                                                 VPValue *CastDef,
2339                                                 VPTransformState &State) {
2340   assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
2341          "Primary induction variable must have an integer type");
2342 
2343   auto II = Legal->getInductionVars().find(IV);
2344   assert(II != Legal->getInductionVars().end() && "IV is not an induction");
2345 
2346   auto ID = II->second;
2347   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
2348 
2349   // The value from the original loop to which we are mapping the new induction
2350   // variable.
2351   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
2352 
2353   auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
2354 
  // Generate code for the induction step. Note that induction steps are
  // required to be loop-invariant.
2357   auto CreateStepValue = [&](const SCEV *Step) -> Value * {
2358     assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) &&
2359            "Induction step should be loop invariant");
2360     if (PSE.getSE()->isSCEVable(IV->getType())) {
2361       SCEVExpander Exp(*PSE.getSE(), DL, "induction");
2362       return Exp.expandCodeFor(Step, Step->getType(),
2363                                LoopVectorPreHeader->getTerminator());
2364     }
2365     return cast<SCEVUnknown>(Step)->getValue();
2366   };
2367 
2368   // The scalar value to broadcast. This is derived from the canonical
2369   // induction variable. If a truncation type is given, truncate the canonical
2370   // induction variable and step. Otherwise, derive these values from the
2371   // induction descriptor.
2372   auto CreateScalarIV = [&](Value *&Step) -> Value * {
2373     Value *ScalarIV = Induction;
2374     if (IV != OldInduction) {
2375       ScalarIV = IV->getType()->isIntegerTy()
2376                      ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
2377                      : Builder.CreateCast(Instruction::SIToFP, Induction,
2378                                           IV->getType());
2379       ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID);
2380       ScalarIV->setName("offset.idx");
2381     }
2382     if (Trunc) {
2383       auto *TruncType = cast<IntegerType>(Trunc->getType());
2384       assert(Step->getType()->isIntegerTy() &&
2385              "Truncation requires an integer step");
2386       ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
2387       Step = Builder.CreateTrunc(Step, TruncType);
2388     }
2389     return ScalarIV;
2390   };
2391 
  // Create the vector values from the scalar IV, for the case where we are
  // not creating a vector IV.
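  // A sketch with assumed values: for VF = 4, UF = 2 and an integer step S,
  // part 0 becomes broadcast(ScalarIV) + <0, 1, 2, 3> * S and part 1 becomes
  // broadcast(ScalarIV) + <4, 5, 6, 7> * S (see getStepVector below).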
2394   auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) {
2395     Value *Broadcasted = getBroadcastInstrs(ScalarIV);
2396     for (unsigned Part = 0; Part < UF; ++Part) {
2397       assert(!VF.isScalable() && "scalable vectors not yet supported.");
2398       Value *EntryPart =
2399           getStepVector(Broadcasted, VF.getKnownMinValue() * Part, Step,
2400                         ID.getInductionOpcode());
2401       State.set(Def, EntryPart, Part);
2402       if (Trunc)
2403         addMetadata(EntryPart, Trunc);
2404       recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, CastDef,
2405                                             State, Part);
2406     }
2407   };
2408 
2409   // Fast-math-flags propagate from the original induction instruction.
2410   IRBuilder<>::FastMathFlagGuard FMFG(Builder);
2411   if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp()))
2412     Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags());
2413 
2414   // Now do the actual transformations, and start with creating the step value.
2415   Value *Step = CreateStepValue(ID.getStep());
2416   if (VF.isZero() || VF.isScalar()) {
2417     Value *ScalarIV = CreateScalarIV(Step);
2418     CreateSplatIV(ScalarIV, Step);
2419     return;
2420   }
2421 
2422   // Determine if we want a scalar version of the induction variable. This is
2423   // true if the induction variable itself is not widened, or if it has at
2424   // least one user in the loop that is not widened.
2425   auto NeedsScalarIV = needsScalarInduction(EntryVal);
2426   if (!NeedsScalarIV) {
2427     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
2428                                     State);
2429     return;
2430   }
2431 
2432   // Try to create a new independent vector induction variable. If we can't
2433   // create the phi node, we will splat the scalar induction variable in each
2434   // loop iteration.
2435   if (!shouldScalarizeInstruction(EntryVal)) {
2436     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
2437                                     State);
2438     Value *ScalarIV = CreateScalarIV(Step);
2439     // Create scalar steps that can be used by instructions we will later
2440     // scalarize. Note that the addition of the scalar steps will not increase
2441     // the number of instructions in the loop in the common case prior to
2442     // InstCombine. We will be trading one vector extract for each scalar step.
2443     buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
2444     return;
2445   }
2446 
  // All IV users are scalar instructions, so only emit a scalar IV, not a
  // vectorized IV, except when we tail-fold: then the splat IV feeds the
  // predicate used by the masked loads/stores.
2450   Value *ScalarIV = CreateScalarIV(Step);
2451   if (!Cost->isScalarEpilogueAllowed())
2452     CreateSplatIV(ScalarIV, Step);
2453   buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
2454 }
2455 
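// An illustrative sketch (values assumed for exposition): for a broadcast
// integer IV Val = <i, i, i, i> (VF = 4), StartIdx = 4 and Step = 2,
// getStepVector computes
//   <i, i, i, i> + <4, 5, 6, 7> * <2, 2, 2, 2> = <i+8, i+10, i+12, i+14>,
// i.e. the lanes of the second unroll part of the induction.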
2456 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
2457                                           Instruction::BinaryOps BinOp) {
2458   // Create and check the types.
2459   auto *ValVTy = cast<FixedVectorType>(Val->getType());
2460   int VLen = ValVTy->getNumElements();
2461 
2462   Type *STy = Val->getType()->getScalarType();
2463   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
2464          "Induction Step must be an integer or FP");
2465   assert(Step->getType() == STy && "Step has wrong type");
2466 
2467   SmallVector<Constant *, 8> Indices;
2468 
2469   if (STy->isIntegerTy()) {
    // Create a vector of consecutive numbers starting at StartIdx.
2471     for (int i = 0; i < VLen; ++i)
2472       Indices.push_back(ConstantInt::get(STy, StartIdx + i));
2473 
2474     // Add the consecutive indices to the vector value.
2475     Constant *Cv = ConstantVector::get(Indices);
2476     assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
2477     Step = Builder.CreateVectorSplat(VLen, Step);
2478     assert(Step->getType() == Val->getType() && "Invalid step vec");
    // FIXME: The newly created binary instructions should contain nsw/nuw
    // flags, which can be found from the original scalar operations.
2481     Step = Builder.CreateMul(Cv, Step);
2482     return Builder.CreateAdd(Val, Step, "induction");
2483   }
2484 
2485   // Floating point induction.
2486   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
2487          "Binary Opcode should be specified for FP induction");
  // Create a vector of consecutive numbers starting at StartIdx.
2489   for (int i = 0; i < VLen; ++i)
2490     Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i)));
2491 
2492   // Add the consecutive indices to the vector value.
2493   // Floating-point operations inherit FMF via the builder's flags.
2494   Constant *Cv = ConstantVector::get(Indices);
2495   Step = Builder.CreateVectorSplat(VLen, Step);
2496   Value *MulOp = Builder.CreateFMul(Cv, Step);
2497   return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
2498 }
2499 
2500 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
2501                                            Instruction *EntryVal,
2502                                            const InductionDescriptor &ID,
2503                                            VPValue *Def, VPValue *CastDef,
2504                                            VPTransformState &State) {
2505   // We shouldn't have to build scalar steps if we aren't vectorizing.
2506   assert(VF.isVector() && "VF should be greater than one");
  // Get the value type and ensure it and the step have the same type.
2508   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
2509   assert(ScalarIVTy == Step->getType() &&
2510          "Val and Step should have the same type");
2511 
2512   // We build scalar steps for both integer and floating-point induction
2513   // variables. Here, we determine the kind of arithmetic we will perform.
2514   Instruction::BinaryOps AddOp;
2515   Instruction::BinaryOps MulOp;
2516   if (ScalarIVTy->isIntegerTy()) {
2517     AddOp = Instruction::Add;
2518     MulOp = Instruction::Mul;
2519   } else {
2520     AddOp = ID.getInductionOpcode();
2521     MulOp = Instruction::FMul;
2522   }
2523 
2524   // Determine the number of scalars we need to generate for each unroll
2525   // iteration. If EntryVal is uniform, we only need to generate the first
2526   // lane. Otherwise, we generate all VF values.
2527   unsigned Lanes =
2528       Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF)
2529           ? 1
2530           : VF.getKnownMinValue();
2531   assert((!VF.isScalable() || Lanes == 1) &&
2532          "Should never scalarize a scalable vector");
2533   // Compute the scalar steps and save the results in State.
2534   for (unsigned Part = 0; Part < UF; ++Part) {
2535     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2536       auto *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
2537                                          ScalarIVTy->getScalarSizeInBits());
2538       Value *StartIdx =
2539           createStepForVF(Builder, ConstantInt::get(IntStepTy, Part), VF);
2540       if (ScalarIVTy->isFloatingPointTy())
2541         StartIdx = Builder.CreateSIToFP(StartIdx, ScalarIVTy);
2542       StartIdx = Builder.CreateBinOp(
2543           AddOp, StartIdx, getSignedIntOrFpConstant(ScalarIVTy, Lane));
2544       // The step returned by `createStepForVF` is a runtime-evaluated value
2545       // when VF is scalable. Otherwise, it should be folded into a Constant.
2546       assert((VF.isScalable() || isa<Constant>(StartIdx)) &&
2547              "Expected StartIdx to be folded to a constant when VF is not "
2548              "scalable");
2549       auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
2550       auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul);
2551       State.set(Def, Add, VPIteration(Part, Lane));
2552       recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State,
2553                                             Part, Lane);
2554     }
2555   }
2556 }
2557 
2558 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def,
2559                                                     const VPIteration &Instance,
2560                                                     VPTransformState &State) {
2561   Value *ScalarInst = State.get(Def, Instance);
2562   Value *VectorValue = State.get(Def, Instance.Part);
2563   VectorValue = Builder.CreateInsertElement(
2564       VectorValue, ScalarInst,
2565       Instance.Lane.getAsRuntimeExpr(State.Builder, VF));
2566   State.set(Def, VectorValue, Instance.Part);
2567 }
2568 
2569 Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
2570   assert(Vec->getType()->isVectorTy() && "Invalid type");
2571   assert(!VF.isScalable() && "Cannot reverse scalable vectors");
2572   SmallVector<int, 8> ShuffleMask;
2573   for (unsigned i = 0; i < VF.getKnownMinValue(); ++i)
2574     ShuffleMask.push_back(VF.getKnownMinValue() - i - 1);
2575 
2576   return Builder.CreateShuffleVector(Vec, ShuffleMask, "reverse");
2577 }
2578 
2579 // Return whether we allow using masked interleave-groups (for dealing with
2580 // strided loads/stores that reside in predicated blocks, or for dealing
2581 // with gaps).
2582 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2583   // If an override option has been passed in for interleaved accesses, use it.
2584   if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2585     return EnableMaskedInterleavedMemAccesses;
2586 
2587   return TTI.enableMaskedInterleavedAccessVectorization();
2588 }
2589 
2590 // Try to vectorize the interleave group that \p Instr belongs to.
2591 //
// E.g., translate the following interleaved load group (factor = 3):
2593 //   for (i = 0; i < N; i+=3) {
2594 //     R = Pic[i];             // Member of index 0
2595 //     G = Pic[i+1];           // Member of index 1
2596 //     B = Pic[i+2];           // Member of index 2
2597 //     ... // do something to R, G, B
2598 //   }
2599 // To:
2600 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2601 //   %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9>   ; R elements
2602 //   %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10>  ; G elements
2603 //   %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11>  ; B elements
2604 //
// Or translate the following interleaved store group (factor = 3):
2606 //   for (i = 0; i < N; i+=3) {
2607 //     ... do something to R, G, B
2608 //     Pic[i]   = R;           // Member of index 0
2609 //     Pic[i+1] = G;           // Member of index 1
2610 //     Pic[i+2] = B;           // Member of index 2
2611 //   }
2612 // To:
2613 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2614 //   %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
2615 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2616 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2617 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2618 void InnerLoopVectorizer::vectorizeInterleaveGroup(
2619     const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
2620     VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
2621     VPValue *BlockInMask) {
2622   Instruction *Instr = Group->getInsertPos();
2623   const DataLayout &DL = Instr->getModule()->getDataLayout();
2624 
2625   // Prepare for the vector type of the interleaved load/store.
2626   Type *ScalarTy = getMemInstValueType(Instr);
2627   unsigned InterleaveFactor = Group->getFactor();
2628   assert(!VF.isScalable() && "scalable vectors not yet supported.");
2629   auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);
2630 
2631   // Prepare for the new pointers.
2632   SmallVector<Value *, 2> AddrParts;
2633   unsigned Index = Group->getIndex(Instr);
2634 
2635   // TODO: extend the masked interleaved-group support to reversed access.
2636   assert((!BlockInMask || !Group->isReverse()) &&
2637          "Reversed masked interleave-group not supported.");
2638 
  // If the group is reversed, adjust the index to refer to the last vector
  // lane instead of the first. We adjust the index from the first vector lane,
2641   // rather than directly getting the pointer for lane VF - 1, because the
2642   // pointer operand of the interleaved access is supposed to be uniform. For
2643   // uniform instructions, we're only required to generate a value for the
2644   // first vector lane in each unroll iteration.
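  // For example (assumed values): with VF = 4 and factor = 3, the index is
  // advanced by (4 - 1) * 3 = 9, so after the common -Index adjustment below
  // the wide access begins three full tuples lower and covers the elements
  // of all four lanes.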
2645   assert(!VF.isScalable() &&
2646          "scalable vector reverse operation is not implemented");
2647   if (Group->isReverse())
2648     Index += (VF.getKnownMinValue() - 1) * Group->getFactor();
2649 
2650   for (unsigned Part = 0; Part < UF; Part++) {
2651     Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
2652     setDebugLocFromInst(Builder, AddrPart);
2653 
    // Note that the current instruction could be at any index. We need to
    // adjust the address to the member of index 0.
    //
    // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
    //       b = A[i];       // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
    //
    // E.g.  A[i+1] = a;     // Member of index 1
    //       A[i]   = b;     // Member of index 0
    //       A[i+2] = c;     // Member of index 2 (Current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].
2665 
2666     bool InBounds = false;
2667     if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
2668       InBounds = gep->isInBounds();
2669     AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2670     cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2671 
2672     // Cast to the vector pointer type.
2673     unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
2674     Type *PtrTy = VecTy->getPointerTo(AddressSpace);
2675     AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
2676   }
2677 
2678   setDebugLocFromInst(Builder, Instr);
2679   Value *PoisonVec = PoisonValue::get(VecTy);
2680 
2681   Value *MaskForGaps = nullptr;
2682   if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
2683     assert(!VF.isScalable() && "scalable vectors not yet supported.");
2684     MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2685     assert(MaskForGaps && "Mask for Gaps is required but it is null");
2686   }
2687 
2688   // Vectorize the interleaved load group.
2689   if (isa<LoadInst>(Instr)) {
2690     // For each unroll part, create a wide load for the group.
2691     SmallVector<Value *, 2> NewLoads;
2692     for (unsigned Part = 0; Part < UF; Part++) {
2693       Instruction *NewLoad;
2694       if (BlockInMask || MaskForGaps) {
2695         assert(useMaskedInterleavedAccesses(*TTI) &&
2696                "masked interleaved groups are not allowed.");
2697         Value *GroupMask = MaskForGaps;
2698         if (BlockInMask) {
2699           Value *BlockInMaskPart = State.get(BlockInMask, Part);
2700           assert(!VF.isScalable() && "scalable vectors not yet supported.");
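          // Illustrative example (assumed VF = 4, factor = 3): a per-lane
          // mask <m0, m1, m2, m3> is replicated to
          // <m0, m0, m0, m1, m1, m1, m2, m2, m2, m3, m3, m3>, so all members
          // of a lane's tuple share that lane's predicate.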
2701           Value *ShuffledMask = Builder.CreateShuffleVector(
2702               BlockInMaskPart,
2703               createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2704               "interleaved.mask");
2705           GroupMask = MaskForGaps
2706                           ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2707                                                 MaskForGaps)
2708                           : ShuffledMask;
2709         }
2710         NewLoad =
2711             Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(),
2712                                      GroupMask, PoisonVec, "wide.masked.vec");
      } else
        NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
                                            Group->getAlign(), "wide.vec");
2717       Group->addMetadata(NewLoad);
2718       NewLoads.push_back(NewLoad);
2719     }
2720 
2721     // For each member in the group, shuffle out the appropriate data from the
2722     // wide loads.
2723     unsigned J = 0;
2724     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2725       Instruction *Member = Group->getMember(I);
2726 
2727       // Skip the gaps in the group.
2728       if (!Member)
2729         continue;
2730 
2731       assert(!VF.isScalable() && "scalable vectors not yet supported.");
2732       auto StrideMask =
2733           createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
2734       for (unsigned Part = 0; Part < UF; Part++) {
2735         Value *StridedVec = Builder.CreateShuffleVector(
2736             NewLoads[Part], StrideMask, "strided.vec");
2737 
        // If this member has a different type, cast the result to that type.
2739         if (Member->getType() != ScalarTy) {
2740           assert(!VF.isScalable() && "VF is assumed to be non scalable.");
2741           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2742           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2743         }
2744 
2745         if (Group->isReverse())
2746           StridedVec = reverseVector(StridedVec);
2747 
2748         State.set(VPDefs[J], StridedVec, Part);
2749       }
2750       ++J;
2751     }
2752     return;
2753   }
2754 
  // The subvector type for the current instruction.
2756   assert(!VF.isScalable() && "VF is assumed to be non scalable.");
2757   auto *SubVT = VectorType::get(ScalarTy, VF);
2758 
2759   // Vectorize the interleaved store group.
2760   for (unsigned Part = 0; Part < UF; Part++) {
2761     // Collect the stored vector from each member.
2762     SmallVector<Value *, 4> StoredVecs;
2763     for (unsigned i = 0; i < InterleaveFactor; i++) {
      // Interleaved store groups don't allow gaps, so each index has a member.
      assert(Group->getMember(i) &&
             "Failed to get a member from an interleaved store group");
2766 
2767       Value *StoredVec = State.get(StoredValues[i], Part);
2768 
2769       if (Group->isReverse())
2770         StoredVec = reverseVector(StoredVec);
2771 
      // If this member has a different type, cast it to a unified type.
      if (StoredVec->getType() != SubVT)
2775         StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2776 
2777       StoredVecs.push_back(StoredVec);
2778     }
2779 
2780     // Concatenate all vectors into a wide vector.
2781     Value *WideVec = concatenateVectors(Builder, StoredVecs);
2782 
2783     // Interleave the elements in the wide vector.
2784     assert(!VF.isScalable() && "scalable vectors not yet supported.");
2785     Value *IVec = Builder.CreateShuffleVector(
2786         WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor),
2787         "interleaved.vec");
2788 
2789     Instruction *NewStoreInstr;
2790     if (BlockInMask) {
2791       Value *BlockInMaskPart = State.get(BlockInMask, Part);
2792       Value *ShuffledMask = Builder.CreateShuffleVector(
2793           BlockInMaskPart,
2794           createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2795           "interleaved.mask");
2796       NewStoreInstr = Builder.CreateMaskedStore(
2797           IVec, AddrParts[Part], Group->getAlign(), ShuffledMask);
    } else
      NewStoreInstr =
          Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());
2802 
2803     Group->addMetadata(NewStoreInstr);
2804   }
2805 }
2806 
2807 void InnerLoopVectorizer::vectorizeMemoryInstruction(
2808     Instruction *Instr, VPTransformState &State, VPValue *Def, VPValue *Addr,
2809     VPValue *StoredValue, VPValue *BlockInMask) {
2810   // Attempt to issue a wide load.
2811   LoadInst *LI = dyn_cast<LoadInst>(Instr);
2812   StoreInst *SI = dyn_cast<StoreInst>(Instr);
2813 
2814   assert((LI || SI) && "Invalid Load/Store instruction");
2815   assert((!SI || StoredValue) && "No stored value provided for widened store");
2816   assert((!LI || !StoredValue) && "Stored value provided for widened load");
2817 
2818   LoopVectorizationCostModel::InstWidening Decision =
2819       Cost->getWideningDecision(Instr, VF);
2820   assert((Decision == LoopVectorizationCostModel::CM_Widen ||
2821           Decision == LoopVectorizationCostModel::CM_Widen_Reverse ||
2822           Decision == LoopVectorizationCostModel::CM_GatherScatter) &&
2823          "CM decision is not to widen the memory instruction");
2824 
2825   Type *ScalarDataTy = getMemInstValueType(Instr);
2826 
2827   auto *DataTy = VectorType::get(ScalarDataTy, VF);
2828   const Align Alignment = getLoadStoreAlignment(Instr);
2829 
2830   // Determine if the pointer operand of the access is either consecutive or
2831   // reverse consecutive.
2832   bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse);
2833   bool ConsecutiveStride =
2834       Reverse || (Decision == LoopVectorizationCostModel::CM_Widen);
2835   bool CreateGatherScatter =
2836       (Decision == LoopVectorizationCostModel::CM_GatherScatter);
2837 
2838   // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector
2839   // gather/scatter. Otherwise Decision should have been to Scalarize.
2840   assert((ConsecutiveStride || CreateGatherScatter) &&
2841          "The instruction should be scalarized");
2842   (void)ConsecutiveStride;
2843 
2844   VectorParts BlockInMaskParts(UF);
2845   bool isMaskRequired = BlockInMask;
2846   if (isMaskRequired)
2847     for (unsigned Part = 0; Part < UF; ++Part)
2848       BlockInMaskParts[Part] = State.get(BlockInMask, Part);
2849 
2850   const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
2851     // Calculate the pointer for the specific unroll-part.
2852     GetElementPtrInst *PartPtr = nullptr;
2853 
2854     bool InBounds = false;
2855     if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
2856       InBounds = gep->isInBounds();
2857 
2858     if (Reverse) {
2859       assert(!VF.isScalable() &&
2860              "Reversing vectors is not yet supported for scalable vectors.");
2861 
2862       // If the address is consecutive but reversed, then the
2863       // wide store needs to start at the last vector element.
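      // A sketch with assumed values: for VF = 4 and Part = 0 this emits a
      // GEP by 0 followed by a GEP by 1 - 4 = -3, so the wide access covers
      // elements [Ptr - 3, Ptr] in memory order; the reversal of the loaded
      // or stored value then restores the original descending order.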
2864       PartPtr = cast<GetElementPtrInst>(Builder.CreateGEP(
2865           ScalarDataTy, Ptr, Builder.getInt32(-Part * VF.getKnownMinValue())));
2866       PartPtr->setIsInBounds(InBounds);
2867       PartPtr = cast<GetElementPtrInst>(Builder.CreateGEP(
2868           ScalarDataTy, PartPtr, Builder.getInt32(1 - VF.getKnownMinValue())));
2869       PartPtr->setIsInBounds(InBounds);
2870       if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
2871         BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]);
2872     } else {
2873       Value *Increment = createStepForVF(Builder, Builder.getInt32(Part), VF);
2874       PartPtr = cast<GetElementPtrInst>(
2875           Builder.CreateGEP(ScalarDataTy, Ptr, Increment));
2876       PartPtr->setIsInBounds(InBounds);
2877     }
2878 
2879     unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
2880     return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
2881   };
2882 
2883   // Handle Stores:
2884   if (SI) {
2885     setDebugLocFromInst(Builder, SI);
2886 
2887     for (unsigned Part = 0; Part < UF; ++Part) {
2888       Instruction *NewSI = nullptr;
2889       Value *StoredVal = State.get(StoredValue, Part);
2890       if (CreateGatherScatter) {
2891         Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2892         Value *VectorGep = State.get(Addr, Part);
2893         NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
2894                                             MaskPart);
2895       } else {
2896         if (Reverse) {
2897           // If we store to reverse consecutive memory locations, then we need
2898           // to reverse the order of elements in the stored value.
2899           StoredVal = reverseVector(StoredVal);
2900           // We don't want to update the value in the map as it might be used in
2901           // another expression. So don't call resetVectorValue(StoredVal).
2902         }
2903         auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0)));
2904         if (isMaskRequired)
2905           NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
2906                                             BlockInMaskParts[Part]);
2907         else
2908           NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
2909       }
2910       addMetadata(NewSI, SI);
2911     }
2912     return;
2913   }
2914 
2915   // Handle loads.
2916   assert(LI && "Must have a load instruction");
2917   setDebugLocFromInst(Builder, LI);
2918   for (unsigned Part = 0; Part < UF; ++Part) {
2919     Value *NewLI;
2920     if (CreateGatherScatter) {
2921       Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2922       Value *VectorGep = State.get(Addr, Part);
2923       NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart,
2924                                          nullptr, "wide.masked.gather");
2925       addMetadata(NewLI, LI);
2926     } else {
2927       auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0)));
2928       if (isMaskRequired)
2929         NewLI = Builder.CreateMaskedLoad(
2930             VecPtr, Alignment, BlockInMaskParts[Part], PoisonValue::get(DataTy),
2931             "wide.masked.load");
2932       else
2933         NewLI =
2934             Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
2935 
      // Add metadata to the load itself, but record the reverse shuffle as
      // the vector value.
2937       addMetadata(NewLI, LI);
2938       if (Reverse)
2939         NewLI = reverseVector(NewLI);
2940     }
2941 
2942     State.set(Def, NewLI, Part);
2943   }
2944 }
2945 
2946 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, VPValue *Def,
2947                                                VPUser &User,
2948                                                const VPIteration &Instance,
2949                                                bool IfPredicateInstr,
2950                                                VPTransformState &State) {
2951   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
2952 
2953   // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for
2954   // the first lane and part.
2955   if (isa<NoAliasScopeDeclInst>(Instr))
2956     if (!Instance.isFirstIteration())
2957       return;
2958 
2959   setDebugLocFromInst(Builder, Instr);
2960 
  // Does this instruction return a value?
2962   bool IsVoidRetTy = Instr->getType()->isVoidTy();
2963 
2964   Instruction *Cloned = Instr->clone();
2965   if (!IsVoidRetTy)
2966     Cloned->setName(Instr->getName() + ".cloned");
2967 
2968   State.Builder.SetInsertPoint(Builder.GetInsertBlock(),
2969                                Builder.GetInsertPoint());
2970   // Replace the operands of the cloned instructions with their scalar
2971   // equivalents in the new loop.
2972   for (unsigned op = 0, e = User.getNumOperands(); op != e; ++op) {
2973     auto *Operand = dyn_cast<Instruction>(Instr->getOperand(op));
2974     auto InputInstance = Instance;
2975     if (!Operand || !OrigLoop->contains(Operand) ||
2976         (Cost->isUniformAfterVectorization(Operand, State.VF)))
2977       InputInstance.Lane = VPLane::getFirstLane();
2978     auto *NewOp = State.get(User.getOperand(op), InputInstance);
2979     Cloned->setOperand(op, NewOp);
2980   }
2981   addNewMetadata(Cloned, Instr);
2982 
2983   // Place the cloned scalar in the new loop.
2984   Builder.Insert(Cloned);
2985 
2986   State.set(Def, Cloned, Instance);
2987 
  // If we just cloned a new assumption, add it to the assumption cache.
2989   if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
2990     if (II->getIntrinsicID() == Intrinsic::assume)
2991       AC->registerAssumption(II);
2992 
2993   // End if-block.
2994   if (IfPredicateInstr)
2995     PredicatedInstructions.push_back(Cloned);
2996 }
2997 
2998 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
2999                                                       Value *End, Value *Step,
3000                                                       Instruction *DL) {
3001   BasicBlock *Header = L->getHeader();
3002   BasicBlock *Latch = L->getLoopLatch();
  // As we're just creating this loop, it's possible that no latch exists
  // yet. If so, use the header, as this will be a single-block loop.
3005   if (!Latch)
3006     Latch = Header;
3007 
3008   IRBuilder<> Builder(&*Header->getFirstInsertionPt());
3009   Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
3010   setDebugLocFromInst(Builder, OldInst);
3011   auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
3012 
3013   Builder.SetInsertPoint(Latch->getTerminator());
3014   setDebugLocFromInst(Builder, OldInst);
3015 
3016   // Create i+1 and fill the PHINode.
3017   Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
3018   Induction->addIncoming(Start, L->getLoopPreheader());
3019   Induction->addIncoming(Next, Latch);
3020   // Create the compare.
3021   Value *ICmp = Builder.CreateICmpEQ(Next, End);
3022   Builder.CreateCondBr(ICmp, L->getUniqueExitBlock(), Header);
3023 
3024   // Now we have two terminators. Remove the old one from the block.
3025   Latch->getTerminator()->eraseFromParent();
3026 
3027   return Induction;
3028 }
3029 
3030 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
3031   if (TripCount)
3032     return TripCount;
3033 
3034   assert(L && "Create Trip Count for null loop.");
3035   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3036   // Find the loop boundaries.
3037   ScalarEvolution *SE = PSE.getSE();
3038   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
3039   assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
3040          "Invalid loop count");
3041 
3042   Type *IdxTy = Legal->getWidestInductionType();
3043   assert(IdxTy && "No type for induction");
3044 
  // The exit count might have type i64 while the phi has type i32. This can
  // happen if we have an induction variable that is sign-extended before the
  // compare. The only way we can get a backedge-taken count in that situation
  // is if the induction variable was signed and as such will not overflow; in
  // that case the truncation is legal.
3050   if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
3051       IdxTy->getPrimitiveSizeInBits())
3052     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
3053   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
3054 
3055   // Get the total trip count from the count by adding 1.
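  // For example (illustrative): for "for (i = 0; i < n; ++i)" the
  // backedge-taken count is n - 1 and the overall trip count N is n.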
3056   const SCEV *ExitCount = SE->getAddExpr(
3057       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
3058 
3059   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
3060 
3061   // Expand the trip count and place the new instructions in the preheader.
3062   // Notice that the pre-header does not change, only the loop body.
3063   SCEVExpander Exp(*SE, DL, "induction");
3064 
3065   // Count holds the overall loop count (N).
3066   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
3067                                 L->getLoopPreheader()->getTerminator());
3068 
3069   if (TripCount->getType()->isPointerTy())
3070     TripCount =
3071         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
3072                                     L->getLoopPreheader()->getTerminator());
3073 
3074   return TripCount;
3075 }
3076 
3077 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
3078   if (VectorTripCount)
3079     return VectorTripCount;
3080 
3081   Value *TC = getOrCreateTripCount(L);
3082   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3083 
3084   Type *Ty = TC->getType();
3085   // This is where we can make the step a runtime constant.
3086   Value *Step = createStepForVF(Builder, ConstantInt::get(Ty, UF), VF);
3087 
3088   // If the tail is to be folded by masking, round the number of iterations N
3089   // up to a multiple of Step instead of rounding down. This is done by first
3090   // adding Step-1 and then rounding down. Note that it's ok if this addition
3091   // overflows: the vector induction variable will eventually wrap to zero given
3092   // that it starts at zero and its Step is a power of two; the loop will then
3093   // exit, with the last early-exit vector comparison also producing all-true.
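  // Worked example (assumed values): for VF = 4, UF = 2 (Step = 8) and
  // N = 10, TC becomes 10 + 7 = 17 and the vector trip count computed below
  // is 17 - (17 % 8) = 16, so the masked vector loop covers all 10
  // iterations in two vector steps.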
3094   if (Cost->foldTailByMasking()) {
3095     assert(isPowerOf2_32(VF.getKnownMinValue() * UF) &&
3096            "VF*UF must be a power of 2 when folding tail by masking");
3097     assert(!VF.isScalable() &&
3098            "Tail folding not yet supported for scalable vectors");
3099     TC = Builder.CreateAdd(
3100         TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up");
3101   }
3102 
3103   // Now we need to generate the expression for the part of the loop that the
3104   // vectorized body will execute. This is equal to N - (N % Step) if scalar
3105   // iterations are not required for correctness, or N - Step, otherwise. Step
3106   // is equal to the vectorization factor (number of SIMD elements) times the
3107   // unroll factor (number of SIMD instructions).
3108   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
3109 
  // There are two cases where we need to ensure (at least) the last iteration
  // runs in the scalar remainder loop. In those cases, if the step evenly
  // divides the trip count, we set the remainder to be equal to the step. If
  // the step does not evenly divide the trip count, no adjustment is necessary
  // since there will already be scalar iterations. Note that the minimum
  // iterations check ensures that N >= Step. The cases are:
3116   // 1) If there is a non-reversed interleaved group that may speculatively
3117   //    access memory out-of-bounds.
3118   // 2) If any instruction may follow a conditionally taken exit. That is, if
3119   //    the loop contains multiple exiting blocks, or a single exiting block
3120   //    which is not the latch.
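  // Numeric sketch (assumed values): with N = 16 and Step = 8, R would be 0,
  // so it is bumped to 8 and n.vec becomes 8; eight iterations then run
  // vectorized and the final eight run in the scalar remainder.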
3121   if (VF.isVector() && Cost->requiresScalarEpilogue()) {
3122     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
3123     R = Builder.CreateSelect(IsZero, Step, R);
3124   }
3125 
3126   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
3127 
3128   return VectorTripCount;
3129 }
3130 
3131 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
3132                                                    const DataLayout &DL) {
3133   // Verify that V is a vector type with same number of elements as DstVTy.
3134   auto *DstFVTy = cast<FixedVectorType>(DstVTy);
3135   unsigned VF = DstFVTy->getNumElements();
3136   auto *SrcVecTy = cast<FixedVectorType>(V->getType());
  assert((VF == SrcVecTy->getNumElements()) &&
         "Vector dimensions do not match");
3138   Type *SrcElemTy = SrcVecTy->getElementType();
3139   Type *DstElemTy = DstFVTy->getElementType();
3140   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
3141          "Vector elements must have same size");
3142 
3143   // Do a direct cast if element types are castable.
3144   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
3145     return Builder.CreateBitOrPointerCast(V, DstFVTy);
3146   }
  // V cannot be cast directly to the desired vector type. This may happen
  // when V is a floating point vector but DstVTy is a vector of pointers, or
  // vice-versa. Handle this using a two-step bitcast with an intermediate
  // integer type, i.e. Ptr <-> Int <-> Float.
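  // An assumed example: casting <2 x i64*> to <2 x double> on a target with
  // 64-bit pointers first emits a ptrtoint to <2 x i64> and then a bitcast
  // to <2 x double>.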
3151   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
3152          "Only one type should be a pointer type");
3153   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
3154          "Only one type should be a floating point type");
3155   Type *IntTy =
3156       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
3157   auto *VecIntTy = FixedVectorType::get(IntTy, VF);
3158   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
3159   return Builder.CreateBitOrPointerCast(CastVal, DstFVTy);
3160 }
3161 
3162 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
3163                                                          BasicBlock *Bypass) {
3164   Value *Count = getOrCreateTripCount(L);
3165   // Reuse existing vector loop preheader for TC checks.
3166   // Note that new preheader block is generated for vector loop.
3167   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
3168   IRBuilder<> Builder(TCCheckBlock->getTerminator());
3169 
3170   // Generate code to check if the loop's trip count is less than VF * UF, or
3171   // equal to it in case a scalar epilogue is required; this implies that the
  // vector trip count is zero. This check also covers the case where adding
  // one to the backedge-taken count overflowed, leading to an incorrect trip
  // count of zero. In this case we will also jump to the scalar loop.
3175   auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE
3176                                           : ICmpInst::ICMP_ULT;
3177 
3178   // If tail is to be folded, vector loop takes care of all iterations.
3179   Value *CheckMinIters = Builder.getFalse();
3180   if (!Cost->foldTailByMasking()) {
3181     Value *Step =
3182         createStepForVF(Builder, ConstantInt::get(Count->getType(), UF), VF);
3183     CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
3184   }
3185   // Create new preheader for vector loop.
3186   LoopVectorPreHeader =
3187       SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
3188                  "vector.ph");
3189 
3190   assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
3191                                DT->getNode(Bypass)->getIDom()) &&
3192          "TC check is expected to dominate Bypass");
3193 
3194   // Update dominator for Bypass & LoopExit.
3195   DT->changeImmediateDominator(Bypass, TCCheckBlock);
3196   DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
3197 
3198   ReplaceInstWithInst(
3199       TCCheckBlock->getTerminator(),
3200       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
3201   LoopBypassBlocks.push_back(TCCheckBlock);
3202 }
3203 
3204 BasicBlock *InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
3206   BasicBlock *const SCEVCheckBlock =
3207       RTChecks.emitSCEVChecks(L, Bypass, LoopVectorPreHeader, LoopExitBlock);
3208   if (!SCEVCheckBlock)
3209     return nullptr;
3210 
3211   assert(!(SCEVCheckBlock->getParent()->hasOptSize() ||
3212            (OptForSizeBasedOnProfile &&
3213             Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) &&
3214          "Cannot SCEV check stride or overflow when optimizing for size");
3215 
3217   // Update dominator only if this is first RT check.
3218   if (LoopBypassBlocks.empty()) {
3219     DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
3220     DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
3221   }
3222 
3223   LoopBypassBlocks.push_back(SCEVCheckBlock);
3224   AddedSafetyChecks = true;
3225   return SCEVCheckBlock;
3226 }
3227 
3228 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L,
3229                                                       BasicBlock *Bypass) {
3230   // VPlan-native path does not do any analysis for runtime checks currently.
3231   if (EnableVPlanNativePath)
3232     return nullptr;
3233 
3234   BasicBlock *const MemCheckBlock =
3235       RTChecks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader);
3236 
3237   // Check if we generated code that checks in runtime if arrays overlap. We put
3238   // the checks into a separate block to make the more common case of few
3239   // elements faster.
3240   if (!MemCheckBlock)
3241     return nullptr;
3242 
3243   if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
3244     assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
3245            "Cannot emit memory checks when optimizing for size, unless forced "
3246            "to vectorize.");
3247     ORE->emit([&]() {
3248       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
3249                                         L->getStartLoc(), L->getHeader())
3250              << "Code-size may be reduced by not forcing "
3251                 "vectorization, or by source-code modifications "
3252                 "eliminating the need for runtime checks "
3253                 "(e.g., adding 'restrict').";
3254     });
3255   }
3256 
3257   LoopBypassBlocks.push_back(MemCheckBlock);
3258 
3259   AddedSafetyChecks = true;
3260 
3261   // We currently don't use LoopVersioning for the actual loop cloning but we
3262   // still use it to add the noalias metadata.
3263   LVer = std::make_unique<LoopVersioning>(
3264       *Legal->getLAI(),
3265       Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
3266       DT, PSE.getSE());
3267   LVer->prepareNoAliasMetadata();
3268   return MemCheckBlock;
3269 }
3270 
3271 Value *InnerLoopVectorizer::emitTransformedIndex(
3272     IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
3273     const InductionDescriptor &ID) const {
3274 
3275   SCEVExpander Exp(*SE, DL, "induction");
3276   auto Step = ID.getStep();
3277   auto StartValue = ID.getStartValue();
3278   assert(Index->getType() == Step->getType() &&
3279          "Index type does not match StepValue type");
3280 
  // Note: the IR at this point is broken. We cannot use SE to create any new
  // SCEV and then expand it, hoping that SCEV's simplification will give us
  // more optimal code. Unfortunately, attempting to do so on invalid IR may
  // lead to various SCEV crashes. So all we can do is use the builder and
  // rely on InstCombine for future simplifications. Here we handle some
  // trivial cases only.
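  // For instance (illustrative): with an integer induction { Start, +, Step }
  // the transformed index is Start + Index * Step; the helpers below fold
  // additions of zero and multiplications by one so that those common cases
  // emit no instructions at all.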
3287   auto CreateAdd = [&B](Value *X, Value *Y) {
3288     assert(X->getType() == Y->getType() && "Types don't match!");
3289     if (auto *CX = dyn_cast<ConstantInt>(X))
3290       if (CX->isZero())
3291         return Y;
3292     if (auto *CY = dyn_cast<ConstantInt>(Y))
3293       if (CY->isZero())
3294         return X;
3295     return B.CreateAdd(X, Y);
3296   };
3297 
3298   auto CreateMul = [&B](Value *X, Value *Y) {
3299     assert(X->getType() == Y->getType() && "Types don't match!");
3300     if (auto *CX = dyn_cast<ConstantInt>(X))
3301       if (CX->isOne())
3302         return Y;
3303     if (auto *CY = dyn_cast<ConstantInt>(Y))
3304       if (CY->isOne())
3305         return X;
3306     return B.CreateMul(X, Y);
3307   };
3308 
3309   // Get a suitable insert point for SCEV expansion. For blocks in the vector
3310   // loop, choose the end of the vector loop header (=LoopVectorBody), because
3311   // the DomTree is not kept up-to-date for additional blocks generated in the
3312   // vector loop. By using the header as insertion point, we guarantee that the
3313   // expanded instructions dominate all their uses.
3314   auto GetInsertPoint = [this, &B]() {
3315     BasicBlock *InsertBB = B.GetInsertPoint()->getParent();
3316     if (InsertBB != LoopVectorBody &&
3317         LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB))
3318       return LoopVectorBody->getTerminator();
3319     return &*B.GetInsertPoint();
3320   };
3321 
3322   switch (ID.getKind()) {
3323   case InductionDescriptor::IK_IntInduction: {
3324     assert(Index->getType() == StartValue->getType() &&
3325            "Index type does not match StartValue type");
3326     if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
3327       return B.CreateSub(StartValue, Index);
3328     auto *Offset = CreateMul(
3329         Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()));
3330     return CreateAdd(StartValue, Offset);
3331   }
3332   case InductionDescriptor::IK_PtrInduction: {
3333     assert(isa<SCEVConstant>(Step) &&
3334            "Expected constant step for pointer induction");
3335     return B.CreateGEP(
3336         StartValue->getType()->getPointerElementType(), StartValue,
3337         CreateMul(Index,
3338                   Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint())));
3339   }
3340   case InductionDescriptor::IK_FpInduction: {
3341     assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
3342     auto InductionBinOp = ID.getInductionBinOp();
3343     assert(InductionBinOp &&
3344            (InductionBinOp->getOpcode() == Instruction::FAdd ||
3345             InductionBinOp->getOpcode() == Instruction::FSub) &&
3346            "Original bin op should be defined for FP induction");
3347 
3348     Value *StepValue = cast<SCEVUnknown>(Step)->getValue();
3349     Value *MulExp = B.CreateFMul(StepValue, Index);
3350     return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
3351                          "induction");
3352   }
3353   case InductionDescriptor::IK_NoInduction:
3354     return nullptr;
3355   }
3356   llvm_unreachable("invalid enum");
3357 }
3358 
3359 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
3360   LoopScalarBody = OrigLoop->getHeader();
3361   LoopVectorPreHeader = OrigLoop->getLoopPreheader();
3362   LoopExitBlock = OrigLoop->getUniqueExitBlock();
3363   assert(LoopExitBlock && "Must have an exit block");
3364   assert(LoopVectorPreHeader && "Invalid loop structure");
3365 
3366   LoopMiddleBlock =
3367       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3368                  LI, nullptr, Twine(Prefix) + "middle.block");
3369   LoopScalarPreHeader =
3370       SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
3371                  nullptr, Twine(Prefix) + "scalar.ph");
3372 
3373   // Set up branch from middle block to the exit and scalar preheader blocks.
3374   // completeLoopSkeleton will update the condition to use an iteration check,
3375   // if required to decide whether to execute the remainder.
3376   BranchInst *BrInst =
3377       BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, Builder.getTrue());
3378   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3379   BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3380   ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
3381 
  // We intentionally don't let SplitBlock update LoopInfo since
  // LoopVectorBody should belong to a different loop than LoopVectorPreHeader.
  // LoopVectorBody is explicitly added to the correct place a few lines later.
3385   LoopVectorBody =
3386       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3387                  nullptr, nullptr, Twine(Prefix) + "vector.body");
3388 
3389   // Update dominator for loop exit.
3390   DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
3391 
3392   // Create and register the new vector loop.
3393   Loop *Lp = LI->AllocateLoop();
3394   Loop *ParentLoop = OrigLoop->getParentLoop();
3395 
3396   // Insert the new loop into the loop nest and register the new basic blocks
3397   // before calling any utilities such as SCEV that require valid LoopInfo.
3398   if (ParentLoop) {
3399     ParentLoop->addChildLoop(Lp);
3400   } else {
3401     LI->addTopLevelLoop(Lp);
3402   }
3403   Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
3404   return Lp;
3405 }
3406 
3407 void InnerLoopVectorizer::createInductionResumeValues(
3408     Loop *L, Value *VectorTripCount,
3409     std::pair<BasicBlock *, Value *> AdditionalBypass) {
3410   assert(VectorTripCount && L && "Expected valid arguments");
3411   assert(((AdditionalBypass.first && AdditionalBypass.second) ||
3412           (!AdditionalBypass.first && !AdditionalBypass.second)) &&
3413          "Inconsistent information about additional bypass.");
3414   // We are going to resume the execution of the scalar loop.
3415   // Go over all of the induction variables that we found and fix the
3416   // PHIs that are left in the scalar version of the loop.
3417   // The starting values of PHI nodes depend on the counter of the last
3418   // iteration in the vectorized loop.
3419   // If we come from a bypass edge then we need to start from the original
3420   // start value.
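  // For example (assumed shapes): for the primary induction starting at 0,
  // the resume value is the vector trip count when control arrives from the
  // middle block, and the original start value 0 when it arrives from a
  // bypass block.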
3421   for (auto &InductionEntry : Legal->getInductionVars()) {
3422     PHINode *OrigPhi = InductionEntry.first;
3423     InductionDescriptor II = InductionEntry.second;
3424 
    // Create phi nodes to merge from the backedge-taken check block.
3426     PHINode *BCResumeVal =
3427         PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
3428                         LoopScalarPreHeader->getTerminator());
3429     // Copy original phi DL over to the new one.
3430     BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
3431     Value *&EndValue = IVEndValues[OrigPhi];
3432     Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
3433     if (OrigPhi == OldInduction) {
3434       // We know what the end value is.
3435       EndValue = VectorTripCount;
3436     } else {
3437       IRBuilder<> B(L->getLoopPreheader()->getTerminator());
3438 
3439       // Fast-math-flags propagate from the original induction instruction.
3440       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3441         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3442 
3443       Type *StepType = II.getStep()->getType();
3444       Instruction::CastOps CastOp =
3445           CastInst::getCastOpcode(VectorTripCount, true, StepType, true);
3446       Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd");
3447       const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout();
3448       EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
3449       EndValue->setName("ind.end");
3450 
3451       // Compute the end value for the additional bypass (if applicable).
3452       if (AdditionalBypass.first) {
3453         B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt()));
3454         CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true,
3455                                          StepType, true);
3456         CRD =
3457             B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd");
3458         EndValueFromAdditionalBypass =
3459             emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
3460         EndValueFromAdditionalBypass->setName("ind.end");
3461       }
3462     }
3463     // The new PHI merges the original incoming value, in case of a bypass,
3464     // or the value at the end of the vectorized loop.
3465     BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);
3466 
3467     // Fix the scalar body counter (PHI node).
3468     // The old induction's phi node in the scalar body needs the truncated
3469     // value.
3470     for (BasicBlock *BB : LoopBypassBlocks)
3471       BCResumeVal->addIncoming(II.getStartValue(), BB);
3472 
3473     if (AdditionalBypass.first)
3474       BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first,
3475                                             EndValueFromAdditionalBypass);
3476 
3477     OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
3478   }
3479 }
3480 
3481 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L,
3482                                                       MDNode *OrigLoopID) {
3483   assert(L && "Expected valid loop.");
3484 
3485   // The trip counts should be cached by now.
3486   Value *Count = getOrCreateTripCount(L);
3487   Value *VectorTripCount = getOrCreateVectorTripCount(L);
3488 
3489   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3490 
3491   // Add a check in the middle block to see if we have completed
3492   // all of the iterations in the first vector loop.
  // If (N - N%(VF*UF)) == N, then we *don't* need to run the remainder.
3494   // If tail is to be folded, we know we don't need to run the remainder.
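  // A numeric sketch (assumed values, no scalar epilogue required): with
  // N = 10 and VF * UF = 8 the vector trip count is 8, so cmp.n is false and
  // the scalar remainder runs the last 2 iterations; with N = 16 the vector
  // trip count equals N and the remainder is skipped.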
3495   if (!Cost->foldTailByMasking()) {
3496     Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
3497                                         Count, VectorTripCount, "cmp.n",
3498                                         LoopMiddleBlock->getTerminator());
3499 
3500     // Here we use the same DebugLoc as the scalar loop latch terminator instead
3501     // of the corresponding compare because they may have ended up with
3502     // different line numbers and we want to avoid awkward line stepping while
3503     // debugging. Eg. if the compare has got a line number inside the loop.
3504     CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3505     cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN);
3506   }
3507 
3508   // Get ready to start creating new instructions into the vectorized body.
3509   assert(LoopVectorPreHeader == L->getLoopPreheader() &&
3510          "Inconsistent vector loop preheader");
3511   Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
3512 
3513   Optional<MDNode *> VectorizedLoopID =
3514       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
3515                                       LLVMLoopVectorizeFollowupVectorized});
3516   if (VectorizedLoopID.hasValue()) {
3517     L->setLoopID(VectorizedLoopID.getValue());
3518 
3519     // Do not setAlreadyVectorized if loop attributes have been defined
3520     // explicitly.
3521     return LoopVectorPreHeader;
3522   }
3523 
3524   // Keep all loop hints from the original loop on the vector loop (we'll
3525   // replace the vectorizer-specific hints below).
3526   if (MDNode *LID = OrigLoop->getLoopID())
3527     L->setLoopID(LID);
3528 
3529   LoopVectorizeHints Hints(L, true, *ORE);
3530   Hints.setAlreadyVectorized();
3531 
3532 #ifdef EXPENSIVE_CHECKS
3533   assert(DT->verify(DominatorTree::VerificationLevel::Fast));
3534   LI->verify(*DT);
3535 #endif
3536 
3537   return LoopVectorPreHeader;
3538 }
3539 
3540 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
3541   /*
3542    In this function we generate a new loop. The new loop will contain
3543    the vectorized instructions while the old loop will continue to run the
3544    scalar remainder.
3545 
3546        [ ] <-- loop iteration number check.
3547     /   |
3548    /    v
3549   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
3550   |  /  |
3551   | /   v
3552   ||   [ ]     <-- vector pre header.
3553   |/    |
3554   |     v
3555   |    [  ] \
3556   |    [  ]_|   <-- vector loop.
3557   |     |
3558   |     v
3559   |   -[ ]   <--- middle-block.
3560   |  /  |
3561   | /   v
3562   -|- >[ ]     <--- new preheader.
3563    |    |
3564    |    v
3565    |   [ ] \
3566    |   [ ]_|   <-- old scalar loop to handle remainder.
3567     \   |
3568      \  v
3569       >[ ]     <-- exit block.
3570    ...
3571    */
3572 
3573   // Get the metadata of the original loop before it gets modified.
3574   MDNode *OrigLoopID = OrigLoop->getLoopID();
3575 
3576   // Create an empty vector loop, and prepare basic blocks for the runtime
3577   // checks.
3578   Loop *Lp = createVectorLoopSkeleton("");
3579 
  // Now, compare the new count to zero. If it is zero, skip the vector loop
  // and jump to the scalar loop. This check also covers the case where the
  // backedge-taken count is uint##_max: adding one to it will overflow,
  // leading to an incorrect trip count of zero. In this (rare) case we will
  // also jump to the scalar loop.
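  // E.g. (illustrative), with an i8 induction variable, a backedge-taken
  // count of 255 would wrap to a trip count of 0 when incremented, so such
  // loops must be sent to the scalar path by this check.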
3585   emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader);
3586 
3587   // Generate the code to check any assumptions that we've made for SCEV
3588   // expressions.
3589   emitSCEVChecks(Lp, LoopScalarPreHeader);
3590 
  // Generate the code that checks at runtime whether arrays overlap. We put
  // the checks into a separate block to make the more common case of few
  // elements faster.
3594   emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
3595 
  // Some loops have a single integer induction variable, while other loops
  // don't. One example is C++ iterator loops, which often have multiple
  // pointer induction variables. The code below also supports the case where
  // we don't have a single induction variable.
  //
  // We try as hard as possible to obtain an induction variable from the
  // original loop. However, if we don't find one that:
  //   - is an integer
  //   - counts from zero, stepping by one
  //   - is the size of the widest induction variable type
  // then we create a new one.
3607   OldInduction = Legal->getPrimaryInduction();
3608   Type *IdxTy = Legal->getWidestInductionType();
3609   Value *StartIdx = ConstantInt::get(IdxTy, 0);
3610   // The loop step is equal to the vectorization factor (num of SIMD elements)
3611   // times the unroll factor (num of SIMD instructions).
3612   Builder.SetInsertPoint(&*Lp->getHeader()->getFirstInsertionPt());
3613   Value *Step = createStepForVF(Builder, ConstantInt::get(IdxTy, UF), VF);
3614   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
3615   Induction =
3616       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
3617                               getDebugLocFromInstOrOperands(OldInduction));
3618 
3619   // Emit phis for the new starting index of the scalar loop.
3620   createInductionResumeValues(Lp, CountRoundDown);
3621 
3622   return completeLoopSkeleton(Lp, OrigLoopID);
3623 }
3624 
3625 // Fix up external users of the induction variable. At this point, we are
3626 // in LCSSA form, with all external PHIs that use the IV having one input value,
3627 // coming from the remainder loop. We need those PHIs to also have a correct
3628 // value for the IV when arriving directly from the middle block.
3629 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3630                                        const InductionDescriptor &II,
3631                                        Value *CountRoundDown, Value *EndValue,
3632                                        BasicBlock *MiddleBlock) {
  // There are two kinds of external IV usages - those that use the value
  // computed in the last iteration (the PHI) and those that use the
  // penultimate value (the value that feeds into the phi from the loop
  // latch). We allow both, but they obviously have different values.
3637 
3638   assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");
3639 
3640   DenseMap<Value *, Value *> MissingVals;
3641 
3642   // An external user of the last iteration's value should see the value that
3643   // the remainder loop uses to initialize its own IV.
3644   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3645   for (User *U : PostInc->users()) {
3646     Instruction *UI = cast<Instruction>(U);
3647     if (!OrigLoop->contains(UI)) {
3648       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3649       MissingVals[UI] = EndValue;
3650     }
3651   }
3652 
  // An external user of the penultimate value needs to see EndValue - Step.
3654   // The simplest way to get this is to recompute it from the constituent SCEVs,
3655   // that is Start + (Step * (CRD - 1)).
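  // E.g. (illustrative), for an IV starting at 0 with step 2 and a
  // rounded-down vector trip count CRD of 8, the penultimate value is
  // 0 + 2 * (8 - 1) = 14.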
3656   for (User *U : OrigPhi->users()) {
3657     auto *UI = cast<Instruction>(U);
3658     if (!OrigLoop->contains(UI)) {
3659       const DataLayout &DL =
3660           OrigLoop->getHeader()->getModule()->getDataLayout();
3661       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3662 
3663       IRBuilder<> B(MiddleBlock->getTerminator());
3664 
3665       // Fast-math-flags propagate from the original induction instruction.
3666       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3667         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3668 
3669       Value *CountMinusOne = B.CreateSub(
3670           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3671       Value *CMO =
3672           !II.getStep()->getType()->isIntegerTy()
3673               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3674                              II.getStep()->getType())
3675               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3676       CMO->setName("cast.cmo");
3677       Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II);
3678       Escape->setName("ind.escape");
3679       MissingVals[UI] = Escape;
3680     }
3681   }
3682 
3683   for (auto &I : MissingVals) {
3684     PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
3686     // that is %IV2 = phi [...], [ %IV1, %latch ]
3687     // In this case, if IV1 has an external use, we need to avoid adding both
3688     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3689     // don't already have an incoming value for the middle block.
3690     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3691       PHI->addIncoming(I.second, MiddleBlock);
3692   }
3693 }
3694 
3695 namespace {
3696 
3697 struct CSEDenseMapInfo {
3698   static bool canHandle(const Instruction *I) {
3699     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3700            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3701   }
3702 
3703   static inline Instruction *getEmptyKey() {
3704     return DenseMapInfo<Instruction *>::getEmptyKey();
3705   }
3706 
3707   static inline Instruction *getTombstoneKey() {
3708     return DenseMapInfo<Instruction *>::getTombstoneKey();
3709   }
3710 
3711   static unsigned getHashValue(const Instruction *I) {
3712     assert(canHandle(I) && "Unknown instruction!");
3713     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3714                                                            I->value_op_end()));
3715   }
3716 
3717   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3718     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3719         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3720       return LHS == RHS;
3721     return LHS->isIdenticalTo(RHS);
3722   }
3723 };
3724 
3725 } // end anonymous namespace
3726 
/// Perform CSE of induction variable instructions.
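/// E.g. (illustrative): of two identical 'extractelement <4 x i64> %vec.ind,
/// i64 0' instructions in the block, the second is replaced by the first and
/// erased.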
3728 static void cse(BasicBlock *BB) {
  // Perform simple CSE.
3730   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3731   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
3732     Instruction *In = &*I++;
3733 
3734     if (!CSEDenseMapInfo::canHandle(In))
3735       continue;
3736 
3737     // Check if we can replace this instruction with any of the
3738     // visited instructions.
3739     if (Instruction *V = CSEMap.lookup(In)) {
3740       In->replaceAllUsesWith(V);
3741       In->eraseFromParent();
3742       continue;
3743     }
3744 
3745     CSEMap[In] = In;
3746   }
3747 }
3748 
3749 InstructionCost
3750 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF,
3751                                               bool &NeedToScalarize) {
3752   Function *F = CI->getCalledFunction();
3753   Type *ScalarRetTy = CI->getType();
3754   SmallVector<Type *, 4> Tys, ScalarTys;
3755   for (auto &ArgOp : CI->arg_operands())
3756     ScalarTys.push_back(ArgOp->getType());
3757 
3758   // Estimate cost of scalarized vector call. The source operands are assumed
3759   // to be vectors, so we need to extract individual elements from there,
3760   // execute VF scalar calls, and then gather the result into the vector return
3761   // value.
3762   InstructionCost ScalarCallCost =
3763       TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput);
3764   if (VF.isScalar())
3765     return ScalarCallCost;
3766 
3767   // Compute corresponding vector type for return value and arguments.
3768   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3769   for (Type *ScalarTy : ScalarTys)
3770     Tys.push_back(ToVectorTy(ScalarTy, VF));
3771 
3772   // Compute costs of unpacking argument values for the scalar calls and
3773   // packing the return values to a vector.
3774   InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
3775 
3776   InstructionCost Cost =
3777       ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
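  // E.g. (illustrative), with VF = 4, a scalar call cost of 10 and a
  // scalarization overhead of 6, the scalarized cost is 10 * 4 + 6 = 46.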
3778 
3779   // If we can't emit a vector call for this function, then the currently found
3780   // cost is the cost we need to return.
3781   NeedToScalarize = true;
3782   VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
3783   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3784 
3785   if (!TLI || CI->isNoBuiltin() || !VecFunc)
3786     return Cost;
3787 
3788   // If the corresponding vector cost is cheaper, return its cost.
3789   InstructionCost VectorCallCost =
3790       TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput);
3791   if (VectorCallCost < Cost) {
3792     NeedToScalarize = false;
3793     Cost = VectorCallCost;
3794   }
3795   return Cost;
3796 }
3797 
3798 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) {
3799   if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy()))
3800     return Elt;
3801   return VectorType::get(Elt, VF);
3802 }
3803 
3804 InstructionCost
3805 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
3806                                                    ElementCount VF) {
3807   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3808   assert(ID && "Expected intrinsic call!");
3809   Type *RetTy = MaybeVectorizeType(CI->getType(), VF);
3810   FastMathFlags FMF;
3811   if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
3812     FMF = FPMO->getFastMathFlags();
3813 
3814   SmallVector<const Value *> Arguments(CI->arg_begin(), CI->arg_end());
3815   FunctionType *FTy = CI->getCalledFunction()->getFunctionType();
3816   SmallVector<Type *> ParamTys;
3817   std::transform(FTy->param_begin(), FTy->param_end(),
3818                  std::back_inserter(ParamTys),
3819                  [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); });
3820 
3821   IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
3822                                     dyn_cast<IntrinsicInst>(CI));
3823   return TTI.getIntrinsicInstrCost(CostAttrs,
3824                                    TargetTransformInfo::TCK_RecipThroughput);
3825 }
3826 
3827 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3828   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3829   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3830   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3831 }
3832 
3833 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3834   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3835   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3836   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3837 }
3838 
3839 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) {
  // For every instruction `I` in MinBWs, truncate the operands, create a
  // truncated version of `I` and re-extend its result. InstCombine runs
  // later and will remove any ext/trunc pairs.
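  // E.g. (illustrative), with VF = 4, an i32 add known to need only 8 bits
  // becomes:
  //   %t0 = trunc <4 x i32> %a to <4 x i8>
  //   %t1 = trunc <4 x i32> %b to <4 x i8>
  //   %r8 = add <4 x i8> %t0, %t1
  //   %r  = zext <4 x i8> %r8 to <4 x i32>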
3843   SmallPtrSet<Value *, 4> Erased;
3844   for (const auto &KV : Cost->getMinimalBitwidths()) {
3845     // If the value wasn't vectorized, we must maintain the original scalar
3846     // type. The absence of the value from State indicates that it
3847     // wasn't vectorized.
3848     VPValue *Def = State.Plan->getVPValue(KV.first);
3849     if (!State.hasAnyVectorValue(Def))
3850       continue;
3851     for (unsigned Part = 0; Part < UF; ++Part) {
3852       Value *I = State.get(Def, Part);
3853       if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
3854         continue;
3855       Type *OriginalTy = I->getType();
3856       Type *ScalarTruncatedTy =
3857           IntegerType::get(OriginalTy->getContext(), KV.second);
3858       auto *TruncatedTy = FixedVectorType::get(
3859           ScalarTruncatedTy,
3860           cast<FixedVectorType>(OriginalTy)->getNumElements());
3861       if (TruncatedTy == OriginalTy)
3862         continue;
3863 
3864       IRBuilder<> B(cast<Instruction>(I));
3865       auto ShrinkOperand = [&](Value *V) -> Value * {
3866         if (auto *ZI = dyn_cast<ZExtInst>(V))
3867           if (ZI->getSrcTy() == TruncatedTy)
3868             return ZI->getOperand(0);
3869         return B.CreateZExtOrTrunc(V, TruncatedTy);
3870       };
3871 
3872       // The actual instruction modification depends on the instruction type,
3873       // unfortunately.
3874       Value *NewI = nullptr;
3875       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3876         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3877                              ShrinkOperand(BO->getOperand(1)));
3878 
3879         // Any wrapping introduced by shrinking this operation shouldn't be
3880         // considered undefined behavior. So, we can't unconditionally copy
3881         // arithmetic wrapping flags to NewI.
3882         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3883       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3884         NewI =
3885             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3886                          ShrinkOperand(CI->getOperand(1)));
3887       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3888         NewI = B.CreateSelect(SI->getCondition(),
3889                               ShrinkOperand(SI->getTrueValue()),
3890                               ShrinkOperand(SI->getFalseValue()));
3891       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3892         switch (CI->getOpcode()) {
3893         default:
3894           llvm_unreachable("Unhandled cast!");
3895         case Instruction::Trunc:
3896           NewI = ShrinkOperand(CI->getOperand(0));
3897           break;
3898         case Instruction::SExt:
3899           NewI = B.CreateSExtOrTrunc(
3900               CI->getOperand(0),
3901               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3902           break;
3903         case Instruction::ZExt:
3904           NewI = B.CreateZExtOrTrunc(
3905               CI->getOperand(0),
3906               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3907           break;
3908         }
3909       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
3910         auto Elements0 = cast<FixedVectorType>(SI->getOperand(0)->getType())
3911                              ->getNumElements();
3912         auto *O0 = B.CreateZExtOrTrunc(
3913             SI->getOperand(0),
3914             FixedVectorType::get(ScalarTruncatedTy, Elements0));
3915         auto Elements1 = cast<FixedVectorType>(SI->getOperand(1)->getType())
3916                              ->getNumElements();
3917         auto *O1 = B.CreateZExtOrTrunc(
3918             SI->getOperand(1),
3919             FixedVectorType::get(ScalarTruncatedTy, Elements1));
3920 
3921         NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
3922       } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
3923         // Don't do anything with the operands, just extend the result.
3924         continue;
3925       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3926         auto Elements = cast<FixedVectorType>(IE->getOperand(0)->getType())
3927                             ->getNumElements();
3928         auto *O0 = B.CreateZExtOrTrunc(
3929             IE->getOperand(0),
3930             FixedVectorType::get(ScalarTruncatedTy, Elements));
3931         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3932         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3933       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3934         auto Elements = cast<FixedVectorType>(EE->getOperand(0)->getType())
3935                             ->getNumElements();
3936         auto *O0 = B.CreateZExtOrTrunc(
3937             EE->getOperand(0),
3938             FixedVectorType::get(ScalarTruncatedTy, Elements));
3939         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3940       } else {
3941         // If we don't know what to do, be conservative and don't do anything.
3942         continue;
3943       }
3944 
3945       // Lastly, extend the result.
3946       NewI->takeName(cast<Instruction>(I));
3947       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3948       I->replaceAllUsesWith(Res);
3949       cast<Instruction>(I)->eraseFromParent();
3950       Erased.insert(I);
3951       State.reset(Def, Res, Part);
3952     }
3953   }
3954 
  // We'll have created a bunch of ZExts that are now dead. Clean them up.
3956   for (const auto &KV : Cost->getMinimalBitwidths()) {
3957     // If the value wasn't vectorized, we must maintain the original scalar
3958     // type. The absence of the value from State indicates that it
3959     // wasn't vectorized.
3960     VPValue *Def = State.Plan->getVPValue(KV.first);
3961     if (!State.hasAnyVectorValue(Def))
3962       continue;
3963     for (unsigned Part = 0; Part < UF; ++Part) {
3964       Value *I = State.get(Def, Part);
3965       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3966       if (Inst && Inst->use_empty()) {
3967         Value *NewI = Inst->getOperand(0);
3968         Inst->eraseFromParent();
3969         State.reset(Def, NewI, Part);
3970       }
3971     }
3972   }
3973 }
3974 
3975 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
3976   // Insert truncates and extends for any truncated instructions as hints to
3977   // InstCombine.
3978   if (VF.isVector())
3979     truncateToMinimalBitwidths(State);
3980 
3981   // Fix widened non-induction PHIs by setting up the PHI operands.
3982   if (OrigPHIsToFix.size()) {
3983     assert(EnableVPlanNativePath &&
3984            "Unexpected non-induction PHIs for fixup in non VPlan-native path");
3985     fixNonInductionPHIs(State);
3986   }
3987 
3988   // At this point every instruction in the original loop is widened to a
3989   // vector form. Now we need to fix the recurrences in the loop. These PHI
3990   // nodes are currently empty because we did not want to introduce cycles.
3991   // This is the second stage of vectorizing recurrences.
3992   fixCrossIterationPHIs(State);
3993 
3994   // Forget the original basic block.
3995   PSE.getSE()->forgetLoop(OrigLoop);
3996 
3997   // Fix-up external users of the induction variables.
3998   for (auto &Entry : Legal->getInductionVars())
3999     fixupIVUsers(Entry.first, Entry.second,
4000                  getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
4001                  IVEndValues[Entry.first], LoopMiddleBlock);
4002 
4003   fixLCSSAPHIs(State);
4004   for (Instruction *PI : PredicatedInstructions)
4005     sinkScalarOperands(&*PI);
4006 
4007   // Remove redundant induction instructions.
4008   cse(LoopVectorBody);
4009 
  // Set/update profile weights for the vector and remainder loops as original
  // loop iterations are now distributed among them. Note that the original
  // loop, represented by LoopScalarBody, becomes the remainder loop after
  // vectorization.
  //
  // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
  // end up with a slightly roughened result, but that should be OK since
  // profile info is not inherently precise anyway. Note also that a possible
  // bypass of the vector code caused by legality checks is ignored,
  // optimistically assigning all the weight to the vector loop.
  //
  // For scalable vectorization we can't know at compile time how many
  // iterations of the loop are handled in one vector iteration, so instead
  // assume a pessimistic vscale of '1'.
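  // E.g. (illustrative), with VF * UF = 8 and an original trip count of 100,
  // the vector loop is assigned roughly 100 / 8 = 12 iterations per entry and
  // the remainder loop the leftover 4.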
4023   setProfileInfoAfterUnrolling(
4024       LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody),
4025       LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF);
4026 }
4027 
4028 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
4029   // In order to support recurrences we need to be able to vectorize Phi nodes.
4030   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4031   // stage #2: We now need to fix the recurrences by adding incoming edges to
4032   // the currently empty PHI nodes. At this point every instruction in the
4033   // original loop is widened to a vector form so we can use them to construct
4034   // the incoming edges.
4035   for (PHINode &Phi : OrigLoop->getHeader()->phis()) {
4036     // Handle first-order recurrences and reductions that need to be fixed.
4037     if (Legal->isFirstOrderRecurrence(&Phi))
4038       fixFirstOrderRecurrence(&Phi, State);
4039     else if (Legal->isReductionVariable(&Phi))
4040       fixReduction(&Phi, State);
4041   }
4042 }
4043 
4044 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi,
4045                                                   VPTransformState &State) {
4046   // This is the second phase of vectorizing first-order recurrences. An
4047   // overview of the transformation is described below. Suppose we have the
4048   // following loop.
4049   //
4050   //   for (int i = 0; i < n; ++i)
4051   //     b[i] = a[i] - a[i - 1];
4052   //
4053   // There is a first-order recurrence on "a". For this loop, the shorthand
4054   // scalar IR looks like:
4055   //
4056   //   scalar.ph:
4057   //     s_init = a[-1]
4058   //     br scalar.body
4059   //
4060   //   scalar.body:
4061   //     i = phi [0, scalar.ph], [i+1, scalar.body]
4062   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
4063   //     s2 = a[i]
4064   //     b[i] = s2 - s1
4065   //     br cond, scalar.body, ...
4066   //
  // In this example, s1 is a recurrence because its value depends on the
4068   // previous iteration. In the first phase of vectorization, we created a
4069   // temporary value for s1. We now complete the vectorization and produce the
4070   // shorthand vector IR shown below (for VF = 4, UF = 1).
4071   //
4072   //   vector.ph:
4073   //     v_init = vector(..., ..., ..., a[-1])
4074   //     br vector.body
4075   //
4076   //   vector.body
4077   //     i = phi [0, vector.ph], [i+4, vector.body]
4078   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
4079   //     v2 = a[i, i+1, i+2, i+3];
4080   //     v3 = vector(v1(3), v2(0, 1, 2))
4081   //     b[i, i+1, i+2, i+3] = v2 - v3
4082   //     br cond, vector.body, middle.block
4083   //
4084   //   middle.block:
4085   //     x = v2(3)
4086   //     br scalar.ph
4087   //
4088   //   scalar.ph:
4089   //     s_init = phi [x, middle.block], [a[-1], otherwise]
4090   //     br scalar.body
4091   //
  // After the vector loop completes execution, we extract the next value of
  // the recurrence (x) to use as the initial value in the scalar loop.
4094 
4095   // Get the original loop preheader and single loop latch.
4096   auto *Preheader = OrigLoop->getLoopPreheader();
4097   auto *Latch = OrigLoop->getLoopLatch();
4098 
4099   // Get the initial and previous values of the scalar recurrence.
4100   auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
4101   auto *Previous = Phi->getIncomingValueForBlock(Latch);
4102 
4103   // Create a vector from the initial value.
4104   auto *VectorInit = ScalarInit;
4105   if (VF.isVector()) {
4106     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4107     assert(!VF.isScalable() && "VF is assumed to be non scalable.");
4108     VectorInit = Builder.CreateInsertElement(
4109         PoisonValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
4110         Builder.getInt32(VF.getKnownMinValue() - 1), "vector.recur.init");
4111   }
4112 
4113   VPValue *PhiDef = State.Plan->getVPValue(Phi);
4114   VPValue *PreviousDef = State.Plan->getVPValue(Previous);
4115   // We constructed a temporary phi node in the first phase of vectorization.
4116   // This phi node will eventually be deleted.
4117   Builder.SetInsertPoint(cast<Instruction>(State.get(PhiDef, 0)));
4118 
4119   // Create a phi node for the new recurrence. The current value will either be
4120   // the initial value inserted into a vector or loop-varying vector value.
4121   auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
4122   VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
4123 
4124   // Get the vectorized previous value of the last part UF - 1. It appears last
4125   // among all unrolled iterations, due to the order of their construction.
4126   Value *PreviousLastPart = State.get(PreviousDef, UF - 1);
4127 
4128   // Find and set the insertion point after the previous value if it is an
4129   // instruction.
4130   BasicBlock::iterator InsertPt;
4131   // Note that the previous value may have been constant-folded so it is not
4132   // guaranteed to be an instruction in the vector loop.
4133   // FIXME: Loop invariant values do not form recurrences. We should deal with
4134   //        them earlier.
4135   if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart))
4136     InsertPt = LoopVectorBody->getFirstInsertionPt();
4137   else {
4138     Instruction *PreviousInst = cast<Instruction>(PreviousLastPart);
4139     if (isa<PHINode>(PreviousLastPart))
4140       // If the previous value is a phi node, we should insert after all the phi
4141       // nodes in the block containing the PHI to avoid breaking basic block
4142       // verification. Note that the basic block may be different to
4143       // LoopVectorBody, in case we predicate the loop.
4144       InsertPt = PreviousInst->getParent()->getFirstInsertionPt();
4145     else
4146       InsertPt = ++PreviousInst->getIterator();
4147   }
4148   Builder.SetInsertPoint(&*InsertPt);
4149 
4150   // We will construct a vector for the recurrence by combining the values for
4151   // the current and previous iterations. This is the required shuffle mask.
4152   assert(!VF.isScalable());
4153   SmallVector<int, 8> ShuffleMask(VF.getKnownMinValue());
4154   ShuffleMask[0] = VF.getKnownMinValue() - 1;
4155   for (unsigned I = 1; I < VF.getKnownMinValue(); ++I)
4156     ShuffleMask[I] = I + VF.getKnownMinValue() - 1;
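  // E.g. (illustrative), for VF = 4 the mask is <3, 4, 5, 6>: the last lane
  // of the previous vector followed by the first three lanes of the current
  // one.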
4157 
4158   // The vector from which to take the initial value for the current iteration
4159   // (actual or unrolled). Initially, this is the vector phi node.
4160   Value *Incoming = VecPhi;
4161 
4162   // Shuffle the current and previous vector and update the vector parts.
4163   for (unsigned Part = 0; Part < UF; ++Part) {
4164     Value *PreviousPart = State.get(PreviousDef, Part);
4165     Value *PhiPart = State.get(PhiDef, Part);
4166     auto *Shuffle =
4167         VF.isVector()
4168             ? Builder.CreateShuffleVector(Incoming, PreviousPart, ShuffleMask)
4169             : Incoming;
4170     PhiPart->replaceAllUsesWith(Shuffle);
4171     cast<Instruction>(PhiPart)->eraseFromParent();
4172     State.reset(PhiDef, Shuffle, Part);
4173     Incoming = PreviousPart;
4174   }
4175 
4176   // Fix the latch value of the new recurrence in the vector loop.
4177   VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4178 
4179   // Extract the last vector element in the middle block. This will be the
4180   // initial value for the recurrence when jumping to the scalar loop.
4181   auto *ExtractForScalar = Incoming;
4182   if (VF.isVector()) {
4183     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4184     ExtractForScalar = Builder.CreateExtractElement(
4185         ExtractForScalar, Builder.getInt32(VF.getKnownMinValue() - 1),
4186         "vector.recur.extract");
4187   }
  // Extract the second-to-last element in the middle block if the
  // Phi is used outside the loop. We need to extract the phi itself
  // and not the last element (the phi update in the current iteration). This
  // will be the value when jumping to the exit block from the LoopMiddleBlock,
  // when the scalar loop is not run at all.
4193   Value *ExtractForPhiUsedOutsideLoop = nullptr;
4194   if (VF.isVector())
4195     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
4196         Incoming, Builder.getInt32(VF.getKnownMinValue() - 2),
4197         "vector.recur.extract.for.phi");
  // When the loop is unrolled without vectorizing, initialize
  // ExtractForPhiUsedOutsideLoop with the value just prior to the unrolled
  // value of `Incoming`. This is analogous to the vectorized case above:
  // extracting the second-to-last element when VF > 1.
4202   else if (UF > 1)
4203     ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);
4204 
4205   // Fix the initial value of the original recurrence in the scalar loop.
4206   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
4207   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
4208   for (auto *BB : predecessors(LoopScalarPreHeader)) {
4209     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
4210     Start->addIncoming(Incoming, BB);
4211   }
4212 
4213   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
4214   Phi->setName("scalar.recur");
4215 
4216   // Finally, fix users of the recurrence outside the loop. The users will need
4217   // either the last value of the scalar recurrence or the last value of the
4218   // vector recurrence we extracted in the middle block. Since the loop is in
4219   // LCSSA form, we just need to find all the phi nodes for the original scalar
4220   // recurrence in the exit block, and then add an edge for the middle block.
4221   // Note that LCSSA does not imply single entry when the original scalar loop
4222   // had multiple exiting edges (as we always run the last iteration in the
4223   // scalar epilogue); in that case, the exiting path through middle will be
4224   // dynamically dead and the value picked for the phi doesn't matter.
4225   for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4226     if (any_of(LCSSAPhi.incoming_values(),
4227                [Phi](Value *V) { return V == Phi; }))
4228       LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
4229 }
4230 
4231 void InnerLoopVectorizer::fixReduction(PHINode *Phi, VPTransformState &State) {
  // Get its reduction variable descriptor.
4233   assert(Legal->isReductionVariable(Phi) &&
4234          "Unable to find the reduction variable");
4235   RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi];
4236 
4237   RecurKind RK = RdxDesc.getRecurrenceKind();
4238   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
4239   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
4240   setDebugLocFromInst(Builder, ReductionStartValue);
4241   bool IsInLoopReductionPhi = Cost->isInLoopReduction(Phi);
4242 
4243   VPValue *LoopExitInstDef = State.Plan->getVPValue(LoopExitInst);
4244   // This is the vector-clone of the value that leaves the loop.
4245   Type *VecTy = State.get(LoopExitInstDef, 0)->getType();
4246 
4247   // Wrap flags are in general invalid after vectorization, clear them.
4248   clearReductionWrapFlags(RdxDesc, State);
4249 
4250   // Fix the vector-loop phi.
4251 
4252   // Reductions do not have to start at zero. They can start with
4253   // any loop invariant values.
4254   BasicBlock *Latch = OrigLoop->getLoopLatch();
4255   Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
4256 
4257   for (unsigned Part = 0; Part < UF; ++Part) {
4258     Value *VecRdxPhi = State.get(State.Plan->getVPValue(Phi), Part);
4259     Value *Val = State.get(State.Plan->getVPValue(LoopVal), Part);
4260     cast<PHINode>(VecRdxPhi)
4261       ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4262   }
4263 
4264   // Before each round, move the insertion point right between
4265   // the PHIs and the values we are going to write.
4266   // This allows us to write both PHINodes and the extractelement
4267   // instructions.
4268   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4269 
4270   setDebugLocFromInst(Builder, LoopExitInst);
4271 
4272   // If tail is folded by masking, the vector value to leave the loop should be
4273   // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
4274   // instead of the former. For an inloop reduction the reduction will already
4275   // be predicated, and does not need to be handled here.
4276   if (Cost->foldTailByMasking() && !IsInLoopReductionPhi) {
4277     for (unsigned Part = 0; Part < UF; ++Part) {
4278       Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
4279       Value *Sel = nullptr;
4280       for (User *U : VecLoopExitInst->users()) {
4281         if (isa<SelectInst>(U)) {
4282           assert(!Sel && "Reduction exit feeding two selects");
4283           Sel = U;
4284         } else
4285           assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
4286       }
4287       assert(Sel && "Reduction exit feeds no select");
4288       State.reset(LoopExitInstDef, Sel, Part);
4289 
4290       // If the target can create a predicated operator for the reduction at no
4291       // extra cost in the loop (for example a predicated vadd), it can be
4292       // cheaper for the select to remain in the loop than be sunk out of it,
4293       // and so use the select value for the phi instead of the old
4294       // LoopExitValue.
4295       RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi];
4296       if (PreferPredicatedReductionSelect ||
4297           TTI->preferPredicatedReductionSelect(
4298               RdxDesc.getOpcode(), Phi->getType(),
4299               TargetTransformInfo::ReductionFlags())) {
4300         auto *VecRdxPhi =
4301             cast<PHINode>(State.get(State.Plan->getVPValue(Phi), Part));
4302         VecRdxPhi->setIncomingValueForBlock(
4303             LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel);
4304       }
4305     }
4306   }
4307 
4308   // If the vector reduction can be performed in a smaller type, we truncate
4309   // then extend the loop exit value to enable InstCombine to evaluate the
4310   // entire expression in the smaller type.
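  // E.g. (illustrative), with VF = 4, an i32 add reduction provably needing
  // only 8 bits has each unrolled exit value rewritten as:
  //   %t = trunc <4 x i32> %rdx to <4 x i8>
  //   %e = zext <4 x i8> %t to <4 x i32>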
4311   if (VF.isVector() && Phi->getType() != RdxDesc.getRecurrenceType()) {
4312     assert(!IsInLoopReductionPhi && "Unexpected truncated inloop reduction!");
4313     assert(!VF.isScalable() && "scalable vectors not yet supported.");
4314     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
4315     Builder.SetInsertPoint(
4316         LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
4317     VectorParts RdxParts(UF);
4318     for (unsigned Part = 0; Part < UF; ++Part) {
4319       RdxParts[Part] = State.get(LoopExitInstDef, Part);
4320       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4321       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
4322                                         : Builder.CreateZExt(Trunc, VecTy);
4323       for (Value::user_iterator UI = RdxParts[Part]->user_begin();
4324            UI != RdxParts[Part]->user_end();)
4325         if (*UI != Trunc) {
4326           (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd);
4327           RdxParts[Part] = Extnd;
4328         } else {
4329           ++UI;
4330         }
4331     }
4332     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4333     for (unsigned Part = 0; Part < UF; ++Part) {
4334       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4335       State.reset(LoopExitInstDef, RdxParts[Part], Part);
4336     }
4337   }
4338 
4339   // Reduce all of the unrolled parts into a single vector.
4340   Value *ReducedPartRdx = State.get(LoopExitInstDef, 0);
4341   unsigned Op = RecurrenceDescriptor::getOpcode(RK);
4342 
4343   // The middle block terminator has already been assigned a DebugLoc here (the
4344   // OrigLoop's single latch terminator). We want the whole middle block to
4345   // appear to execute on this line because: (a) it is all compiler generated,
4346   // (b) these instructions are always executed after evaluating the latch
4347   // conditional branch, and (c) other passes may add new predecessors which
4348   // terminate on this line. This is the easiest way to ensure we don't
4349   // accidentally cause an extra step back into the loop while debugging.
4350   setDebugLocFromInst(Builder, LoopMiddleBlock->getTerminator());
4351   {
4352     // Floating-point operations should have some FMF to enable the reduction.
4353     IRBuilderBase::FastMathFlagGuard FMFG(Builder);
4354     Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
4355     for (unsigned Part = 1; Part < UF; ++Part) {
4356       Value *RdxPart = State.get(LoopExitInstDef, Part);
4357       if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
4358         ReducedPartRdx = Builder.CreateBinOp(
4359             (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
4360       } else {
4361         ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
4362       }
4363     }
4364   }
4365 
4366   // Create the reduction after the loop. Note that inloop reductions create the
4367   // target reduction in the loop using a Reduction recipe.
4368   if (VF.isVector() && !IsInLoopReductionPhi) {
4369     ReducedPartRdx =
4370         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx);
4371     // If the reduction can be performed in a smaller type, we need to extend
4372     // the reduction to the wider type before we branch to the original loop.
4373     if (Phi->getType() != RdxDesc.getRecurrenceType())
4374       ReducedPartRdx =
4375         RdxDesc.isSigned()
4376         ? Builder.CreateSExt(ReducedPartRdx, Phi->getType())
4377         : Builder.CreateZExt(ReducedPartRdx, Phi->getType());
4378   }
4379 
4380   // Create a phi node that merges control-flow from the backedge-taken check
4381   // block and the middle block.
4382   PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx",
4383                                         LoopScalarPreHeader->getTerminator());
4384   for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
4385     BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
4386   BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
4387 
4388   // Now, we need to fix the users of the reduction variable
4389   // inside and outside of the scalar remainder loop.
4390 
4391   // We know that the loop is in LCSSA form. We need to update the PHI nodes
4392   // in the exit blocks.  See comment on analogous loop in
  // fixFirstOrderRecurrence for a more complete explanation of the logic.
4394   for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4395     if (any_of(LCSSAPhi.incoming_values(),
4396                [LoopExitInst](Value *V) { return V == LoopExitInst; }))
4397       LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
4398 
4399   // Fix the scalar loop reduction variable with the incoming reduction sum
4400   // from the vector body and from the backedge value.
4401   int IncomingEdgeBlockIdx =
4402     Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4403   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4404   // Pick the other block.
4405   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4406   Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4407   Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4408 }
4409 
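// Clear the nuw/nsw flags on the instructions forming an add/mul reduction
// chain. E.g. (illustrative), vectorizing '%sum = add nsw i32 %sum.prev, %x'
// reassociates the scalar sum into vector partial sums, whose intermediate
// values may wrap even when the scalar sum could not, so the wrap flags no
// longer hold.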
4410 void InnerLoopVectorizer::clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc,
4411                                                   VPTransformState &State) {
4412   RecurKind RK = RdxDesc.getRecurrenceKind();
4413   if (RK != RecurKind::Add && RK != RecurKind::Mul)
4414     return;
4415 
4416   Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
4417   assert(LoopExitInstr && "null loop exit instruction");
4418   SmallVector<Instruction *, 8> Worklist;
4419   SmallPtrSet<Instruction *, 8> Visited;
4420   Worklist.push_back(LoopExitInstr);
4421   Visited.insert(LoopExitInstr);
4422 
4423   while (!Worklist.empty()) {
4424     Instruction *Cur = Worklist.pop_back_val();
4425     if (isa<OverflowingBinaryOperator>(Cur))
4426       for (unsigned Part = 0; Part < UF; ++Part) {
4427         Value *V = State.get(State.Plan->getVPValue(Cur), Part);
4428         cast<Instruction>(V)->dropPoisonGeneratingFlags();
4429       }
4430 
4431     for (User *U : Cur->users()) {
4432       Instruction *UI = cast<Instruction>(U);
4433       if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
4434           Visited.insert(UI).second)
4435         Worklist.push_back(UI);
4436     }
4437   }
4438 }
4439 
4440 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) {
4441   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4442     if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1)
4443       // Some phis were already hand updated by the reduction and recurrence
4444       // code above, leave them alone.
4445       continue;
4446 
4447     auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
4448     // Non-instruction incoming values will have only one value.
4449 
4450     VPLane Lane = VPLane::getFirstLane();
4451     if (isa<Instruction>(IncomingValue) &&
4452         !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue),
4453                                            VF))
4454       Lane = VPLane::getLastLaneForVF(VF);
4455 
4456     // Can be a loop invariant incoming value or the last scalar value to be
4457     // extracted from the vectorized loop.
4458     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4459     Value *lastIncomingValue =
4460         OrigLoop->isLoopInvariant(IncomingValue)
4461             ? IncomingValue
4462             : State.get(State.Plan->getVPValue(IncomingValue),
4463                         VPIteration(UF - 1, Lane));
4464     LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
4465   }
4466 }
4467 
4468 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4469   // The basic block and loop containing the predicated instruction.
4470   auto *PredBB = PredInst->getParent();
4471   auto *VectorLoop = LI->getLoopFor(PredBB);
4472 
4473   // Initialize a worklist with the operands of the predicated instruction.
4474   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4475 
4476   // Holds instructions that we need to analyze again. An instruction may be
4477   // reanalyzed if we don't yet know if we can sink it or not.
4478   SmallVector<Instruction *, 8> InstsToReanalyze;
4479 
4480   // Returns true if a given use occurs in the predicated block. Phi nodes use
4481   // their operands in their corresponding predecessor blocks.
4482   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4483     auto *I = cast<Instruction>(U.getUser());
4484     BasicBlock *BB = I->getParent();
4485     if (auto *Phi = dyn_cast<PHINode>(I))
4486       BB = Phi->getIncomingBlock(
4487           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4488     return BB == PredBB;
4489   };
4490 
  // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
  // operands are then added to the worklist. The algorithm ends when a pass
  // through the worklist doesn't sink a single instruction.
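  // E.g. (illustrative), if a predicated store's address GEP is used only in
  // the predicated block, it is sunk there; on the next pass, the add feeding
  // the GEP's index may become sinkable as well.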
4495   bool Changed;
4496   do {
4497     // Add the instructions that need to be reanalyzed to the worklist, and
4498     // reset the changed indicator.
4499     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4500     InstsToReanalyze.clear();
4501     Changed = false;
4502 
4503     while (!Worklist.empty()) {
4504       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4505 
4506       // We can't sink an instruction if it is a phi node, is already in the
4507       // predicated block, is not in the loop, or may have side effects.
4508       if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
4509           !VectorLoop->contains(I) || I->mayHaveSideEffects())
4510         continue;
4511 
4512       // It's legal to sink the instruction if all its uses occur in the
4513       // predicated block. Otherwise, there's nothing to do yet, and we may
4514       // need to reanalyze the instruction.
4515       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4516         InstsToReanalyze.push_back(I);
4517         continue;
4518       }
4519 
      // Move the instruction to the beginning of the predicated block, and add
      // its operands to the worklist.
4522       I->moveBefore(&*PredBB->getFirstInsertionPt());
4523       Worklist.insert(I->op_begin(), I->op_end());
4524 
4525       // The sinking may have enabled other instructions to be sunk, so we will
4526       // need to iterate.
4527       Changed = true;
4528     }
4529   } while (Changed);
4530 }
4531 
4532 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
4533   for (PHINode *OrigPhi : OrigPHIsToFix) {
4534     VPWidenPHIRecipe *VPPhi =
4535         cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi));
4536     PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
4537     // Make sure the builder has a valid insert point.
4538     Builder.SetInsertPoint(NewPhi);
4539     for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) {
4540       VPValue *Inc = VPPhi->getIncomingValue(i);
4541       VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i);
4542       NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]);
4543     }
4544   }
4545 }
4546 
4547 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, VPValue *VPDef,
4548                                    VPUser &Operands, unsigned UF,
4549                                    ElementCount VF, bool IsPtrLoopInvariant,
4550                                    SmallBitVector &IsIndexLoopInvariant,
4551                                    VPTransformState &State) {
4552   // Construct a vector GEP by widening the operands of the scalar GEP as
4553   // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
4554   // results in a vector of pointers when at least one operand of the GEP
4555   // is vector-typed. Thus, to keep the representation compact, we only use
4556   // vector-typed operands for loop-varying values.
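  // E.g. (illustrative), with a loop-varying index and VF = 4:
  //   %vgep = getelementptr i32, i32* %base, <4 x i64> %vec.ind
  // produces a <4 x i32*> vector of pointers.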
4557 
4558   if (VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
4559     // If we are vectorizing, but the GEP has only loop-invariant operands,
4560     // the GEP we build (by only using vector-typed operands for
4561     // loop-varying values) would be a scalar pointer. Thus, to ensure we
4562     // produce a vector of pointers, we need to either arbitrarily pick an
4563     // operand to broadcast, or broadcast a clone of the original GEP.
4564     // Here, we broadcast a clone of the original.
4565     //
4566     // TODO: If at some point we decide to scalarize instructions having
4567     //       loop-invariant operands, this special case will no longer be
4568     //       required. We would add the scalarization decision to
4569     //       collectLoopScalars() and teach getVectorValue() to broadcast
4570     //       the lane-zero scalar value.
4571     auto *Clone = Builder.Insert(GEP->clone());
4572     for (unsigned Part = 0; Part < UF; ++Part) {
4573       Value *EntryPart = Builder.CreateVectorSplat(VF, Clone);
4574       State.set(VPDef, EntryPart, Part);
4575       addMetadata(EntryPart, GEP);
4576     }
4577   } else {
4578     // If the GEP has at least one loop-varying operand, we are sure to
4579     // produce a vector of pointers. But if we are only unrolling, we want
4580     // to produce a scalar GEP for each unroll part. Thus, the GEP we
4581     // produce with the code below will be scalar (if VF == 1) or vector
4582     // (otherwise). Note that for the unroll-only case, we still maintain
4583     // values in the vector mapping with initVector, as we do for other
4584     // instructions.
4585     for (unsigned Part = 0; Part < UF; ++Part) {
4586       // The pointer operand of the new GEP. If it's loop-invariant, we
4587       // won't broadcast it.
4588       auto *Ptr = IsPtrLoopInvariant
4589                       ? State.get(Operands.getOperand(0), VPIteration(0, 0))
4590                       : State.get(Operands.getOperand(0), Part);
4591 
4592       // Collect all the indices for the new GEP. If any index is
4593       // loop-invariant, we won't broadcast it.
4594       SmallVector<Value *, 4> Indices;
4595       for (unsigned I = 1, E = Operands.getNumOperands(); I < E; I++) {
4596         VPValue *Operand = Operands.getOperand(I);
4597         if (IsIndexLoopInvariant[I - 1])
4598           Indices.push_back(State.get(Operand, VPIteration(0, 0)));
4599         else
4600           Indices.push_back(State.get(Operand, Part));
4601       }
4602 
4603       // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
4604       // but it should be a vector, otherwise.
4605       auto *NewGEP =
4606           GEP->isInBounds()
4607               ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr,
4608                                           Indices)
4609               : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices);
4610       assert((VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
4611              "NewGEP is not a pointer vector");
4612       State.set(VPDef, NewGEP, Part);
4613       addMetadata(NewGEP, GEP);
4614     }
4615   }
4616 }
4617 
4618 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
4619                                               RecurrenceDescriptor *RdxDesc,
4620                                               VPValue *StartVPV, VPValue *Def,
4621                                               VPTransformState &State) {
4622   PHINode *P = cast<PHINode>(PN);
4623   if (EnableVPlanNativePath) {
4624     // Currently we enter here in the VPlan-native path for non-induction
4625     // PHIs where all control flow is uniform. We simply widen these PHIs.
4626     // Create a vector phi with no operands - the vector phi operands will be
4627     // set at the end of vector code generation.
4628     Type *VecTy = (State.VF.isScalar())
4629                       ? PN->getType()
4630                       : VectorType::get(PN->getType(), State.VF);
4631     Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4632     State.set(Def, VecPhi, 0);
4633     OrigPHIsToFix.push_back(P);
4634 
4635     return;
4636   }
4637 
4638   assert(PN->getParent() == OrigLoop->getHeader() &&
4639          "Non-header phis should have been handled elsewhere");
4640 
4641   Value *StartV = StartVPV ? StartVPV->getLiveInIRValue() : nullptr;
4642   // In order to support recurrences we need to be able to vectorize Phi nodes.
4643   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4644   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4645   // this value when we vectorize all of the instructions that use the PHI.
4646   if (RdxDesc || Legal->isFirstOrderRecurrence(P)) {
4647     Value *Iden = nullptr;
4648     bool ScalarPHI =
4649         (State.VF.isScalar()) || Cost->isInLoopReduction(cast<PHINode>(PN));
4650     Type *VecTy =
4651         ScalarPHI ? PN->getType() : VectorType::get(PN->getType(), State.VF);
4652 
4653     if (RdxDesc) {
4654       assert(Legal->isReductionVariable(P) && StartV &&
4655              "RdxDesc should only be set for reduction variables; in that case "
4656              "a StartV is also required");
4657       RecurKind RK = RdxDesc->getRecurrenceKind();
4658       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK)) {
        // MinMax reductions have the start value as their identity.
4660         if (ScalarPHI) {
4661           Iden = StartV;
4662         } else {
4663           IRBuilderBase::InsertPointGuard IPBuilder(Builder);
4664           Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4665           StartV = Iden =
4666               Builder.CreateVectorSplat(State.VF, StartV, "minmax.ident");
4667         }
4668       } else {
4669         Constant *IdenC = RecurrenceDescriptor::getRecurrenceIdentity(
4670             RK, VecTy->getScalarType());
4671         Iden = IdenC;
4672 
4673         if (!ScalarPHI) {
4674           Iden = ConstantVector::getSplat(State.VF, IdenC);
4675           IRBuilderBase::InsertPointGuard IPBuilder(Builder);
4676           Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4677           Constant *Zero = Builder.getInt32(0);
4678           StartV = Builder.CreateInsertElement(Iden, StartV, Zero);
4679         }
4680       }
4681     }
4682 
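    // E.g. (illustrative), for an integer add reduction with VF = 4 and start
    // value %s, part 0's incoming value from the preheader is <%s, 0, 0, 0>
    // and the remaining parts get the identity <0, 0, 0, 0>.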
4683     for (unsigned Part = 0; Part < State.UF; ++Part) {
4684       // This is phase one of vectorizing PHIs.
4685       Value *EntryPart = PHINode::Create(
4686           VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
4687       State.set(Def, EntryPart, Part);
4688       if (StartV) {
4689         // Make sure to add the reduction start value only to the
4690         // first unroll part.
4691         Value *StartVal = (Part == 0) ? StartV : Iden;
4692         cast<PHINode>(EntryPart)->addIncoming(StartVal, LoopVectorPreHeader);
4693       }
4694     }
4695     return;
4696   }
4697 
4698   assert(!Legal->isReductionVariable(P) &&
4699          "reductions should be handled above");
4700 
4701   setDebugLocFromInst(Builder, P);
4702 
4703   // This PHINode must be an induction variable.
4704   // Make sure that we know about it.
4705   assert(Legal->getInductionVars().count(P) && "Not an induction variable");
4706 
4707   InductionDescriptor II = Legal->getInductionVars().lookup(P);
4708   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4709 
4710   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4711   // which can be found from the original scalar operations.
4712   switch (II.getKind()) {
4713   case InductionDescriptor::IK_NoInduction:
4714     llvm_unreachable("Unknown induction");
4715   case InductionDescriptor::IK_IntInduction:
4716   case InductionDescriptor::IK_FpInduction:
4717     llvm_unreachable("Integer/fp induction is handled elsewhere.");
4718   case InductionDescriptor::IK_PtrInduction: {
4719     // Handle the pointer induction variable case.
4720     assert(P->getType()->isPointerTy() && "Unexpected type.");
4721 
4722     if (Cost->isScalarAfterVectorization(P, State.VF)) {
4723       // This is the normalized GEP that starts counting at zero.
4724       Value *PtrInd =
4725           Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType());
4726       // Determine the number of scalars we need to generate for each unroll
4727       // iteration. If the instruction is uniform, we only need to generate the
4728       // first lane. Otherwise, we generate all VF values.
4729       unsigned Lanes = Cost->isUniformAfterVectorization(P, State.VF)
4730                            ? 1
4731                            : State.VF.getKnownMinValue();
4732       for (unsigned Part = 0; Part < UF; ++Part) {
4733         for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4734           Constant *Idx = ConstantInt::get(
4735               PtrInd->getType(), Lane + Part * State.VF.getKnownMinValue());
4736           Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4737           Value *SclrGep =
4738               emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II);
4739           SclrGep->setName("next.gep");
4740           State.set(Def, SclrGep, VPIteration(Part, Lane));
4741         }
4742       }
4743       return;
4744     }
4745     assert(isa<SCEVConstant>(II.getStep()) &&
4746            "Induction step not a SCEV constant!");
4747     Type *PhiType = II.getStep()->getType();
4748 
4749     // Build a pointer phi
4750     Value *ScalarStartValue = II.getStartValue();
4751     Type *ScStValueType = ScalarStartValue->getType();
4752     PHINode *NewPointerPhi =
4753         PHINode::Create(ScStValueType, 2, "pointer.phi", Induction);
4754     NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader);
4755 
    // A pointer induction, advanced by a GEP in the loop latch.
4757     BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
4758     Instruction *InductionLoc = LoopLatch->getTerminator();
4759     const SCEV *ScalarStep = II.getStep();
4760     SCEVExpander Exp(*PSE.getSE(), DL, "induction");
4761     Value *ScalarStepValue =
4762         Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc);
4763     Value *InductionGEP = GetElementPtrInst::Create(
4764         ScStValueType->getPointerElementType(), NewPointerPhi,
4765         Builder.CreateMul(
4766             ScalarStepValue,
4767             ConstantInt::get(PhiType, State.VF.getKnownMinValue() * State.UF)),
4768         "ptr.ind", InductionLoc);
4769     NewPointerPhi->addIncoming(InductionGEP, LoopLatch);
4770 
    // Create UF actual address GEPs that use the pointer
    // phi as their base and a vectorized version of the step value
    // (<step*0, ..., step*N>) as their offset.
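    // For example (illustrative, names hypothetical), with VF=4, UF=2 and a
    // scalar step of one element, Part 0 uses offsets <0, 1, 2, 3> and Part 1
    // uses offsets <4, 5, 6, 7>, each multiplied by the splatted step value.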
4774     for (unsigned Part = 0; Part < State.UF; ++Part) {
4775       SmallVector<Constant *, 8> Indices;
4776       // Create a vector of consecutive numbers from zero to VF.
4777       for (unsigned i = 0; i < State.VF.getKnownMinValue(); ++i)
4778         Indices.push_back(
4779             ConstantInt::get(PhiType, i + Part * State.VF.getKnownMinValue()));
4780       Constant *StartOffset = ConstantVector::get(Indices);
4781 
4782       Value *GEP = Builder.CreateGEP(
4783           ScStValueType->getPointerElementType(), NewPointerPhi,
4784           Builder.CreateMul(StartOffset,
4785                             Builder.CreateVectorSplat(
4786                                 State.VF.getKnownMinValue(), ScalarStepValue),
4787                             "vector.gep"));
4788       State.set(Def, GEP, Part);
4789     }
4790   }
4791   }
4792 }
4793 
4794 /// A helper function for checking whether an integer division-related
4795 /// instruction may divide by zero (in which case it must be predicated if
4796 /// executed conditionally in the scalar code).
4797 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
/// Non-zero divisors that are not compile-time constants will not be
/// converted into multiplication, so we will still end up scalarizing
/// the division, but can do so without predication.
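/// For example (illustrative), given:
///   for (i = 0; i < n; ++i)
///     if (c[i]) a[i] = x / b[i];
/// the division executes only when c[i] is true; unless b[i] is a non-zero
/// compile-time constant, the scalarized division must remain predicated.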
4801 static bool mayDivideByZero(Instruction &I) {
4802   assert((I.getOpcode() == Instruction::UDiv ||
4803           I.getOpcode() == Instruction::SDiv ||
4804           I.getOpcode() == Instruction::URem ||
4805           I.getOpcode() == Instruction::SRem) &&
4806          "Unexpected instruction");
4807   Value *Divisor = I.getOperand(1);
4808   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4809   return !CInt || CInt->isZero();
4810 }
4811 
4812 void InnerLoopVectorizer::widenInstruction(Instruction &I, VPValue *Def,
4813                                            VPUser &User,
4814                                            VPTransformState &State) {
4815   switch (I.getOpcode()) {
4816   case Instruction::Call:
4817   case Instruction::Br:
4818   case Instruction::PHI:
4819   case Instruction::GetElementPtr:
4820   case Instruction::Select:
4821     llvm_unreachable("This instruction is handled by a different recipe.");
4822   case Instruction::UDiv:
4823   case Instruction::SDiv:
4824   case Instruction::SRem:
4825   case Instruction::URem:
4826   case Instruction::Add:
4827   case Instruction::FAdd:
4828   case Instruction::Sub:
4829   case Instruction::FSub:
4830   case Instruction::FNeg:
4831   case Instruction::Mul:
4832   case Instruction::FMul:
4833   case Instruction::FDiv:
4834   case Instruction::FRem:
4835   case Instruction::Shl:
4836   case Instruction::LShr:
4837   case Instruction::AShr:
4838   case Instruction::And:
4839   case Instruction::Or:
4840   case Instruction::Xor: {
4841     // Just widen unops and binops.
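    // For example (illustrative), with VF=4 a scalar 'add i32 %a, %b' becomes
    // one 'add <4 x i32>' per unroll part, with the original IR flags
    // (nsw/nuw, fast-math) copied over.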
4842     setDebugLocFromInst(Builder, &I);
4843 
4844     for (unsigned Part = 0; Part < UF; ++Part) {
4845       SmallVector<Value *, 2> Ops;
4846       for (VPValue *VPOp : User.operands())
4847         Ops.push_back(State.get(VPOp, Part));
4848 
4849       Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
4850 
4851       if (auto *VecOp = dyn_cast<Instruction>(V))
4852         VecOp->copyIRFlags(&I);
4853 
4854       // Use this vector value for all users of the original instruction.
4855       State.set(Def, V, Part);
4856       addMetadata(V, &I);
4857     }
4858 
4859     break;
4860   }
4861   case Instruction::ICmp:
4862   case Instruction::FCmp: {
4863     // Widen compares. Generate vector compares.
4864     bool FCmp = (I.getOpcode() == Instruction::FCmp);
4865     auto *Cmp = cast<CmpInst>(&I);
4866     setDebugLocFromInst(Builder, Cmp);
4867     for (unsigned Part = 0; Part < UF; ++Part) {
4868       Value *A = State.get(User.getOperand(0), Part);
4869       Value *B = State.get(User.getOperand(1), Part);
4870       Value *C = nullptr;
4871       if (FCmp) {
4872         // Propagate fast math flags.
4873         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
4874         Builder.setFastMathFlags(Cmp->getFastMathFlags());
4875         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
4876       } else {
4877         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
4878       }
4879       State.set(Def, C, Part);
4880       addMetadata(C, &I);
4881     }
4882 
4883     break;
4884   }
4885 
4886   case Instruction::ZExt:
4887   case Instruction::SExt:
4888   case Instruction::FPToUI:
4889   case Instruction::FPToSI:
4890   case Instruction::FPExt:
4891   case Instruction::PtrToInt:
4892   case Instruction::IntToPtr:
4893   case Instruction::SIToFP:
4894   case Instruction::UIToFP:
4895   case Instruction::Trunc:
4896   case Instruction::FPTrunc:
4897   case Instruction::BitCast: {
4898     auto *CI = cast<CastInst>(&I);
4899     setDebugLocFromInst(Builder, CI);
4900 
    // Vectorize casts.
4902     Type *DestTy =
4903         (VF.isScalar()) ? CI->getType() : VectorType::get(CI->getType(), VF);
4904 
4905     for (unsigned Part = 0; Part < UF; ++Part) {
4906       Value *A = State.get(User.getOperand(0), Part);
4907       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
4908       State.set(Def, Cast, Part);
4909       addMetadata(Cast, &I);
4910     }
4911     break;
4912   }
4913   default:
4914     // This instruction is not vectorized by simple widening.
4915     LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
4916     llvm_unreachable("Unhandled instruction!");
4917   } // end of switch.
4918 }
4919 
4920 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def,
4921                                                VPUser &ArgOperands,
4922                                                VPTransformState &State) {
4923   assert(!isa<DbgInfoIntrinsic>(I) &&
4924          "DbgInfoIntrinsic should have been dropped during VPlan construction");
4925   setDebugLocFromInst(Builder, &I);
4926 
4927   Module *M = I.getParent()->getParent()->getParent();
4928   auto *CI = cast<CallInst>(&I);
4929 
4930   SmallVector<Type *, 4> Tys;
4931   for (Value *ArgOperand : CI->arg_operands())
4932     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue()));
4933 
4934   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4935 
  // This flag indicates whether we use an intrinsic or a plain function call
  // for the vectorized version of the instruction, i.e., whether calling the
  // intrinsic is more beneficial than calling a library function.
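  // For example (illustrative), a call to sinf may be widened either to the
  // llvm.sin intrinsic on <4 x float> or to a vector library routine (e.g., a
  // vector-ABI variant of sinf), whichever the cost model rates cheaper.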
4939   bool NeedToScalarize = false;
4940   InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
4941   InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0;
4942   bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
4943   assert((UseVectorIntrinsic || !NeedToScalarize) &&
4944          "Instruction should be scalarized elsewhere.");
4945   assert(IntrinsicCost.isValid() && CallCost.isValid() &&
4946          "Cannot have invalid costs while widening");
4947 
4948   for (unsigned Part = 0; Part < UF; ++Part) {
4949     SmallVector<Value *, 4> Args;
4950     for (auto &I : enumerate(ArgOperands.operands())) {
4951       // Some intrinsics have a scalar argument - don't replace it with a
4952       // vector.
4953       Value *Arg;
4954       if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
4955         Arg = State.get(I.value(), Part);
4956       else
4957         Arg = State.get(I.value(), VPIteration(0, 0));
4958       Args.push_back(Arg);
4959     }
4960 
4961     Function *VectorF;
4962     if (UseVectorIntrinsic) {
4963       // Use vector version of the intrinsic.
4964       Type *TysForDecl[] = {CI->getType()};
4965       if (VF.isVector())
4966         TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
4967       VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4968       assert(VectorF && "Can't retrieve vector intrinsic.");
4969     } else {
4970       // Use vector version of the function call.
4971       const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
4972 #ifndef NDEBUG
4973       assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
4974              "Can't create vector function.");
4975 #endif
      VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
    }
    SmallVector<OperandBundleDef, 1> OpBundles;
    CI->getOperandBundlesAsDefs(OpBundles);
    CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);

    if (isa<FPMathOperator>(V))
      V->copyFastMathFlags(CI);

    State.set(Def, V, Part);
    addMetadata(V, &I);
  }
4988 }
4989 
4990 void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I, VPValue *VPDef,
4991                                                  VPUser &Operands,
4992                                                  bool InvariantCond,
4993                                                  VPTransformState &State) {
4994   setDebugLocFromInst(Builder, &I);
4995 
  // The condition can be loop invariant but still defined inside the
4997   // loop. This means that we can't just use the original 'cond' value.
4998   // We have to take the 'vectorized' value and pick the first lane.
4999   // Instcombine will make this a no-op.
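  // For example (illustrative), for an invariant condition the emitted select
  // uses the scalar lane-0 value:
  //   %sel = select i1 %cond.lane0, <4 x i32> %op0, <4 x i32> %op1
  // rather than a <4 x i1> mask.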
5000   auto *InvarCond = InvariantCond
5001                         ? State.get(Operands.getOperand(0), VPIteration(0, 0))
5002                         : nullptr;
5003 
5004   for (unsigned Part = 0; Part < UF; ++Part) {
5005     Value *Cond =
5006         InvarCond ? InvarCond : State.get(Operands.getOperand(0), Part);
5007     Value *Op0 = State.get(Operands.getOperand(1), Part);
5008     Value *Op1 = State.get(Operands.getOperand(2), Part);
5009     Value *Sel = Builder.CreateSelect(Cond, Op0, Op1);
5010     State.set(VPDef, Sel, Part);
5011     addMetadata(Sel, &I);
5012   }
5013 }
5014 
5015 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
5016   // We should not collect Scalars more than once per VF. Right now, this
5017   // function is called from collectUniformsAndScalars(), which already does
5018   // this check. Collecting Scalars for VF=1 does not make any sense.
5019   assert(VF.isVector() && Scalars.find(VF) == Scalars.end() &&
5020          "This function should not be visited twice for the same VF");
5021 
5022   SmallSetVector<Instruction *, 8> Worklist;
5023 
5024   // These sets are used to seed the analysis with pointers used by memory
5025   // accesses that will remain scalar.
5026   SmallSetVector<Instruction *, 8> ScalarPtrs;
5027   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
5028   auto *Latch = TheLoop->getLoopLatch();
5029 
5030   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
5031   // The pointer operands of loads and stores will be scalar as long as the
5032   // memory access is not a gather or scatter operation. The value operand of a
5033   // store will remain scalar if the store is scalarized.
5034   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
5035     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
5036     assert(WideningDecision != CM_Unknown &&
5037            "Widening decision should be ready at this moment");
5038     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
5039       if (Ptr == Store->getValueOperand())
5040         return WideningDecision == CM_Scalarize;
5041     assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
5042            "Ptr is neither a value or pointer operand");
5043     return WideningDecision != CM_GatherScatter;
5044   };
5045 
  // A helper that returns true if the given value is a bitcast (of pointer
  // type) or getelementptr instruction that is not loop-invariant.
5048   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
5049     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
5050             isa<GetElementPtrInst>(V)) &&
5051            !TheLoop->isLoopInvariant(V);
5052   };
5053 
5054   auto isScalarPtrInduction = [&](Instruction *MemAccess, Value *Ptr) {
5055     if (!isa<PHINode>(Ptr) ||
5056         !Legal->getInductionVars().count(cast<PHINode>(Ptr)))
5057       return false;
5058     auto &Induction = Legal->getInductionVars()[cast<PHINode>(Ptr)];
5059     if (Induction.getKind() != InductionDescriptor::IK_PtrInduction)
5060       return false;
5061     return isScalarUse(MemAccess, Ptr);
5062   };
5063 
  // A helper that evaluates a memory access's use of a pointer. If the
  // pointer is actually the pointer induction of a loop, it is inserted
  // into the Worklist. If the use will be a scalar use, and the pointer is
  // only used by memory accesses, we place the pointer in ScalarPtrs.
  // Otherwise, the pointer is placed in PossibleNonScalarPtrs.
5069   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
5070     if (isScalarPtrInduction(MemAccess, Ptr)) {
5071       Worklist.insert(cast<Instruction>(Ptr));
5072       Instruction *Update = cast<Instruction>(
5073           cast<PHINode>(Ptr)->getIncomingValueForBlock(Latch));
5074       Worklist.insert(Update);
5075       LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Ptr
5076                         << "\n");
5077       LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Update
5078                         << "\n");
5079       return;
5080     }
5081     // We only care about bitcast and getelementptr instructions contained in
5082     // the loop.
5083     if (!isLoopVaryingBitCastOrGEP(Ptr))
5084       return;
5085 
5086     // If the pointer has already been identified as scalar (e.g., if it was
5087     // also identified as uniform), there's nothing to do.
5088     auto *I = cast<Instruction>(Ptr);
5089     if (Worklist.count(I))
5090       return;
5091 
5092     // If the use of the pointer will be a scalar use, and all users of the
5093     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
5094     // place the pointer in PossibleNonScalarPtrs.
5095     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
5096           return isa<LoadInst>(U) || isa<StoreInst>(U);
5097         }))
5098       ScalarPtrs.insert(I);
5099     else
5100       PossibleNonScalarPtrs.insert(I);
5101   };
5102 
  // We seed the scalars analysis with two classes of instructions: (1)
  // instructions marked uniform-after-vectorization and (2) bitcast,
  // getelementptr and (pointer) phi instructions used by memory accesses
  // requiring a scalar use.
5107   //
5108   // (1) Add to the worklist all instructions that have been identified as
5109   // uniform-after-vectorization.
5110   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
5111 
5112   // (2) Add to the worklist all bitcast and getelementptr instructions used by
5113   // memory accesses requiring a scalar use. The pointer operands of loads and
  // stores will be scalar as long as the memory access is not a gather or
5115   // scatter operation. The value operand of a store will remain scalar if the
5116   // store is scalarized.
5117   for (auto *BB : TheLoop->blocks())
5118     for (auto &I : *BB) {
5119       if (auto *Load = dyn_cast<LoadInst>(&I)) {
5120         evaluatePtrUse(Load, Load->getPointerOperand());
5121       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
5122         evaluatePtrUse(Store, Store->getPointerOperand());
5123         evaluatePtrUse(Store, Store->getValueOperand());
5124       }
5125     }
5126   for (auto *I : ScalarPtrs)
5127     if (!PossibleNonScalarPtrs.count(I)) {
5128       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
5129       Worklist.insert(I);
5130     }
5131 
5132   // Insert the forced scalars.
5133   // FIXME: Currently widenPHIInstruction() often creates a dead vector
5134   // induction variable when the PHI user is scalarized.
5135   auto ForcedScalar = ForcedScalars.find(VF);
5136   if (ForcedScalar != ForcedScalars.end())
5137     for (auto *I : ForcedScalar->second)
5138       Worklist.insert(I);
5139 
5140   // Expand the worklist by looking through any bitcasts and getelementptr
5141   // instructions we've already identified as scalar. This is similar to the
5142   // expansion step in collectLoopUniforms(); however, here we're only
5143   // expanding to include additional bitcasts and getelementptr instructions.
5144   unsigned Idx = 0;
5145   while (Idx != Worklist.size()) {
5146     Instruction *Dst = Worklist[Idx++];
5147     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
5148       continue;
5149     auto *Src = cast<Instruction>(Dst->getOperand(0));
5150     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
5151           auto *J = cast<Instruction>(U);
5152           return !TheLoop->contains(J) || Worklist.count(J) ||
5153                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
5154                   isScalarUse(J, Src));
5155         })) {
5156       Worklist.insert(Src);
5157       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
5158     }
5159   }
5160 
5161   // An induction variable will remain scalar if all users of the induction
5162   // variable and induction variable update remain scalar.
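  // For example (illustrative), a canonical induction 'i' whose only users
  // are the addresses of scalarized accesses and its own 'i = i + 1' update
  // remains scalar, whereas one that also feeds a widened arithmetic
  // operation does not.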
5163   for (auto &Induction : Legal->getInductionVars()) {
5164     auto *Ind = Induction.first;
5165     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5166 
5167     // If tail-folding is applied, the primary induction variable will be used
5168     // to feed a vector compare.
5169     if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
5170       continue;
5171 
5172     // Determine if all users of the induction variable are scalar after
5173     // vectorization.
5174     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5175       auto *I = cast<Instruction>(U);
5176       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
5177     });
5178     if (!ScalarInd)
5179       continue;
5180 
5181     // Determine if all users of the induction variable update instruction are
5182     // scalar after vectorization.
5183     auto ScalarIndUpdate =
5184         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5185           auto *I = cast<Instruction>(U);
5186           return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
5187         });
5188     if (!ScalarIndUpdate)
5189       continue;
5190 
5191     // The induction variable and its update instruction will remain scalar.
5192     Worklist.insert(Ind);
5193     Worklist.insert(IndUpdate);
5194     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
5195     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
5196                       << "\n");
5197   }
5198 
5199   Scalars[VF].insert(Worklist.begin(), Worklist.end());
5200 }
5201 
5202 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I,
5203                                                          ElementCount VF) {
5204   if (!blockNeedsPredication(I->getParent()))
5205     return false;
  switch (I->getOpcode()) {
5207   default:
5208     break;
5209   case Instruction::Load:
5210   case Instruction::Store: {
5211     if (!Legal->isMaskRequired(I))
5212       return false;
5213     auto *Ptr = getLoadStorePointerOperand(I);
5214     auto *Ty = getMemInstValueType(I);
    // We have already decided how to vectorize this instruction; get that
    // result.
5217     if (VF.isVector()) {
5218       InstWidening WideningDecision = getWideningDecision(I, VF);
5219       assert(WideningDecision != CM_Unknown &&
5220              "Widening decision should be ready at this moment");
5221       return WideningDecision == CM_Scalarize;
5222     }
5223     const Align Alignment = getLoadStoreAlignment(I);
5224     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
5225                                 isLegalMaskedGather(Ty, Alignment))
5226                             : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
5227                                 isLegalMaskedScatter(Ty, Alignment));
5228   }
5229   case Instruction::UDiv:
5230   case Instruction::SDiv:
5231   case Instruction::SRem:
5232   case Instruction::URem:
5233     return mayDivideByZero(*I);
5234   }
5235   return false;
5236 }
5237 
5238 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
5239     Instruction *I, ElementCount VF) {
5240   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
5241   assert(getWideningDecision(I, VF) == CM_Unknown &&
5242          "Decision should not be set yet.");
5243   auto *Group = getInterleavedAccessGroup(I);
5244   assert(Group && "Must have a group.");
5245 
  // If the instruction's allocated size doesn't equal its type size, it
5247   // requires padding and will be scalarized.
5248   auto &DL = I->getModule()->getDataLayout();
5249   auto *ScalarTy = getMemInstValueType(I);
5250   if (hasIrregularType(ScalarTy, DL, VF))
5251     return false;
5252 
5253   // Check if masking is required.
5254   // A Group may need masking for one of two reasons: it resides in a block that
5255   // needs predication, or it was decided to use masking to deal with gaps.
5256   bool PredicatedAccessRequiresMasking =
5257       Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I);
5258   bool AccessWithGapsRequiresMasking =
5259       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
5260   if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking)
5261     return true;
5262 
5263   // If masked interleaving is required, we expect that the user/target had
5264   // enabled it, because otherwise it either wouldn't have been created or
5265   // it should have been invalidated by the CostModel.
5266   assert(useMaskedInterleavedAccesses(TTI) &&
5267          "Masked interleave-groups for predicated accesses are not enabled.");
5268 
5269   auto *Ty = getMemInstValueType(I);
5270   const Align Alignment = getLoadStoreAlignment(I);
5271   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
5272                           : TTI.isLegalMaskedStore(Ty, Alignment);
5273 }
5274 
5275 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
5276     Instruction *I, ElementCount VF) {
5277   // Get and ensure we have a valid memory instruction.
5278   LoadInst *LI = dyn_cast<LoadInst>(I);
5279   StoreInst *SI = dyn_cast<StoreInst>(I);
5280   assert((LI || SI) && "Invalid memory instruction");
5281 
5282   auto *Ptr = getLoadStorePointerOperand(I);
5283 
  // First of all, in order to be widened the pointer must be consecutive.
5285   if (!Legal->isConsecutivePtr(Ptr))
5286     return false;
5287 
5288   // If the instruction is a store located in a predicated block, it will be
5289   // scalarized.
5290   if (isScalarWithPredication(I))
5291     return false;
5292 
  // If the instruction's allocated size doesn't equal its type size, it
5294   // requires padding and will be scalarized.
5295   auto &DL = I->getModule()->getDataLayout();
5296   auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
5297   if (hasIrregularType(ScalarTy, DL, VF))
5298     return false;
5299 
5300   return true;
5301 }
5302 
5303 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
5304   // We should not collect Uniforms more than once per VF. Right now,
5305   // this function is called from collectUniformsAndScalars(), which
5306   // already does this check. Collecting Uniforms for VF=1 does not make any
5307   // sense.
5308 
5309   assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
5310          "This function should not be visited twice for the same VF");
5311 
  // Initialize the entry for this VF so that, even if no uniform values are
  // found, we will not analyze it again; Uniforms.count(VF) will return 1.
5314   Uniforms[VF].clear();
5315 
5316   // We now know that the loop is vectorizable!
5317   // Collect instructions inside the loop that will remain uniform after
5318   // vectorization.
5319 
5320   // Global values, params and instructions outside of current loop are out of
5321   // scope.
5322   auto isOutOfScope = [&](Value *V) -> bool {
5323     Instruction *I = dyn_cast<Instruction>(V);
5324     return (!I || !TheLoop->contains(I));
5325   };
5326 
5327   SetVector<Instruction *> Worklist;
5328   BasicBlock *Latch = TheLoop->getLoopLatch();
5329 
5330   // Instructions that are scalar with predication must not be considered
5331   // uniform after vectorization, because that would create an erroneous
5332   // replicating region where only a single instance out of VF should be formed.
  // TODO: optimize such rare cases if found important, see PR40816.
5334   auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
5335     if (isOutOfScope(I)) {
5336       LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
5337                         << *I << "\n");
5338       return;
5339     }
5340     if (isScalarWithPredication(I, VF)) {
5341       LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
5342                         << *I << "\n");
5343       return;
5344     }
5345     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
5346     Worklist.insert(I);
5347   };
5348 
5349   // Start with the conditional branch. If the branch condition is an
5350   // instruction contained in the loop that is only used by the branch, it is
5351   // uniform.
5352   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
5353   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
5354     addToWorklistIfAllowed(Cmp);
5355 
5356   auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
5357     InstWidening WideningDecision = getWideningDecision(I, VF);
5358     assert(WideningDecision != CM_Unknown &&
5359            "Widening decision should be ready at this moment");
5360 
5361     // A uniform memory op is itself uniform.  We exclude uniform stores
5362     // here as they demand the last lane, not the first one.
5363     if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) {
5364       assert(WideningDecision == CM_Scalarize);
5365       return true;
5366     }
5367 
5368     return (WideningDecision == CM_Widen ||
5369             WideningDecision == CM_Widen_Reverse ||
5370             WideningDecision == CM_Interleave);
5371   };
5374   // Returns true if Ptr is the pointer operand of a memory access instruction
5375   // I, and I is known to not require scalarization.
5376   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
5377     return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
5378   };
5379 
5380   // Holds a list of values which are known to have at least one uniform use.
5381   // Note that there may be other uses which aren't uniform.  A "uniform use"
5382   // here is something which only demands lane 0 of the unrolled iterations;
  // it does not imply that all lanes produce the same value (i.e., this is
  // not the usual meaning of uniform).
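  // For example (illustrative), the address of a consecutive widened load is
  // such a use: only lane 0 of the address computation is needed, because the
  // remaining lanes are implied by the consecutive access pattern.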
5385   SmallPtrSet<Value *, 8> HasUniformUse;
5386 
5387   // Scan the loop for instructions which are either a) known to have only
5388   // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
5389   for (auto *BB : TheLoop->blocks())
5390     for (auto &I : *BB) {
5391       // If there's no pointer operand, there's nothing to do.
5392       auto *Ptr = getLoadStorePointerOperand(&I);
5393       if (!Ptr)
5394         continue;
5395 
5396       // A uniform memory op is itself uniform.  We exclude uniform stores
5397       // here as they demand the last lane, not the first one.
5398       if (isa<LoadInst>(I) && Legal->isUniformMemOp(I))
5399         addToWorklistIfAllowed(&I);
5400 
5401       if (isUniformDecision(&I, VF)) {
5402         assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check");
5403         HasUniformUse.insert(Ptr);
5404       }
5405     }
5406 
  // Add to the worklist any operands which have *only* uniform (i.e., lane-0
  // demanding) users.  Since loops are assumed to be in LCSSA form, this
5409   // disallows uses outside the loop as well.
5410   for (auto *V : HasUniformUse) {
5411     if (isOutOfScope(V))
5412       continue;
5413     auto *I = cast<Instruction>(V);
5414     auto UsersAreMemAccesses =
5415       llvm::all_of(I->users(), [&](User *U) -> bool {
5416         return isVectorizedMemAccessUse(cast<Instruction>(U), V);
5417       });
5418     if (UsersAreMemAccesses)
5419       addToWorklistIfAllowed(I);
5420   }
5421 
  // Expand Worklist in topological order: whenever a new instruction is
  // added, its users should already be inside Worklist.  This ensures that a
  // uniform instruction will only be used by uniform instructions.
5425   unsigned idx = 0;
5426   while (idx != Worklist.size()) {
5427     Instruction *I = Worklist[idx++];
5428 
5429     for (auto OV : I->operand_values()) {
5430       // isOutOfScope operands cannot be uniform instructions.
5431       if (isOutOfScope(OV))
5432         continue;
5433       // First order recurrence Phi's should typically be considered
5434       // non-uniform.
5435       auto *OP = dyn_cast<PHINode>(OV);
5436       if (OP && Legal->isFirstOrderRecurrence(OP))
5437         continue;
5438       // If all the users of the operand are uniform, then add the
5439       // operand into the uniform worklist.
5440       auto *OI = cast<Instruction>(OV);
5441       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
5442             auto *J = cast<Instruction>(U);
5443             return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
5444           }))
5445         addToWorklistIfAllowed(OI);
5446     }
5447   }
5448 
5449   // For an instruction to be added into Worklist above, all its users inside
5450   // the loop should also be in Worklist. However, this condition cannot be
5451   // true for phi nodes that form a cyclic dependence. We must process phi
5452   // nodes separately. An induction variable will remain uniform if all users
5453   // of the induction variable and induction variable update remain uniform.
5454   // The code below handles both pointer and non-pointer induction variables.
5455   for (auto &Induction : Legal->getInductionVars()) {
5456     auto *Ind = Induction.first;
5457     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5458 
5459     // Determine if all users of the induction variable are uniform after
5460     // vectorization.
5461     auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5462       auto *I = cast<Instruction>(U);
5463       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
5464              isVectorizedMemAccessUse(I, Ind);
5465     });
5466     if (!UniformInd)
5467       continue;
5468 
5469     // Determine if all users of the induction variable update instruction are
5470     // uniform after vectorization.
5471     auto UniformIndUpdate =
5472         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5473           auto *I = cast<Instruction>(U);
5474           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
5475                  isVectorizedMemAccessUse(I, IndUpdate);
5476         });
5477     if (!UniformIndUpdate)
5478       continue;
5479 
5480     // The induction variable and its update instruction will remain uniform.
5481     addToWorklistIfAllowed(Ind);
5482     addToWorklistIfAllowed(IndUpdate);
5483   }
5484 
5485   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
5486 }
5487 
5488 bool LoopVectorizationCostModel::runtimeChecksRequired() {
5489   LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
5490 
5491   if (Legal->getRuntimePointerChecking()->Need) {
5492     reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
5493         "runtime pointer checks needed. Enable vectorization of this "
5494         "loop with '#pragma clang loop vectorize(enable)' when "
5495         "compiling with -Os/-Oz",
5496         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5497     return true;
5498   }
5499 
5500   if (!PSE.getUnionPredicate().getPredicates().empty()) {
5501     reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
5502         "runtime SCEV checks needed. Enable vectorization of this "
5503         "loop with '#pragma clang loop vectorize(enable)' when "
5504         "compiling with -Os/-Oz",
5505         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5506     return true;
5507   }
5508 
5509   // FIXME: Avoid specializing for stride==1 instead of bailing out.
5510   if (!Legal->getLAI()->getSymbolicStrides().empty()) {
5511     reportVectorizationFailure("Runtime stride check for small trip count",
5512         "runtime stride == 1 checks needed. Enable vectorization of "
5513         "this loop without such check by compiling with -Os/-Oz",
5514         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5515     return true;
5516   }
5517 
5518   return false;
5519 }
5520 
5521 Optional<ElementCount>
5522 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
5523   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to do this anyway, since the runtime check is
    // still likely to be dynamically uniform if the target can skip it.
5526     reportVectorizationFailure(
5527         "Not inserting runtime ptr check for divergent target",
5528         "runtime pointer checks needed. Not enabled for divergent target",
5529         "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
5530     return None;
5531   }
5532 
5533   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5534   LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
5535   if (TC == 1) {
5536     reportVectorizationFailure("Single iteration (non) loop",
5537         "loop trip count is one, irrelevant for vectorization",
5538         "SingleIterationLoop", ORE, TheLoop);
5539     return None;
5540   }
5541 
5542   switch (ScalarEpilogueStatus) {
5543   case CM_ScalarEpilogueAllowed:
5544     return computeFeasibleMaxVF(TC, UserVF);
5545   case CM_ScalarEpilogueNotAllowedUsePredicate:
5546     LLVM_FALLTHROUGH;
5547   case CM_ScalarEpilogueNotNeededUsePredicate:
5548     LLVM_DEBUG(
5549         dbgs() << "LV: vector predicate hint/switch found.\n"
5550                << "LV: Not allowing scalar epilogue, creating predicated "
5551                << "vector loop.\n");
5552     break;
5553   case CM_ScalarEpilogueNotAllowedLowTripLoop:
5554     // fallthrough as a special case of OptForSize
5555   case CM_ScalarEpilogueNotAllowedOptSize:
5556     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
5557       LLVM_DEBUG(
5558           dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5559     else
5560       LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5561                         << "count.\n");
5562 
    // Bail if runtime checks are required, which are not good when optimizing
5564     // for size.
5565     if (runtimeChecksRequired())
5566       return None;
5567 
5568     break;
5569   }
5570 
  // The only loops we can vectorize without a scalar epilogue are loops with
5572   // a bottom-test and a single exiting block. We'd have to handle the fact
5573   // that not every instruction executes on the last iteration.  This will
5574   // require a lane mask which varies through the vector loop body.  (TODO)
5575   if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5576     // If there was a tail-folding hint/switch, but we can't fold the tail by
5577     // masking, fallback to a vectorization with a scalar epilogue.
5578     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5579       LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5580                            "scalar epilogue instead.\n");
5581       ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5582       return computeFeasibleMaxVF(TC, UserVF);
5583     }
5584     return None;
5585   }
5586 
  // Now try to fold the tail by masking.
5588 
5589   // Invalidate interleave groups that require an epilogue if we can't mask
5590   // the interleave-group.
5591   if (!useMaskedInterleavedAccesses(TTI)) {
5592     assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5593            "No decisions should have been taken at this point");
    // Note: There is no need to invalidate any cost modeling decisions here,
    // as none were taken so far.
5596     InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5597   }
5598 
5599   ElementCount MaxVF = computeFeasibleMaxVF(TC, UserVF);
5600   assert(!MaxVF.isScalable() &&
5601          "Scalable vectors do not yet support tail folding");
5602   assert((UserVF.isNonZero() || isPowerOf2_32(MaxVF.getFixedValue())) &&
5603          "MaxVF must be a power of 2");
5604   unsigned MaxVFtimesIC =
5605       UserIC ? MaxVF.getFixedValue() * UserIC : MaxVF.getFixedValue();
5606   // Avoid tail folding if the trip count is known to be a multiple of any VF we
5607   // chose.
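  // For example (illustrative), with a trip count known to equal 64, MaxVF=8
  // and UserIC=2, 64 urem (8 * 2) == 0, so no tail remains and tail folding
  // is unnecessary.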
5608   ScalarEvolution *SE = PSE.getSE();
5609   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
5610   const SCEV *ExitCount = SE->getAddExpr(
5611       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
5612   const SCEV *Rem = SE->getURemExpr(
5613       SE->applyLoopGuards(ExitCount, TheLoop),
5614       SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
5615   if (Rem->isZero()) {
5616     // Accept MaxVF if we do not have a tail.
5617     LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
5618     return MaxVF;
5619   }
5620 
5621   // If we don't know the precise trip count, or if the trip count that we
5622   // found modulo the vectorization factor is not zero, try to fold the tail
5623   // by masking.
5624   // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
5625   if (Legal->prepareToFoldTailByMasking()) {
5626     FoldTailByMasking = true;
5627     return MaxVF;
5628   }
5629 
5630   // If there was a tail-folding hint/switch, but we can't fold the tail by
5631   // masking, fallback to a vectorization with a scalar epilogue.
5632   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5633     LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5634                          "scalar epilogue instead.\n");
5635     ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5636     return MaxVF;
5637   }
5638 
5639   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
5640     LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
5641     return None;
5642   }
5643 
5644   if (TC == 0) {
5645     reportVectorizationFailure(
5646         "Unable to calculate the loop count due to complex control flow",
5647         "unable to calculate the loop count due to complex control flow",
5648         "UnknownLoopCountComplexCFG", ORE, TheLoop);
5649     return None;
5650   }
5651 
5652   reportVectorizationFailure(
5653       "Cannot optimize for size and vectorize at the same time.",
5654       "cannot optimize for size and vectorize at the same time. "
5655       "Enable vectorization of this loop with '#pragma clang loop "
5656       "vectorize(enable)' when compiling with -Os/-Oz",
5657       "NoTailLoopWithOptForSize", ORE, TheLoop);
5658   return None;
5659 }
5660 
5661 ElementCount
5662 LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount,
5663                                                  ElementCount UserVF) {
5664   bool IgnoreScalableUserVF = UserVF.isScalable() &&
5665                               !TTI.supportsScalableVectors() &&
5666                               !ForceTargetSupportsScalableVectors;
5667   if (IgnoreScalableUserVF) {
5668     LLVM_DEBUG(
5669         dbgs() << "LV: Ignoring VF=" << UserVF
5670                << " because target does not support scalable vectors.\n");
5671     ORE->emit([&]() {
5672       return OptimizationRemarkAnalysis(DEBUG_TYPE, "IgnoreScalableUserVF",
5673                                         TheLoop->getStartLoc(),
5674                                         TheLoop->getHeader())
5675              << "Ignoring VF=" << ore::NV("UserVF", UserVF)
5676              << " because target does not support scalable vectors.";
5677     });
5678   }
5679 
5680   // Beyond this point two scenarios are handled. If UserVF isn't specified
5681   // then a suitable VF is chosen. If UserVF is specified and there are
5682   // dependencies, check if it's legal. However, if a UserVF is specified and
5683   // there are no dependencies, then there's nothing to do.
5684   if (UserVF.isNonZero() && !IgnoreScalableUserVF) {
5685     if (!canVectorizeReductions(UserVF)) {
5686       reportVectorizationFailure(
5687           "LV: Scalable vectorization not supported for the reduction "
5688           "operations found in this loop. Using fixed-width "
5689           "vectorization instead.",
5690           "Scalable vectorization not supported for the reduction operations "
5691           "found in this loop. Using fixed-width vectorization instead.",
5692           "ScalableVFUnfeasible", ORE, TheLoop);
5693       return computeFeasibleMaxVF(
5694           ConstTripCount, ElementCount::getFixed(UserVF.getKnownMinValue()));
5695     }
5696 
5697     if (Legal->isSafeForAnyVectorWidth())
5698       return UserVF;
5699   }
5700 
5701   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
5702   unsigned SmallestType, WidestType;
5703   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
5704   unsigned WidestRegister = TTI.getRegisterBitWidth(true);
5705 
5706   // Get the maximum safe dependence distance in bits computed by LAA.
5707   // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
5708   // the memory accesses that is most restrictive (involved in the smallest
5709   // dependence distance).
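  // For example (illustrative), if the most restrictive accesses are i32 and
  // at most 8 of them may be vectorized together, the limit is
  // 8 * 4 * 8 = 256 bits.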
5710   unsigned MaxSafeVectorWidthInBits = Legal->getMaxSafeVectorWidthInBits();
5711 
5712   // If the user vectorization factor is legally unsafe, clamp it to a safe
5713   // value. Otherwise, return as is.
5714   if (UserVF.isNonZero() && !IgnoreScalableUserVF) {
5715     unsigned MaxSafeElements =
5716         PowerOf2Floor(MaxSafeVectorWidthInBits / WidestType);
5717     ElementCount MaxSafeVF = ElementCount::getFixed(MaxSafeElements);
5718 
5719     if (UserVF.isScalable()) {
5720       Optional<unsigned> MaxVScale = TTI.getMaxVScale();
5721 
5722       // Scale VF by vscale before checking if it's safe.
5723       MaxSafeVF = ElementCount::getScalable(
5724           MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);
5725 
5726       if (MaxSafeVF.isZero()) {
5727         // The dependence distance is too small to use scalable vectors,
5728         // fallback on fixed.
5729         LLVM_DEBUG(
5730             dbgs()
5731             << "LV: Max legal vector width too small, scalable vectorization "
5732                "unfeasible. Using fixed-width vectorization instead.\n");
5733         ORE->emit([&]() {
5734           return OptimizationRemarkAnalysis(DEBUG_TYPE, "ScalableVFUnfeasible",
5735                                             TheLoop->getStartLoc(),
5736                                             TheLoop->getHeader())
5737                  << "Max legal vector width too small, scalable vectorization "
5738                  << "unfeasible. Using fixed-width vectorization instead.";
5739         });
5740         return computeFeasibleMaxVF(
5741             ConstTripCount, ElementCount::getFixed(UserVF.getKnownMinValue()));
5742       }
5743     }
5744 
5745     LLVM_DEBUG(dbgs() << "LV: The max safe VF is: " << MaxSafeVF << ".\n");
5746 
5747     if (ElementCount::isKnownLE(UserVF, MaxSafeVF))
5748       return UserVF;
5749 
5750     LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5751                       << " is unsafe, clamping to max safe VF=" << MaxSafeVF
5752                       << ".\n");
5753     ORE->emit([&]() {
5754       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5755                                         TheLoop->getStartLoc(),
5756                                         TheLoop->getHeader())
5757              << "User-specified vectorization factor "
5758              << ore::NV("UserVectorizationFactor", UserVF)
5759              << " is unsafe, clamping to maximum safe vectorization factor "
5760              << ore::NV("VectorizationFactor", MaxSafeVF);
5761     });
5762     return MaxSafeVF;
5763   }
5764 
5765   WidestRegister = std::min(WidestRegister, MaxSafeVectorWidthInBits);
5766 
5767   // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
  // Note that both WidestRegister and WidestType may not be powers of 2.
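  // For example (illustrative), a 256-bit widest register and a 32-bit
  // widest element type yield PowerOf2Floor(256 / 32) = 8 lanes.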
5769   auto MaxVectorSize =
5770       ElementCount::getFixed(PowerOf2Floor(WidestRegister / WidestType));
5771 
5772   LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
5773                     << " / " << WidestType << " bits.\n");
5774   LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
5775                     << WidestRegister << " bits.\n");
5776 
5777   assert(MaxVectorSize.getFixedValue() <= WidestRegister &&
5778          "Did not expect to pack so many elements"
5779          " into one vector!");
5780   if (MaxVectorSize.getFixedValue() == 0) {
5781     LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n");
5782     return ElementCount::getFixed(1);
5783   } else if (ConstTripCount && ConstTripCount < MaxVectorSize.getFixedValue() &&
5784              isPowerOf2_32(ConstTripCount)) {
5785     // We need to clamp the VF to be the ConstTripCount. There is no point in
5786     // choosing a higher viable VF as done in the loop below.
5787     LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
5788                       << ConstTripCount << "\n");
5789     return ElementCount::getFixed(ConstTripCount);
5790   }
5791 
5792   ElementCount MaxVF = MaxVectorSize;
5793   if (TTI.shouldMaximizeVectorBandwidth(!isScalarEpilogueAllowed()) ||
5794       (MaximizeBandwidth && isScalarEpilogueAllowed())) {
5795     // Collect all viable vectorization factors larger than the default MaxVF
5796     // (i.e. MaxVectorSize).
5797     SmallVector<ElementCount, 8> VFs;
5798     auto MaxVectorSizeMaxBW =
5799         ElementCount::getFixed(WidestRegister / SmallestType);
5800     for (ElementCount VS = MaxVectorSize * 2;
5801          ElementCount::isKnownLE(VS, MaxVectorSizeMaxBW); VS *= 2)
5802       VFs.push_back(VS);
5803 
5804     // For each VF calculate its register usage.
5805     auto RUs = calculateRegisterUsage(VFs);
5806 
5807     // Select the largest VF which doesn't require more registers than existing
5808     // ones.
5809     for (int i = RUs.size() - 1; i >= 0; --i) {
5810       bool Selected = true;
5811       for (auto &pair : RUs[i].MaxLocalUsers) {
5812         unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5813         if (pair.second > TargetNumRegisters)
5814           Selected = false;
5815       }
5816       if (Selected) {
5817         MaxVF = VFs[i];
5818         break;
5819       }
5820     }
5821     if (ElementCount MinVF =
5822             TTI.getMinimumVF(SmallestType, /*IsScalable=*/false)) {
5823       if (ElementCount::isKnownLT(MaxVF, MinVF)) {
5824         LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
5825                           << ") with target's minimum: " << MinVF << '\n');
5826         MaxVF = MinVF;
5827       }
5828     }
5829   }
5830   return MaxVF;
5831 }
5832 
5833 VectorizationFactor
5834 LoopVectorizationCostModel::selectVectorizationFactor(ElementCount MaxVF) {
5835   // FIXME: This can be fixed for scalable vectors later, because at this stage
5836   // the LoopVectorizer will only consider vectorizing a loop with scalable
5837   // vectors when the loop has a hint to enable vectorization for a given VF.
5838   assert(!MaxVF.isScalable() && "scalable vectors not yet supported");
5839 
5840   InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first;
5841   LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
5842   assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
5843 
5844   auto Width = ElementCount::getFixed(1);
5845   const float ScalarCost = *ExpectedCost.getValue();
5846   float Cost = ScalarCost;
5847 
5848   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
5849   if (ForceVectorization && MaxVF.isVector()) {
5850     // Ignore scalar width, because the user explicitly wants vectorization.
5851     // Initialize cost to max so that VF = 2 is, at least, chosen during cost
5852     // evaluation.
5853     Cost = std::numeric_limits<float>::max();
5854   }
5855 
5856   for (auto i = ElementCount::getFixed(2); ElementCount::isKnownLE(i, MaxVF);
5857        i *= 2) {
    // Notice that the vector loop needs to be executed fewer times, so
    // we need to divide the cost of the vector loop by the width of
    // the vector elements.
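    // For example (illustrative), a vector body costing 20 at VF=8 is scored
    // as 20 / 8 = 2.5 per scalar iteration, to be compared against the
    // scalar loop cost.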
5861     VectorizationCostTy C = expectedCost(i);
5862     assert(C.first.isValid() && "Unexpected invalid cost for vector loop");
5863     float VectorCost = *C.first.getValue() / (float)i.getFixedValue();
5864     LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
5865                       << " costs: " << (int)VectorCost << ".\n");
5866     if (!C.second && !ForceVectorization) {
5867       LLVM_DEBUG(
5868           dbgs() << "LV: Not considering vector loop of width " << i
5869                  << " because it will not generate any vector instructions.\n");
5870       continue;
5871     }
5872 
5873     // If profitable add it to ProfitableVF list.
5874     if (VectorCost < ScalarCost) {
5875       ProfitableVFs.push_back(VectorizationFactor(
5876           {i, (unsigned)VectorCost}));
5877     }
5878 
5879     if (VectorCost < Cost) {
5880       Cost = VectorCost;
5881       Width = i;
5882     }
5883   }
5884 
5885   if (!EnableCondStoresVectorization && NumPredStores) {
5886     reportVectorizationFailure("There are conditional stores.",
5887         "store that is conditionally executed prevents vectorization",
5888         "ConditionalStore", ORE, TheLoop);
5889     Width = ElementCount::getFixed(1);
5890     Cost = ScalarCost;
5891   }
5892 
5893   LLVM_DEBUG(if (ForceVectorization && !Width.isScalar() && Cost >= ScalarCost) dbgs()
5894              << "LV: Vectorization seems to be not beneficial, "
5895              << "but was forced by a user.\n");
5896   LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
5897   VectorizationFactor Factor = {Width,
5898                                 (unsigned)(Width.getKnownMinValue() * Cost)};
5899   return Factor;
5900 }
5901 
5902 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization(
5903     const Loop &L, ElementCount VF) const {
5904   // Cross iteration phis such as reductions need special handling and are
5905   // currently unsupported.
5906   if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) {
5907         return Legal->isFirstOrderRecurrence(&Phi) ||
5908                Legal->isReductionVariable(&Phi);
5909       }))
5910     return false;
5911 
5912   // Phis with uses outside of the loop require special handling and are
5913   // currently unsupported.
5914   for (auto &Entry : Legal->getInductionVars()) {
5915     // Look for uses of the value of the induction at the last iteration.
5916     Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch());
5917     for (User *U : PostInc->users())
5918       if (!L.contains(cast<Instruction>(U)))
5919         return false;
5920     // Look for uses of penultimate value of the induction.
5921     for (User *U : Entry.first->users())
5922       if (!L.contains(cast<Instruction>(U)))
5923         return false;
5924   }
5925 
5926   // Induction variables that are widened require special handling that is
5927   // currently not supported.
5928   if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
5929         return !(this->isScalarAfterVectorization(Entry.first, VF) ||
5930                  this->isProfitableToScalarize(Entry.first, VF));
5931       }))
5932     return false;
5933 
5934   return true;
5935 }
5936 
5937 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
5938     const ElementCount VF) const {
5939   // FIXME: We need a much better cost-model to take different parameters such
5940   // as register pressure, code size increase and cost of extra branches into
5941   // account. For now we apply a very crude heuristic and only consider loops
5942   // with vectorization factors larger than a certain value.
5943   // We also consider epilogue vectorization unprofitable for targets that don't
  // consider interleaving beneficial (e.g., MVE).
5945   if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
5946     return false;
5947   if (VF.getFixedValue() >= EpilogueVectorizationMinVF)
5948     return true;
5949   return false;
5950 }
5951 
5952 VectorizationFactor
5953 LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
5954     const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
5955   VectorizationFactor Result = VectorizationFactor::Disabled();
5956   if (!EnableEpilogueVectorization) {
5957     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
5958     return Result;
5959   }
5960 
5961   if (!isScalarEpilogueAllowed()) {
5962     LLVM_DEBUG(
5963         dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
5964                   "allowed.\n";);
5965     return Result;
5966   }
5967 
5968   // FIXME: This can be fixed for scalable vectors later, because at this stage
5969   // the LoopVectorizer will only consider vectorizing a loop with scalable
5970   // vectors when the loop has a hint to enable vectorization for a given VF.
5971   if (MainLoopVF.isScalable()) {
5972     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization for scalable vectors not "
5973                          "yet supported.\n");
5974     return Result;
5975   }
5976 
5977   // Not really a cost consideration, but check for unsupported cases here to
5978   // simplify the logic.
5979   if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
5980     LLVM_DEBUG(
5981         dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
5982                   "not a supported candidate.\n";);
5983     return Result;
5984   }
5985 
5986   if (EpilogueVectorizationForceVF > 1) {
5987     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
    if (LVP.hasPlanWithVFs(
            {MainLoopVF, ElementCount::getFixed(EpilogueVectorizationForceVF)}))
      return {ElementCount::getFixed(EpilogueVectorizationForceVF), 0};
    LLVM_DEBUG(
        dbgs()
            << "LEV: Epilogue vectorization forced factor is not viable.\n";);
    return Result;
5997   }
5998 
5999   if (TheLoop->getHeader()->getParent()->hasOptSize() ||
6000       TheLoop->getHeader()->getParent()->hasMinSize()) {
6001     LLVM_DEBUG(
6002         dbgs()
6003             << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
6004     return Result;
6005   }
6006 
6007   if (!isEpilogueVectorizationProfitable(MainLoopVF))
6008     return Result;
6009 
6010   for (auto &NextVF : ProfitableVFs)
6011     if (ElementCount::isKnownLT(NextVF.Width, MainLoopVF) &&
6012         (Result.Width.getFixedValue() == 1 || NextVF.Cost < Result.Cost) &&
6013         LVP.hasPlanWithVFs({MainLoopVF, NextVF.Width}))
6014       Result = NextVF;
6015 
6016   if (Result != VectorizationFactor::Disabled())
6017     LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
6018                       << Result.Width.getFixedValue() << "\n";);
6019   return Result;
6020 }
6021 
6022 std::pair<unsigned, unsigned>
6023 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
6024   unsigned MinWidth = -1U;
6025   unsigned MaxWidth = 8;
6026   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
6027 
6028   // For each block.
6029   for (BasicBlock *BB : TheLoop->blocks()) {
6030     // For each instruction in the loop.
6031     for (Instruction &I : BB->instructionsWithoutDebug()) {
6032       Type *T = I.getType();
6033 
6034       // Skip ignored values.
6035       if (ValuesToIgnore.count(&I))
6036         continue;
6037 
6038       // Only examine Loads, Stores and PHINodes.
6039       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
6040         continue;
6041 
6042       // Examine PHI nodes that are reduction variables. Update the type to
6043       // account for the recurrence type.
6044       if (auto *PN = dyn_cast<PHINode>(&I)) {
6045         if (!Legal->isReductionVariable(PN))
6046           continue;
6047         RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[PN];
6048         if (PreferInLoopReductions ||
6049             TTI.preferInLoopReduction(RdxDesc.getOpcode(),
6050                                       RdxDesc.getRecurrenceType(),
6051                                       TargetTransformInfo::ReductionFlags()))
6052           continue;
6053         T = RdxDesc.getRecurrenceType();
6054       }
6055 
6056       // Examine the stored values.
6057       if (auto *ST = dyn_cast<StoreInst>(&I))
6058         T = ST->getValueOperand()->getType();
6059 
6060       // Ignore loaded pointer types and stored pointer types that are not
6061       // vectorizable.
6062       //
6063       // FIXME: The check here attempts to predict whether a load or store will
6064       //        be vectorized. We only know this for certain after a VF has
6065       //        been selected. Here, we assume that if an access can be
6066       //        vectorized, it will be. We should also look at extending this
6067       //        optimization to non-pointer types.
6068       //
6069       if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
6070           !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
6071         continue;
6072 
6073       MinWidth = std::min(MinWidth,
6074                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
6075       MaxWidth = std::max(MaxWidth,
6076                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
6077     }
6078   }
6079 
6080   return {MinWidth, MaxWidth};
6081 }
6082 
6083 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
6084                                                            unsigned LoopCost) {
6085   // -- The interleave heuristics --
6086   // We interleave the loop in order to expose ILP and reduce the loop overhead.
6087   // There are many micro-architectural considerations that we can't predict
6088   // at this level. For example, frontend pressure (on decode or fetch) due to
6089   // code size, or the number and capabilities of the execution ports.
6090   //
6091   // We use the following heuristics to select the interleave count:
6092   // 1. If the code has reductions, then we interleave to break the cross
6093   // iteration dependency.
6094   // 2. If the loop is really small, then we interleave to reduce the loop
6095   // overhead.
6096   // 3. We don't interleave if we think that we will spill registers to memory
6097   // due to the increased register pressure.
6098 
6099   if (!isScalarEpilogueAllowed())
6100     return 1;
6101 
  // Loops whose vectorization factor was limited by a maximum safe dependence
  // distance should not be interleaved, as a larger interleave count could
  // violate that distance.
6103   if (Legal->getMaxSafeDepDistBytes() != -1U)
6104     return 1;
6105 
6106   auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
6107   const bool HasReductions = !Legal->getReductionVars().empty();
  // Do not interleave loops with a relatively small known or estimated trip
  // count. But we will interleave when InterleaveSmallLoopScalarReduction is
  // enabled, and the code has scalar reductions (HasReductions && VF == 1),
  // because under those conditions interleaving can expose ILP and break
  // cross-iteration dependences for reductions.
6113   if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
6114       !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
6115     return 1;
6116 
6117   RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these values below, so make sure each register class reports
  // at least one user to avoid dividing by zero.
  for (auto &pair : R.MaxLocalUsers) {
    pair.second = std::max(pair.second, 1U);
  }
6123 
6124   // We calculate the interleave count using the following formula.
6125   // Subtract the number of loop invariants from the number of available
6126   // registers. These registers are used by all of the interleaved instances.
6127   // Next, divide the remaining registers by the number of registers that is
6128   // required by the loop, in order to estimate how many parallel instances
6129   // fit without causing spills. All of this is rounded down if necessary to be
6130   // a power of two. We want power of two interleave count to simplify any
6131   // addressing operations or alignment considerations.
6132   // We also want power of two interleave counts to ensure that the induction
6133   // variable of the vector loop wraps to zero, when tail is folded by masking;
6134   // this currently happens when OptForSize, in which case IC is set to 1 above.
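  // As a worked example with illustrative numbers: for a class with 32
  // registers, 2 of them holding loop-invariant values and at most 6 values
  // live at once, the basic formula below (without the induction-variable
  // adjustment) gives PowerOf2Floor((32 - 2) / 6) = PowerOf2Floor(5) = 4
  // interleaved instances.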
6135   unsigned IC = UINT_MAX;
6136 
  for (auto &pair : R.MaxLocalUsers) {
    unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
    LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
                      << " registers of "
                      << TTI.getRegisterClassName(pair.first)
                      << " register class\n");
6142     if (VF.isScalar()) {
6143       if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
6144         TargetNumRegisters = ForceTargetNumScalarRegs;
6145     } else {
6146       if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
6147         TargetNumRegisters = ForceTargetNumVectorRegs;
6148     }
6149     unsigned MaxLocalUsers = pair.second;
6150     unsigned LoopInvariantRegs = 0;
6151     if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
6152       LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
6153 
    unsigned TmpIC =
        PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers);
6155     // Don't count the induction variable as interleaved.
6156     if (EnableIndVarRegisterHeur) {
6157       TmpIC =
6158           PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
6159                         std::max(1U, (MaxLocalUsers - 1)));
6160     }
6161 
6162     IC = std::min(IC, TmpIC);
6163   }
6164 
6165   // Clamp the interleave ranges to reasonable counts.
6166   unsigned MaxInterleaveCount =
6167       TTI.getMaxInterleaveFactor(VF.getKnownMinValue());
6168 
6169   // Check if the user has overridden the max.
6170   if (VF.isScalar()) {
6171     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
6172       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
6173   } else {
6174     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
6175       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
6176   }
6177 
  // If the trip count is a known or estimated compile-time constant, limit the
  // interleave count so that it does not exceed the trip count divided by VF,
  // provided the result is at least 1.
  //
  // For scalable vectors we can't know if interleaving is beneficial. It may
  // not be beneficial for small loops if none of the lanes in the second
  // vector iteration is enabled. However, for larger loops, there is likely to
  // be a similar benefit as for fixed-width vectors. For now, we choose to
  // leave the InterleaveCount as if vscale is '1', although if some
  // information about the vector is known (e.g. min vector size), we can make
  // a better decision.
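  // For instance, a known trip count of 12 with VF = 4 caps MaxInterleaveCount
  // at 12 / 4 = 3 concurrent vector iterations (illustrative values).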
6188   if (BestKnownTC) {
6189     MaxInterleaveCount =
6190         std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
6191     // Make sure MaxInterleaveCount is greater than 0.
6192     MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
6193   }
6194 
6195   assert(MaxInterleaveCount > 0 &&
6196          "Maximum interleave count must be greater than 0");
6197 
  // Clamp the calculated IC to be between 1 and the max interleave count that
  // the target and trip count allow.
6200   if (IC > MaxInterleaveCount)
6201     IC = MaxInterleaveCount;
6202   else
6203     // Make sure IC is greater than 0.
6204     IC = std::max(1u, IC);
6205 
6206   assert(IC > 0 && "Interleave count must be greater than 0.");
6207 
6208   // If we did not calculate the cost for VF (because the user selected the VF)
6209   // then we calculate the cost of VF here.
6210   if (LoopCost == 0) {
6211     assert(expectedCost(VF).first.isValid() && "Expected a valid cost");
6212     LoopCost = *expectedCost(VF).first.getValue();
6213   }
6214 
6215   assert(LoopCost && "Non-zero loop cost expected");
6216 
6217   // Interleave if we vectorized this loop and there is a reduction that could
6218   // benefit from interleaving.
6219   if (VF.isVector() && HasReductions) {
6220     LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
6221     return IC;
6222   }
6223 
6224   // Note that if we've already vectorized the loop we will have done the
6225   // runtime check and so interleaving won't require further checks.
6226   bool InterleavingRequiresRuntimePointerCheck =
6227       (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
6228 
6229   // We want to interleave small loops in order to reduce the loop overhead and
6230   // potentially expose ILP opportunities.
6231   LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
6232                     << "LV: IC is " << IC << '\n'
6233                     << "LV: VF is " << VF << '\n');
6234   const bool AggressivelyInterleaveReductions =
6235       TTI.enableAggressiveInterleaving(HasReductions);
6236   if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
6237     // We assume that the cost overhead is 1 and we use the cost model
6238     // to estimate the cost of the loop and interleave until the cost of the
6239     // loop overhead is about 5% of the cost of the loop.
6240     unsigned SmallIC =
6241         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
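    // For example, assuming SmallLoopCost keeps its usual default of 20: a
    // loop of cost 6 gives PowerOf2Floor(20 / 6) = PowerOf2Floor(3) = 2, so
    // such a loop is interleaved at most twice here (illustrative numbers).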
6242 
6243     // Interleave until store/load ports (estimated by max interleave count) are
6244     // saturated.
6245     unsigned NumStores = Legal->getNumStores();
6246     unsigned NumLoads = Legal->getNumLoads();
6247     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
6248     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
6249 
6250     // If we have a scalar reduction (vector reductions are already dealt with
6251     // by this point), we can increase the critical path length if the loop
6252     // we're interleaving is inside another loop. Limit, by default to 2, so the
6253     // critical path only gets increased by one reduction operation.
6254     if (HasReductions && TheLoop->getLoopDepth() > 1) {
6255       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
6256       SmallIC = std::min(SmallIC, F);
6257       StoresIC = std::min(StoresIC, F);
6258       LoadsIC = std::min(LoadsIC, F);
6259     }
6260 
6261     if (EnableLoadStoreRuntimeInterleave &&
6262         std::max(StoresIC, LoadsIC) > SmallIC) {
6263       LLVM_DEBUG(
6264           dbgs() << "LV: Interleaving to saturate store or load ports.\n");
6265       return std::max(StoresIC, LoadsIC);
6266     }
6267 
6268     // If there are scalar reductions and TTI has enabled aggressive
6269     // interleaving for reductions, we will interleave to expose ILP.
6270     if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
6271         AggressivelyInterleaveReductions) {
6272       LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6273       // Interleave no less than SmallIC but not as aggressive as the normal IC
6274       // to satisfy the rare situation when resources are too limited.
6275       return std::max(IC / 2, SmallIC);
6276     } else {
6277       LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
6278       return SmallIC;
6279     }
6280   }
6281 
6282   // Interleave if this is a large loop (small loops are already dealt with by
6283   // this point) that could benefit from interleaving.
6284   if (AggressivelyInterleaveReductions) {
6285     LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6286     return IC;
6287   }
6288 
6289   LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
6290   return 1;
6291 }
6292 
6293 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
6294 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
6295   // This function calculates the register usage by measuring the highest number
6296   // of values that are alive at a single location. Obviously, this is a very
  // rough estimation. We scan the loop in topological order and
6298   // assign a number to each instruction. We use RPO to ensure that defs are
6299   // met before their users. We assume that each instruction that has in-loop
6300   // users starts an interval. We record every time that an in-loop value is
6301   // used, so we have a list of the first and last occurrences of each
6302   // instruction. Next, we transpose this data structure into a multi map that
6303   // holds the list of intervals that *end* at a specific location. This multi
6304   // map allows us to perform a linear search. We scan the instructions linearly
6305   // and record each time that a new interval starts, by placing it in a set.
6306   // If we find this value in the multi-map then we remove it from the set.
6307   // The max register usage is the maximum size of the set.
6308   // We also search for instructions that are defined outside the loop, but are
6309   // used inside the loop. We need this number separately from the max-interval
6310   // usage number because when we unroll, loop-invariant values do not take
  // more registers.
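  // As a small illustration: for instructions I0..I3 where I0's last use is
  // at I2 and I1's last use is at I3, the intervals [I0, I2] and [I1, I3]
  // overlap between I1 and I2, so two values are live at once and the maximum
  // usage recorded for that class is 2.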
6312   LoopBlocksDFS DFS(TheLoop);
6313   DFS.perform(LI);
6314 
6315   RegisterUsage RU;
6316 
6317   // Each 'key' in the map opens a new interval. The values
6318   // of the map are the index of the 'last seen' usage of the
6319   // instruction that is the key.
6320   using IntervalMap = DenseMap<Instruction *, unsigned>;
6321 
6322   // Maps instruction to its index.
6323   SmallVector<Instruction *, 64> IdxToInstr;
6324   // Marks the end of each interval.
6325   IntervalMap EndPoint;
  // Saves the set of instructions that are used in the loop.
6327   SmallPtrSet<Instruction *, 8> Ends;
6328   // Saves the list of values that are used in the loop but are
6329   // defined outside the loop, such as arguments and constants.
6330   SmallPtrSet<Value *, 8> LoopInvariants;
6331 
6332   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
6333     for (Instruction &I : BB->instructionsWithoutDebug()) {
6334       IdxToInstr.push_back(&I);
6335 
6336       // Save the end location of each USE.
6337       for (Value *U : I.operands()) {
6338         auto *Instr = dyn_cast<Instruction>(U);
6339 
6340         // Ignore non-instruction values such as arguments, constants, etc.
6341         if (!Instr)
6342           continue;
6343 
6344         // If this instruction is outside the loop then record it and continue.
6345         if (!TheLoop->contains(Instr)) {
6346           LoopInvariants.insert(Instr);
6347           continue;
6348         }
6349 
6350         // Overwrite previous end points.
6351         EndPoint[Instr] = IdxToInstr.size();
6352         Ends.insert(Instr);
6353       }
6354     }
6355   }
6356 
6357   // Saves the list of intervals that end with the index in 'key'.
6358   using InstrList = SmallVector<Instruction *, 2>;
6359   DenseMap<unsigned, InstrList> TransposeEnds;
6360 
6361   // Transpose the EndPoints to a list of values that end at each index.
6362   for (auto &Interval : EndPoint)
6363     TransposeEnds[Interval.second].push_back(Interval.first);
6364 
6365   SmallPtrSet<Instruction *, 8> OpenIntervals;
6366   SmallVector<RegisterUsage, 8> RUs(VFs.size());
6367   SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());
6368 
6369   LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
6370 
6371   // A lambda that gets the register usage for the given type and VF.
6372   const auto &TTICapture = TTI;
6373   auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) {
6374     if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty))
6375       return 0U;
6376     return TTICapture.getRegUsageForType(VectorType::get(Ty, VF));
6377   };
6378 
  for (unsigned i = 0, s = IdxToInstr.size(); i < s; ++i) {
6380     Instruction *I = IdxToInstr[i];
6381 
6382     // Remove all of the instructions that end at this location.
6383     InstrList &List = TransposeEnds[i];
6384     for (Instruction *ToRemove : List)
6385       OpenIntervals.erase(ToRemove);
6386 
6387     // Ignore instructions that are never used within the loop.
6388     if (!Ends.count(I))
6389       continue;
6390 
6391     // Skip ignored values.
6392     if (ValuesToIgnore.count(I))
6393       continue;
6394 
6395     // For each VF find the maximum usage of registers.
6396     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
6397       // Count the number of live intervals.
6398       SmallMapVector<unsigned, unsigned, 4> RegUsage;
6399 
      if (VFs[j].isScalar()) {
        for (auto Inst : OpenIntervals) {
          unsigned ClassID =
              TTI.getRegisterClassForType(false, Inst->getType());
          // SmallMapVector value-initializes absent entries to zero, so a
          // plain increment is correct for both new and existing keys.
          RegUsage[ClassID] += 1;
        }
6408       } else {
6409         collectUniformsAndScalars(VFs[j]);
6410         for (auto Inst : OpenIntervals) {
6411           // Skip ignored values for VF > 1.
6412           if (VecValuesToIgnore.count(Inst))
6413             continue;
          if (isScalarAfterVectorization(Inst, VFs[j])) {
            unsigned ClassID =
                TTI.getRegisterClassForType(false, Inst->getType());
            RegUsage[ClassID] += 1;
          } else {
            unsigned ClassID =
                TTI.getRegisterClassForType(true, Inst->getType());
            RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
          }
6427         }
6428       }
6429 
      for (auto &pair : RegUsage) {
        // Absent entries value-initialize to zero, so std::max handles both
        // new and existing keys.
        unsigned &Entry = MaxUsages[j][pair.first];
        Entry = std::max(Entry, pair.second);
      }
6436     }
6437 
6438     LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
6439                       << OpenIntervals.size() << '\n');
6440 
6441     // Add the current instruction to the list of open intervals.
6442     OpenIntervals.insert(I);
6443   }
6444 
6445   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
6446     SmallMapVector<unsigned, unsigned, 4> Invariant;
6447 
6448     for (auto Inst : LoopInvariants) {
6449       unsigned Usage =
6450           VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
6451       unsigned ClassID =
6452           TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
      // Absent entries value-initialize to zero, so a plain increment is
      // correct for both new and existing keys.
      Invariant[ClassID] += Usage;
6457     }
6458 
6459     LLVM_DEBUG({
6460       dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
      dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
             << " item(s)\n";
6463       for (const auto &pair : MaxUsages[i]) {
6464         dbgs() << "LV(REG): RegisterClass: "
6465                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6466                << " registers\n";
6467       }
      dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
             << " item(s)\n";
6470       for (const auto &pair : Invariant) {
6471         dbgs() << "LV(REG): RegisterClass: "
6472                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6473                << " registers\n";
6474       }
6475     });
6476 
6477     RU.LoopInvariantRegs = Invariant;
6478     RU.MaxLocalUsers = MaxUsages[i];
6479     RUs[i] = RU;
6480   }
6481 
6482   return RUs;
6483 }
6484 
bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I) {
6486   // TODO: Cost model for emulated masked load/store is completely
6487   // broken. This hack guides the cost model to use an artificially
6488   // high enough value to practically disable vectorization with such
6489   // operations, except where previously deployed legality hack allowed
6490   // using very low cost values. This is to avoid regressions coming simply
6491   // from moving "masked load/store" check from legality to cost model.
6492   // Masked Load/Gather emulation was previously never allowed.
6493   // Limited number of Masked Store/Scatter emulation was allowed.
6494   assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction");
6495   return isa<LoadInst>(I) ||
6496          (isa<StoreInst>(I) &&
6497           NumPredStores > NumberOfStoresToPredicate);
6498 }
6499 
6500 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
6501   // If we aren't vectorizing the loop, or if we've already collected the
6502   // instructions to scalarize, there's nothing to do. Collection may already
6503   // have occurred if we have a user-selected VF and are now computing the
6504   // expected cost for interleaving.
6505   if (VF.isScalar() || VF.isZero() ||
6506       InstsToScalarize.find(VF) != InstsToScalarize.end())
6507     return;
6508 
  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
6510   // not profitable to scalarize any instructions, the presence of VF in the
6511   // map will indicate that we've analyzed it already.
6512   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
6513 
6514   // Find all the instructions that are scalar with predication in the loop and
6515   // determine if it would be better to not if-convert the blocks they are in.
6516   // If so, we also record the instructions to scalarize.
6517   for (BasicBlock *BB : TheLoop->blocks()) {
6518     if (!blockNeedsPredication(BB))
6519       continue;
6520     for (Instruction &I : *BB)
6521       if (isScalarWithPredication(&I)) {
6522         ScalarCostsTy ScalarCosts;
6523         // Do not apply discount logic if hacked cost is needed
6524         // for emulated masked memrefs.
6525         if (!useEmulatedMaskMemRefHack(&I) &&
6526             computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
6527           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
6528         // Remember that BB will remain after vectorization.
6529         PredicatedBBsAfterVectorization.insert(BB);
6530       }
6531   }
6532 }
6533 
6534 int LoopVectorizationCostModel::computePredInstDiscount(
6535     Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
6536   assert(!isUniformAfterVectorization(PredInst, VF) &&
6537          "Instruction marked uniform-after-vectorization will be predicated");
6538 
6539   // Initialize the discount to zero, meaning that the scalar version and the
6540   // vector version cost the same.
6541   InstructionCost Discount = 0;
6542 
6543   // Holds instructions to analyze. The instructions we visit are mapped in
6544   // ScalarCosts. Those instructions are the ones that would be scalarized if
6545   // we find that the scalar version costs less.
6546   SmallVector<Instruction *, 8> Worklist;
6547 
6548   // Returns true if the given instruction can be scalarized.
6549   auto canBeScalarized = [&](Instruction *I) -> bool {
6550     // We only attempt to scalarize instructions forming a single-use chain
6551     // from the original predicated block that would otherwise be vectorized.
6552     // Although not strictly necessary, we give up on instructions we know will
6553     // already be scalar to avoid traversing chains that are unlikely to be
6554     // beneficial.
6555     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
6556         isScalarAfterVectorization(I, VF))
6557       return false;
6558 
6559     // If the instruction is scalar with predication, it will be analyzed
6560     // separately. We ignore it within the context of PredInst.
6561     if (isScalarWithPredication(I))
6562       return false;
6563 
6564     // If any of the instruction's operands are uniform after vectorization,
6565     // the instruction cannot be scalarized. This prevents, for example, a
6566     // masked load from being scalarized.
6567     //
6568     // We assume we will only emit a value for lane zero of an instruction
6569     // marked uniform after vectorization, rather than VF identical values.
6570     // Thus, if we scalarize an instruction that uses a uniform, we would
6571     // create uses of values corresponding to the lanes we aren't emitting code
6572     // for. This behavior can be changed by allowing getScalarValue to clone
6573     // the lane zero values for uniforms rather than asserting.
6574     for (Use &U : I->operands())
6575       if (auto *J = dyn_cast<Instruction>(U.get()))
6576         if (isUniformAfterVectorization(J, VF))
6577           return false;
6578 
6579     // Otherwise, we can scalarize the instruction.
6580     return true;
6581   };
6582 
6583   // Compute the expected cost discount from scalarizing the entire expression
6584   // feeding the predicated instruction. We currently only consider expressions
6585   // that are single-use instruction chains.
6586   Worklist.push_back(PredInst);
6587   while (!Worklist.empty()) {
6588     Instruction *I = Worklist.pop_back_val();
6589 
6590     // If we've already analyzed the instruction, there's nothing to do.
6591     if (ScalarCosts.find(I) != ScalarCosts.end())
6592       continue;
6593 
6594     // Compute the cost of the vector instruction. Note that this cost already
6595     // includes the scalarization overhead of the predicated instruction.
6596     InstructionCost VectorCost = getInstructionCost(I, VF).first;
6597 
6598     // Compute the cost of the scalarized instruction. This cost is the cost of
6599     // the instruction as if it wasn't if-converted and instead remained in the
6600     // predicated block. We will scale this cost by block probability after
6601     // computing the scalarization overhead.
6602     assert(!VF.isScalable() && "scalable vectors not yet supported.");
6603     InstructionCost ScalarCost =
6604         VF.getKnownMinValue() *
6605         getInstructionCost(I, ElementCount::getFixed(1)).first;
6606 
6607     // Compute the scalarization overhead of needed insertelement instructions
6608     // and phi nodes.
6609     if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
6610       ScalarCost += TTI.getScalarizationOverhead(
6611           cast<VectorType>(ToVectorTy(I->getType(), VF)),
6612           APInt::getAllOnesValue(VF.getKnownMinValue()), true, false);
6613       assert(!VF.isScalable() && "scalable vectors not yet supported.");
6614       ScalarCost +=
6615           VF.getKnownMinValue() *
6616           TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput);
6617     }
6618 
6619     // Compute the scalarization overhead of needed extractelement
6620     // instructions. For each of the instruction's operands, if the operand can
6621     // be scalarized, add it to the worklist; otherwise, account for the
6622     // overhead.
6623     for (Use &U : I->operands())
6624       if (auto *J = dyn_cast<Instruction>(U.get())) {
6625         assert(VectorType::isValidElementType(J->getType()) &&
6626                "Instruction has non-scalar type");
6627         if (canBeScalarized(J))
6628           Worklist.push_back(J);
6629         else if (needsExtract(J, VF)) {
6630           assert(!VF.isScalable() && "scalable vectors not yet supported.");
6631           ScalarCost += TTI.getScalarizationOverhead(
6632               cast<VectorType>(ToVectorTy(J->getType(), VF)),
6633               APInt::getAllOnesValue(VF.getKnownMinValue()), false, true);
6634         }
6635       }
6636 
6637     // Scale the total scalar cost by block probability.
6638     ScalarCost /= getReciprocalPredBlockProb();
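    // E.g., with the assumed reciprocal block probability of 2 (the predicated
    // block is taken on roughly half the iterations), a raw scalar chain cost
    // of 8 scales down to 4 (illustrative numbers).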
6639 
6640     // Compute the discount. A non-negative discount means the vector version
6641     // of the instruction costs more, and scalarizing would be beneficial.
6642     Discount += VectorCost - ScalarCost;
6643     ScalarCosts[I] = ScalarCost;
6644   }
6645 
6646   return *Discount.getValue();
6647 }
6648 
6649 LoopVectorizationCostModel::VectorizationCostTy
6650 LoopVectorizationCostModel::expectedCost(ElementCount VF) {
6651   VectorizationCostTy Cost;
6652 
6653   // For each block.
6654   for (BasicBlock *BB : TheLoop->blocks()) {
6655     VectorizationCostTy BlockCost;
6656 
6657     // For each instruction in the old loop.
6658     for (Instruction &I : BB->instructionsWithoutDebug()) {
6659       // Skip ignored values.
6660       if (ValuesToIgnore.count(&I) ||
6661           (VF.isVector() && VecValuesToIgnore.count(&I)))
6662         continue;
6663 
6664       VectorizationCostTy C = getInstructionCost(&I, VF);
6665 
6666       // Check if we should override the cost.
6667       if (ForceTargetInstructionCost.getNumOccurrences() > 0)
6668         C.first = InstructionCost(ForceTargetInstructionCost);
6669 
6670       BlockCost.first += C.first;
6671       BlockCost.second |= C.second;
6672       LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
6673                         << " for VF " << VF << " For instruction: " << I
6674                         << '\n');
6675     }
6676 
6677     // If we are vectorizing a predicated block, it will have been
6678     // if-converted. This means that the block's instructions (aside from
6679     // stores and instructions that may divide by zero) will now be
6680     // unconditionally executed. For the scalar case, we may not always execute
6681     // the predicated block, if it is an if-else block. Thus, scale the block's
6682     // cost by the probability of executing it. blockNeedsPredication from
6683     // Legal is used so as to not include all blocks in tail folded loops.
6684     if (VF.isScalar() && Legal->blockNeedsPredication(BB))
6685       BlockCost.first /= getReciprocalPredBlockProb();
6686 
6687     Cost.first += BlockCost.first;
6688     Cost.second |= BlockCost.second;
6689   }
6690 
6691   return Cost;
6692 }
6693 
/// Gets the address access SCEV after verifying that the access pattern is
/// loop invariant except for the induction variable dependence.
6696 ///
6697 /// This SCEV can be sent to the Target in order to estimate the address
6698 /// calculation cost.
static const SCEV *getAddressAccessSCEV(Value *Ptr,
                                        LoopVectorizationLegality *Legal,
                                        PredicatedScalarEvolution &PSE,
                                        const Loop *TheLoop) {
6704 
6705   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
6706   if (!Gep)
6707     return nullptr;
6708 
6709   // We are looking for a gep with all loop invariant indices except for one
6710   // which should be an induction variable.
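  // For example (illustrative IR), in
  //   %gep = getelementptr [16 x i32], [16 x i32]* %A, i64 %inv, i64 %iv
  // the index %inv is loop-invariant and %iv is an induction variable, so the
  // GEP is accepted and its SCEV is returned below.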
6711   auto SE = PSE.getSE();
6712   unsigned NumOperands = Gep->getNumOperands();
6713   for (unsigned i = 1; i < NumOperands; ++i) {
6714     Value *Opd = Gep->getOperand(i);
6715     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
6716         !Legal->isInductionVariable(Opd))
6717       return nullptr;
6718   }
6719 
  // Now we know we have a GEP (ptr, %inv, %ind, %inv). Return the Ptr SCEV.
6721   return PSE.getSCEV(Ptr);
6722 }
6723 
6724 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
6725   return Legal->hasStride(I->getOperand(0)) ||
6726          Legal->hasStride(I->getOperand(1));
6727 }
6728 
6729 InstructionCost
6730 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
6731                                                         ElementCount VF) {
6732   assert(VF.isVector() &&
6733          "Scalarization cost of instruction implies vectorization.");
6734   assert(!VF.isScalable() && "scalable vectors not yet supported.");
6735   Type *ValTy = getMemInstValueType(I);
6736   auto SE = PSE.getSE();
6737 
6738   unsigned AS = getLoadStoreAddressSpace(I);
6739   Value *Ptr = getLoadStorePointerOperand(I);
6740   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
6741 
  // Figure out whether the access is strided and get the stride value
  // if it's known at compile time.
6744   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
6745 
6746   // Get the cost of the scalar memory instruction and address computation.
6747   InstructionCost Cost =
6748       VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
6749 
6750   // Don't pass *I here, since it is scalar but will actually be part of a
6751   // vectorized loop where the user of it is a vectorized instruction.
6752   const Align Alignment = getLoadStoreAlignment(I);
6753   Cost += VF.getKnownMinValue() *
6754           TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
6755                               AS, TTI::TCK_RecipThroughput);
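  // With illustrative unit costs: for VF = 4, an address computation cost of 1
  // and a scalar memory op cost of 2, the running total so far is
  // 4 * 1 + 4 * 2 = 12, before the extract/insert overhead added below.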
6756 
6757   // Get the overhead of the extractelement and insertelement instructions
6758   // we might create due to scalarization.
6759   Cost += getScalarizationOverhead(I, VF);
6760 
6761   // If we have a predicated store, it may not be executed for each vector
6762   // lane. Scale the cost by the probability of executing the predicated
6763   // block.
6764   if (isPredicatedInst(I)) {
6765     Cost /= getReciprocalPredBlockProb();
6766 
6767     if (useEmulatedMaskMemRefHack(I))
6768       // Artificially setting to a high enough value to practically disable
6769       // vectorization with such operations.
6770       Cost = 3000000;
6771   }
6772 
6773   return Cost;
6774 }
6775 
6776 InstructionCost
6777 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
6778                                                     ElementCount VF) {
6779   Type *ValTy = getMemInstValueType(I);
6780   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6781   Value *Ptr = getLoadStorePointerOperand(I);
6782   unsigned AS = getLoadStoreAddressSpace(I);
6783   int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
6784   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6785 
6786   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6787          "Stride should be 1 or -1 for consecutive memory access");
6788   const Align Alignment = getLoadStoreAlignment(I);
6789   InstructionCost Cost = 0;
6790   if (Legal->isMaskRequired(I))
6791     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6792                                       CostKind);
6793   else
6794     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6795                                 CostKind, I);
6796 
6797   bool Reverse = ConsecutiveStride < 0;
6798   if (Reverse)
6799     Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
6800   return Cost;
6801 }
6802 
6803 InstructionCost
6804 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
6805                                                 ElementCount VF) {
6806   assert(Legal->isUniformMemOp(*I));
6807 
6808   Type *ValTy = getMemInstValueType(I);
6809   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6810   const Align Alignment = getLoadStoreAlignment(I);
6811   unsigned AS = getLoadStoreAddressSpace(I);
6812   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6813   if (isa<LoadInst>(I)) {
6814     return TTI.getAddressComputationCost(ValTy) +
6815            TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
6816                                CostKind) +
6817            TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
6818   }
6819   StoreInst *SI = cast<StoreInst>(I);
6820 
6821   bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
6822   return TTI.getAddressComputationCost(ValTy) +
6823          TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
6824                              CostKind) +
6825          (isLoopInvariantStoreValue
6826               ? 0
6827               : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
6828                                        VF.getKnownMinValue() - 1));
6829 }
6830 
6831 InstructionCost
6832 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
6833                                                  ElementCount VF) {
6834   Type *ValTy = getMemInstValueType(I);
6835   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6836   const Align Alignment = getLoadStoreAlignment(I);
6837   const Value *Ptr = getLoadStorePointerOperand(I);
6838 
6839   return TTI.getAddressComputationCost(VectorTy) +
6840          TTI.getGatherScatterOpCost(
6841              I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
6842              TargetTransformInfo::TCK_RecipThroughput, I);
6843 }
6844 
6845 InstructionCost
6846 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
6847                                                    ElementCount VF) {
6848   // TODO: Once we have support for interleaving with scalable vectors
6849   // we can calculate the cost properly here.
6850   if (VF.isScalable())
6851     return InstructionCost::getInvalid();
6852 
6853   Type *ValTy = getMemInstValueType(I);
6854   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6855   unsigned AS = getLoadStoreAddressSpace(I);
6856 
6857   auto Group = getInterleavedAccessGroup(I);
  assert(Group && "Failed to get an interleaved access group.");
6859 
6860   unsigned InterleaveFactor = Group->getFactor();
6861   auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
6862 
6863   // Holds the indices of existing members in an interleaved load group.
6864   // An interleaved store group doesn't need this as it doesn't allow gaps.
6865   SmallVector<unsigned, 4> Indices;
6866   if (isa<LoadInst>(I)) {
6867     for (unsigned i = 0; i < InterleaveFactor; i++)
6868       if (Group->getMember(i))
6869         Indices.push_back(i);
6870   }
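  // For instance, a load group with factor 4 whose members occupy positions 0
  // and 2 yields Indices = {0, 2}; positions 1 and 3 are gaps with no member
  // and are skipped.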
6871 
6872   // Calculate the cost of the whole interleaved group.
6873   bool UseMaskForGaps =
6874       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
6875   InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
6876       I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
6877       AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);
6878 
6879   if (Group->isReverse()) {
6880     // TODO: Add support for reversed masked interleaved access.
6881     assert(!Legal->isMaskRequired(I) &&
6882            "Reverse masked interleaved access not supported.");
6883     Cost += Group->getNumMembers() *
6884             TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
6885   }
6886   return Cost;
6887 }
6888 
6889 InstructionCost LoopVectorizationCostModel::getReductionPatternCost(
6890     Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
6891   // Early exit for no inloop reductions
6892   if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
6893     return InstructionCost::getInvalid();
6894   auto *VectorTy = cast<VectorType>(Ty);
6895 
  // We are looking for one of the following patterns, taking the minimal
  // acceptable cost:
6897   //  reduce(mul(ext(A), ext(B))) or
6898   //  reduce(mul(A, B)) or
6899   //  reduce(ext(A)) or
6900   //  reduce(A).
6901   // The basic idea is that we walk down the tree to do that, finding the root
6902   // reduction instruction in InLoopReductionImmediateChains. From there we find
6903   // the pattern of mul/ext and test the cost of the entire pattern vs the cost
6904   // of the components. If the reduction cost is lower then we return it for the
6905   // reduction instruction and 0 for the other instructions in the pattern. If
  // it is not, we return an invalid cost specifying that the original cost
  // method should be used.
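  // For example (illustrative IR), the in-loop reduction chain
  //   %a = sext <8 x i8> %x to <8 x i32>
  //   %b = sext <8 x i8> %y to <8 x i32>
  //   %m = mul <8 x i32> %a, %b
  //   %r = add <8 x i32> %m, %phi
  // may be cheaper as a single extended multiply-accumulate reduction than as
  // the sum of the costs of its parts.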
6908   Instruction *RetI = I;
6909   if ((RetI->getOpcode() == Instruction::SExt ||
6910        RetI->getOpcode() == Instruction::ZExt)) {
6911     if (!RetI->hasOneUser())
6912       return InstructionCost::getInvalid();
6913     RetI = RetI->user_back();
6914   }
6915   if (RetI->getOpcode() == Instruction::Mul &&
6916       RetI->user_back()->getOpcode() == Instruction::Add) {
6917     if (!RetI->hasOneUser())
6918       return InstructionCost::getInvalid();
6919     RetI = RetI->user_back();
6920   }
6921 
  // Test if the found instruction is a reduction, and if not return an invalid
  // cost telling the caller to use the original cost modelling.
6924   if (!InLoopReductionImmediateChains.count(RetI))
6925     return InstructionCost::getInvalid();
6926 
6927   // Find the reduction this chain is a part of and calculate the basic cost of
6928   // the reduction on its own.
6929   Instruction *LastChain = InLoopReductionImmediateChains[RetI];
6930   Instruction *ReductionPhi = LastChain;
6931   while (!isa<PHINode>(ReductionPhi))
6932     ReductionPhi = InLoopReductionImmediateChains[ReductionPhi];
6933 
6934   RecurrenceDescriptor RdxDesc =
6935       Legal->getReductionVars()[cast<PHINode>(ReductionPhi)];
6936   unsigned BaseCost = TTI.getArithmeticReductionCost(RdxDesc.getOpcode(),
6937                                                      VectorTy, false, CostKind);
6938 
6939   // Get the operand that was not the reduction chain and match it to one of the
6940   // patterns, returning the better cost if it is found.
6941   Instruction *RedOp = RetI->getOperand(1) == LastChain
6942                            ? dyn_cast<Instruction>(RetI->getOperand(0))
6943                            : dyn_cast<Instruction>(RetI->getOperand(1));
6944 
6945   VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
6946 
6947   if (RedOp && (isa<SExtInst>(RedOp) || isa<ZExtInst>(RedOp)) &&
6948       !TheLoop->isLoopInvariant(RedOp)) {
6949     bool IsUnsigned = isa<ZExtInst>(RedOp);
6950     auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
6951     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6952         /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6953         CostKind);
6954 
6955     unsigned ExtCost =
6956         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
6957                              TTI::CastContextHint::None, CostKind, RedOp);
6958     if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
6959       return I == RetI ? *RedCost.getValue() : 0;
6960   } else if (RedOp && RedOp->getOpcode() == Instruction::Mul) {
6961     Instruction *Mul = RedOp;
6962     Instruction *Op0 = dyn_cast<Instruction>(Mul->getOperand(0));
6963     Instruction *Op1 = dyn_cast<Instruction>(Mul->getOperand(1));
6964     if (Op0 && Op1 && (isa<SExtInst>(Op0) || isa<ZExtInst>(Op0)) &&
6965         Op0->getOpcode() == Op1->getOpcode() &&
6966         Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
6967         !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
6968       bool IsUnsigned = isa<ZExtInst>(Op0);
6969       auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
6970       // reduce(mul(ext, ext))
6971       unsigned ExtCost =
6972           TTI.getCastInstrCost(Op0->getOpcode(), VectorTy, ExtType,
6973                                TTI::CastContextHint::None, CostKind, Op0);
6974       InstructionCost MulCost =
6975           TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind);
6976 
6977       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6978           /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6979           CostKind);
6980 
6981       if (RedCost.isValid() && RedCost < ExtCost * 2 + MulCost + BaseCost)
6982         return I == RetI ? *RedCost.getValue() : 0;
6983     } else {
6984       InstructionCost MulCost =
6985           TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind);
6986 
6987       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6988           /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy,
6989           CostKind);
6990 
6991       if (RedCost.isValid() && RedCost < MulCost + BaseCost)
6992         return I == RetI ? *RedCost.getValue() : 0;
6993     }
6994   }
6995 
6996   return I == RetI ? BaseCost : InstructionCost::getInvalid();
6997 }
6998 
6999 InstructionCost
7000 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
7001                                                      ElementCount VF) {
7002   // Calculate scalar cost only. Vectorization cost should be ready at this
7003   // moment.
7004   if (VF.isScalar()) {
7005     Type *ValTy = getMemInstValueType(I);
7006     const Align Alignment = getLoadStoreAlignment(I);
7007     unsigned AS = getLoadStoreAddressSpace(I);
7008 
7009     return TTI.getAddressComputationCost(ValTy) +
7010            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
7011                                TTI::TCK_RecipThroughput, I);
7012   }
7013   return getWideningCost(I, VF);
7014 }
7015 
7016 LoopVectorizationCostModel::VectorizationCostTy
7017 LoopVectorizationCostModel::getInstructionCost(Instruction *I,
7018                                                ElementCount VF) {
7019   // If we know that this instruction will remain uniform, check the cost of
7020   // the scalar version.
7021   if (isUniformAfterVectorization(I, VF))
7022     VF = ElementCount::getFixed(1);
7023 
7024   if (VF.isVector() && isProfitableToScalarize(I, VF))
7025     return VectorizationCostTy(InstsToScalarize[VF][I], false);
7026 
7027   // Forced scalars do not have any scalarization overhead.
7028   auto ForcedScalar = ForcedScalars.find(VF);
7029   if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
7030     auto InstSet = ForcedScalar->second;
7031     if (InstSet.count(I))
7032       return VectorizationCostTy(
7033           (getInstructionCost(I, ElementCount::getFixed(1)).first *
7034            VF.getKnownMinValue()),
7035           false);
7036   }
7037 
7038   Type *VectorTy;
7039   InstructionCost C = getInstructionCost(I, VF, VectorTy);
7040 
7041   bool TypeNotScalarized =
7042       VF.isVector() && VectorTy->isVectorTy() &&
7043       TTI.getNumberOfParts(VectorTy) < VF.getKnownMinValue();
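  // For instance, on a target with 128-bit vectors, an <8 x i64> value splits
  // into 4 parts; with VF = 8, 4 < 8 means the type is genuinely vectorized
  // rather than effectively scalarized (illustrative widths).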
7044   return VectorizationCostTy(C, TypeNotScalarized);
7045 }
7046 
7047 InstructionCost
7048 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
7049                                                      ElementCount VF) {
7050 
7051   if (VF.isScalable())
7052     return InstructionCost::getInvalid();
7053 
7054   if (VF.isScalar())
7055     return 0;
7056 
7057   InstructionCost Cost = 0;
7058   Type *RetTy = ToVectorTy(I->getType(), VF);
7059   if (!RetTy->isVoidTy() &&
7060       (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
7061     Cost += TTI.getScalarizationOverhead(
7062         cast<VectorType>(RetTy), APInt::getAllOnesValue(VF.getKnownMinValue()),
7063         true, false);
7064 
7065   // Some targets keep addresses scalar.
7066   if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
7067     return Cost;
7068 
7069   // Some targets support efficient element stores.
7070   if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
7071     return Cost;
7072 
7073   // Collect operands to consider.
7074   CallInst *CI = dyn_cast<CallInst>(I);
7075   Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands();
7076 
7077   // Skip operands that do not require extraction/scalarization and do not incur
7078   // any overhead.
7079   SmallVector<Type *> Tys;
7080   for (auto *V : filterExtractingOperands(Ops, VF))
7081     Tys.push_back(MaybeVectorizeType(V->getType(), VF));
7082   return Cost + TTI.getOperandsScalarizationOverhead(
7083                     filterExtractingOperands(Ops, VF), Tys);
7084 }
7085 
7086 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
7087   if (VF.isScalar())
7088     return;
7089   NumPredStores = 0;
7090   for (BasicBlock *BB : TheLoop->blocks()) {
7091     // For each instruction in the old loop.
7092     for (Instruction &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
7094       if (!Ptr)
7095         continue;
7096 
7097       // TODO: We should generate better code and update the cost model for
7098       // predicated uniform stores. Today they are treated as any other
7099       // predicated store (see added test cases in
7100       // invariant-store-vectorization.ll).
7101       if (isa<StoreInst>(&I) && isScalarWithPredication(&I))
7102         NumPredStores++;
7103 
7104       if (Legal->isUniformMemOp(I)) {
7105         // TODO: Avoid replicating loads and stores instead of
7106         // relying on instcombine to remove them.
7107         // Load: Scalar load + broadcast
7108         // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
7109         InstructionCost Cost = getUniformMemOpCost(&I, VF);
7110         setWideningDecision(&I, VF, CM_Scalarize, Cost);
7111         continue;
7112       }
7113 
7114       // We assume that widening is the best solution when possible.
7115       if (memoryInstructionCanBeWidened(&I, VF)) {
7116         InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
7117         int ConsecutiveStride =
7118                Legal->isConsecutivePtr(getLoadStorePointerOperand(&I));
7119         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
7120                "Expected consecutive stride.");
7121         InstWidening Decision =
7122             ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
7123         setWideningDecision(&I, VF, Decision, Cost);
7124         continue;
7125       }
7126 
7127       // Choose between Interleaving, Gather/Scatter or Scalarization.
7128       InstructionCost InterleaveCost = InstructionCost::getInvalid();
7129       unsigned NumAccesses = 1;
7130       if (isAccessInterleaved(&I)) {
7131         auto Group = getInterleavedAccessGroup(&I);
        assert(Group && "Failed to get an interleaved access group.");
7133 
7134         // Make one decision for the whole group.
7135         if (getWideningDecision(&I, VF) != CM_Unknown)
7136           continue;
7137 
7138         NumAccesses = Group->getNumMembers();
7139         if (interleavedAccessCanBeWidened(&I, VF))
7140           InterleaveCost = getInterleaveGroupCost(&I, VF);
7141       }
7142 
7143       InstructionCost GatherScatterCost =
7144           isLegalGatherOrScatter(&I)
7145               ? getGatherScatterCost(&I, VF) * NumAccesses
7146               : InstructionCost::getInvalid();
7147 
7148       InstructionCost ScalarizationCost =
7149           !VF.isScalable() ? getMemInstScalarizationCost(&I, VF) * NumAccesses
7150                            : InstructionCost::getInvalid();
7151 
7152       // Choose better solution for the current VF,
7153       // write down this decision and use it during vectorization.
7154       InstructionCost Cost;
7155       InstWidening Decision;
7156       if (InterleaveCost <= GatherScatterCost &&
7157           InterleaveCost < ScalarizationCost) {
7158         Decision = CM_Interleave;
7159         Cost = InterleaveCost;
7160       } else if (GatherScatterCost < ScalarizationCost) {
7161         Decision = CM_GatherScatter;
7162         Cost = GatherScatterCost;
7163       } else {
7164         assert(!VF.isScalable() &&
7165                "We cannot yet scalarise for scalable vectors");
7166         Decision = CM_Scalarize;
7167         Cost = ScalarizationCost;
7168       }
      // If the instruction belongs to an interleave group, the whole group
7170       // receives the same decision. The whole group receives the cost, but
7171       // the cost will actually be assigned to one instruction.
7172       if (auto Group = getInterleavedAccessGroup(&I))
7173         setWideningDecision(Group, VF, Decision, Cost);
7174       else
7175         setWideningDecision(&I, VF, Decision, Cost);
7176     }
7177   }
7178 
7179   // Make sure that any load of address and any other address computation
7180   // remains scalar unless there is gather/scatter support. This avoids
7181   // inevitable extracts into address registers, and also has the benefit of
7182   // activating LSR more, since that pass can't optimize vectorized
7183   // addresses.
7184   if (TTI.prefersVectorizedAddressing())
7185     return;
7186 
7187   // Start with all scalar pointer uses.
7188   SmallPtrSet<Instruction *, 8> AddrDefs;
7189   for (BasicBlock *BB : TheLoop->blocks())
7190     for (Instruction &I : *BB) {
7191       Instruction *PtrDef =
7192         dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
7193       if (PtrDef && TheLoop->contains(PtrDef) &&
7194           getWideningDecision(&I, VF) != CM_GatherScatter)
7195         AddrDefs.insert(PtrDef);
7196     }
7197 
7198   // Add all instructions used to generate the addresses.
7199   SmallVector<Instruction *, 4> Worklist;
7200   append_range(Worklist, AddrDefs);
7201   while (!Worklist.empty()) {
7202     Instruction *I = Worklist.pop_back_val();
7203     for (auto &Op : I->operands())
7204       if (auto *InstOp = dyn_cast<Instruction>(Op))
7205         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
7206             AddrDefs.insert(InstOp).second)
7207           Worklist.push_back(InstOp);
7208   }
7209 
7210   for (auto *I : AddrDefs) {
7211     if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by
      // cost functions, but since this involves the task of finding out
7214       // if the loaded register is involved in an address computation, it is
7215       // instead changed here when we know this is the case.
7216       InstWidening Decision = getWideningDecision(I, VF);
7217       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
7218         // Scalarize a widened load of address.
7219         setWideningDecision(
7220             I, VF, CM_Scalarize,
7221             (VF.getKnownMinValue() *
7222              getMemoryInstructionCost(I, ElementCount::getFixed(1))));
7223       else if (auto Group = getInterleavedAccessGroup(I)) {
7224         // Scalarize an interleave group of address loads.
7225         for (unsigned I = 0; I < Group->getFactor(); ++I) {
7226           if (Instruction *Member = Group->getMember(I))
7227             setWideningDecision(
7228                 Member, VF, CM_Scalarize,
7229                 (VF.getKnownMinValue() *
7230                  getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
7231         }
7232       }
7233     } else
7234       // Make sure I gets scalarized and a cost estimate without
7235       // scalarization overhead.
7236       ForcedScalars[VF].insert(I);
7237   }
7238 }
7239 
7240 InstructionCost
7241 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
7242                                                Type *&VectorTy) {
7243   Type *RetTy = I->getType();
7244   if (canTruncateToMinimalBitwidth(I, VF))
7245     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
7246   VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF);
7247   auto SE = PSE.getSE();
7248   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
7249 
7250   // TODO: We need to estimate the cost of intrinsic calls.
7251   switch (I->getOpcode()) {
7252   case Instruction::GetElementPtr:
7253     // We mark this instruction as zero-cost because the cost of GEPs in
7254     // vectorized code depends on whether the corresponding memory instruction
7255     // is scalarized or not. Therefore, we handle GEPs with the memory
7256     // instruction cost.
7257     return 0;
7258   case Instruction::Br: {
    // In cases of scalarized and predicated instructions, there will be VF
    // predicated blocks in the vectorized loop. Each branch around these
    // blocks also requires an extract of its vector compare i1 element.
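    // For example (VF = 4, illustrative), each such branch looks roughly like:
    //   %m0 = extractelement <4 x i1> %cmp, i32 0
    //   br i1 %m0, label %pred.if, label %pred.continue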
7262     bool ScalarPredicatedBB = false;
7263     BranchInst *BI = cast<BranchInst>(I);
7264     if (VF.isVector() && BI->isConditional() &&
7265         (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
7266          PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
7267       ScalarPredicatedBB = true;
7268 
7269     if (ScalarPredicatedBB) {
7270       // Return cost for branches around scalarized and predicated blocks.
7271       assert(!VF.isScalable() && "scalable vectors not yet supported.");
7272       auto *Vec_i1Ty =
7273           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
7274       return (TTI.getScalarizationOverhead(
7275                   Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()),
7276                   false, true) +
7277               (TTI.getCFInstrCost(Instruction::Br, CostKind) *
7278                VF.getKnownMinValue()));
7279     } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
7280       // The back-edge branch will remain, as will all scalar branches.
7281       return TTI.getCFInstrCost(Instruction::Br, CostKind);
7282     else
7283       // This branch will be eliminated by if-conversion.
7284       return 0;
7285     // Note: We currently assume zero cost for an unconditional branch inside
7286     // a predicated block since it will become a fall-through, although we
7287     // may decide in the future to call TTI for all branches.
7288   }
7289   case Instruction::PHI: {
7290     auto *Phi = cast<PHINode>(I);
7291 
7292     // First-order recurrences are replaced by vector shuffles inside the loop.
7293     // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
7294     if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi))
7295       return TTI.getShuffleCost(
7296           TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy),
7297           VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1));
7298 
7299     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
7300     // converted into select instructions. We require N - 1 selects per phi
7301     // node, where N is the number of incoming values.
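    // For example, a phi with three incoming values becomes, roughly, two
    // selects over the widened incoming values (VF = 4, illustrative):
    //   %s0 = select <4 x i1> %mask1, <4 x i32> %v1, <4 x i32> %v0
    //   %s1 = select <4 x i1> %mask2, <4 x i32> %v2, <4 x i32> %s0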
7302     if (VF.isVector() && Phi->getParent() != TheLoop->getHeader())
7303       return (Phi->getNumIncomingValues() - 1) *
7304              TTI.getCmpSelInstrCost(
7305                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
7306                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
7307                  CmpInst::BAD_ICMP_PREDICATE, CostKind);
7308 
7309     return TTI.getCFInstrCost(Instruction::PHI, CostKind);
7310   }
7311   case Instruction::UDiv:
7312   case Instruction::SDiv:
7313   case Instruction::URem:
7314   case Instruction::SRem:
7315     // If we have a predicated instruction, it may not be executed for each
7316     // vector lane. Get the scalarization cost and scale this amount by the
7317     // probability of executing the predicated block. If the instruction is not
7318     // predicated, we fall through to the next case.
7319     if (VF.isVector() && isScalarWithPredication(I)) {
7320       InstructionCost Cost = 0;
7321 
7322       // These instructions have a non-void type, so account for the phi nodes
7323       // that we will create. This cost is likely to be zero. The phi node
7324       // cost, if any, should be scaled by the block probability because it
7325       // models a copy at the end of each predicated block.
7326       Cost += VF.getKnownMinValue() *
7327               TTI.getCFInstrCost(Instruction::PHI, CostKind);
7328 
7329       // The cost of the non-predicated instruction.
7330       Cost += VF.getKnownMinValue() *
7331               TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);
7332 
7333       // The cost of insertelement and extractelement instructions needed for
7334       // scalarization.
7335       Cost += getScalarizationOverhead(I, VF);
7336 
7337       // Scale the cost by the probability of executing the predicated blocks.
7338       // This assumes the predicated block for each vector lane is equally
7339       // likely.
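      // For example (illustrative): with a reciprocal block probability of 2,
      // i.e. each predicated block assumed to execute half the time, a total
      // scalarization cost of C is charged as C / 2.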
7340       return Cost / getReciprocalPredBlockProb();
7341     }
7342     LLVM_FALLTHROUGH;
7343   case Instruction::Add:
7344   case Instruction::FAdd:
7345   case Instruction::Sub:
7346   case Instruction::FSub:
7347   case Instruction::Mul:
7348   case Instruction::FMul:
7349   case Instruction::FDiv:
7350   case Instruction::FRem:
7351   case Instruction::Shl:
7352   case Instruction::LShr:
7353   case Instruction::AShr:
7354   case Instruction::And:
7355   case Instruction::Or:
7356   case Instruction::Xor: {
7357     // Since we will replace the stride by 1 the multiplication should go away.
7358     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
7359       return 0;
7360 
7361     // Detect reduction patterns
7362     InstructionCost RedCost;
7363     if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7364             .isValid())
7365       return RedCost;
7366 
7367     // Certain instructions can be cheaper to vectorize if they have a constant
7368     // second vector operand. One example of this are shifts on x86.
7369     Value *Op2 = I->getOperand(1);
7370     TargetTransformInfo::OperandValueProperties Op2VP;
7371     TargetTransformInfo::OperandValueKind Op2VK =
7372         TTI.getOperandInfo(Op2, Op2VP);
7373     if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
7374       Op2VK = TargetTransformInfo::OK_UniformValue;
7375 
7376     SmallVector<const Value *, 4> Operands(I->operand_values());
7377     unsigned N = isScalarAfterVectorization(I, VF) ? VF.getKnownMinValue() : 1;
7378     return N * TTI.getArithmeticInstrCost(
7379                    I->getOpcode(), VectorTy, CostKind,
7380                    TargetTransformInfo::OK_AnyValue,
7381                    Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
7382   }
7383   case Instruction::FNeg: {
7384     assert(!VF.isScalable() && "VF is assumed to be non scalable.");
7385     unsigned N = isScalarAfterVectorization(I, VF) ? VF.getKnownMinValue() : 1;
7386     return N * TTI.getArithmeticInstrCost(
7387                    I->getOpcode(), VectorTy, CostKind,
7388                    TargetTransformInfo::OK_AnyValue,
7389                    TargetTransformInfo::OK_AnyValue,
7390                    TargetTransformInfo::OP_None, TargetTransformInfo::OP_None,
7391                    I->getOperand(0), I);
7392   }
7393   case Instruction::Select: {
7394     SelectInst *SI = cast<SelectInst>(I);
7395     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
7396     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
7397     Type *CondTy = SI->getCondition()->getType();
7398     if (!ScalarCond)
7399       CondTy = VectorType::get(CondTy, VF);
7400     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy,
7401                                   CmpInst::BAD_ICMP_PREDICATE, CostKind, I);
7402   }
7403   case Instruction::ICmp:
7404   case Instruction::FCmp: {
7405     Type *ValTy = I->getOperand(0)->getType();
7406     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
7407     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
7408       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
7409     VectorTy = ToVectorTy(ValTy, VF);
7410     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr,
7411                                   CmpInst::BAD_ICMP_PREDICATE, CostKind, I);
7412   }
7413   case Instruction::Store:
7414   case Instruction::Load: {
7415     ElementCount Width = VF;
7416     if (Width.isVector()) {
7417       InstWidening Decision = getWideningDecision(I, Width);
7418       assert(Decision != CM_Unknown &&
7419              "CM decision should be taken at this point");
7420       if (Decision == CM_Scalarize)
7421         Width = ElementCount::getFixed(1);
7422     }
7423     VectorTy = ToVectorTy(getMemInstValueType(I), Width);
7424     return getMemoryInstructionCost(I, VF);
7425   }
7426   case Instruction::ZExt:
7427   case Instruction::SExt:
7428   case Instruction::FPToUI:
7429   case Instruction::FPToSI:
7430   case Instruction::FPExt:
7431   case Instruction::PtrToInt:
7432   case Instruction::IntToPtr:
7433   case Instruction::SIToFP:
7434   case Instruction::UIToFP:
7435   case Instruction::Trunc:
7436   case Instruction::FPTrunc:
7437   case Instruction::BitCast: {
7438     // Computes the CastContextHint from a Load/Store instruction.
7439     auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
7440       assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
7441              "Expected a load or a store!");
7442 
7443       if (VF.isScalar() || !TheLoop->contains(I))
7444         return TTI::CastContextHint::Normal;
7445 
7446       switch (getWideningDecision(I, VF)) {
7447       case LoopVectorizationCostModel::CM_GatherScatter:
7448         return TTI::CastContextHint::GatherScatter;
7449       case LoopVectorizationCostModel::CM_Interleave:
7450         return TTI::CastContextHint::Interleave;
7451       case LoopVectorizationCostModel::CM_Scalarize:
7452       case LoopVectorizationCostModel::CM_Widen:
7453         return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked
7454                                         : TTI::CastContextHint::Normal;
7455       case LoopVectorizationCostModel::CM_Widen_Reverse:
7456         return TTI::CastContextHint::Reversed;
7457       case LoopVectorizationCostModel::CM_Unknown:
7458         llvm_unreachable("Instr did not go through cost modelling?");
7459       }
7460 
7461       llvm_unreachable("Unhandled case!");
7462     };
7463 
7464     unsigned Opcode = I->getOpcode();
7465     TTI::CastContextHint CCH = TTI::CastContextHint::None;
7466     // For Trunc, the context is the only user, which must be a StoreInst.
7467     if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
7468       if (I->hasOneUse())
7469         if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
7470           CCH = ComputeCCH(Store);
7471     }
7472     // For Z/Sext, the context is the operand, which must be a LoadInst.
7473     else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
7474              Opcode == Instruction::FPExt) {
7475       if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
7476         CCH = ComputeCCH(Load);
7477     }
7478 
7479     // We optimize the truncation of induction variables having constant
7480     // integer steps. The cost of these truncations is the same as the scalar
7481     // operation.
7482     if (isOptimizableIVTruncate(I, VF)) {
7483       auto *Trunc = cast<TruncInst>(I);
7484       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
7485                                   Trunc->getSrcTy(), CCH, CostKind, Trunc);
7486     }
7487 
7488     // Detect reduction patterns
7489     InstructionCost RedCost;
7490     if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7491             .isValid())
7492       return RedCost;
7493 
7494     Type *SrcScalarTy = I->getOperand(0)->getType();
7495     Type *SrcVecTy =
7496         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
7497     if (canTruncateToMinimalBitwidth(I, VF)) {
      // This cast is going to be shrunk. This may remove the cast or it might
      // turn it into a slightly different cast. For example, if MinBW == 16,
      // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
7501       //
7502       // Calculate the modified src and dest types.
7503       Type *MinVecTy = VectorTy;
7504       if (Opcode == Instruction::Trunc) {
7505         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
7506         VectorTy =
7507             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7508       } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
7509         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
7510         VectorTy =
7511             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7512       }
7513     }
7514 
7515     unsigned N;
7516     if (isScalarAfterVectorization(I, VF)) {
7517       assert(!VF.isScalable() && "VF is assumed to be non scalable");
7518       N = VF.getKnownMinValue();
7519     } else
7520       N = 1;
7521     return N *
7522            TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
7523   }
7524   case Instruction::Call: {
7525     bool NeedToScalarize;
7526     CallInst *CI = cast<CallInst>(I);
7527     InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
7528     if (getVectorIntrinsicIDForCall(CI, TLI)) {
7529       InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF);
7530       return std::min(CallCost, IntrinsicCost);
7531     }
7532     return CallCost;
7533   }
7534   case Instruction::ExtractValue:
7535     return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput);
7536   default:
7537     // The cost of executing VF copies of the scalar instruction. This opcode
7538     // is unknown. Assume that it is the same as 'mul'.
7539     return VF.getKnownMinValue() * TTI.getArithmeticInstrCost(
7540                                        Instruction::Mul, VectorTy, CostKind) +
7541            getScalarizationOverhead(I, VF);
7542   } // end of switch.
7543 }
7544 
7545 char LoopVectorize::ID = 0;
7546 
7547 static const char lv_name[] = "Loop Vectorization";
7548 
7549 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
7550 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
7551 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
7552 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
7553 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
7554 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
7555 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
7556 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
7557 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
7558 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
7559 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
7560 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
7561 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
7562 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
7563 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
7564 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
7565 
7566 namespace llvm {
7567 
7568 Pass *createLoopVectorizePass() { return new LoopVectorize(); }
7569 
7570 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
7571                               bool VectorizeOnlyWhenForced) {
7572   return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
7573 }
7574 
7575 } // end namespace llvm
7576 
7577 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
7578   // Check if the pointer operand of a load or store instruction is
7579   // consecutive.
7580   if (auto *Ptr = getLoadStorePointerOperand(Inst))
7581     return Legal->isConsecutivePtr(Ptr);
7582   return false;
7583 }
7584 
7585 void LoopVectorizationCostModel::collectValuesToIgnore() {
7586   // Ignore ephemeral values.
7587   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
7588 
7589   // Ignore type-promoting instructions we identified during reduction
7590   // detection.
7591   for (auto &Reduction : Legal->getReductionVars()) {
7592     RecurrenceDescriptor &RedDes = Reduction.second;
7593     const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
7594     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7595   }
7596   // Ignore type-casting instructions we identified during induction
7597   // detection.
7598   for (auto &Induction : Legal->getInductionVars()) {
7599     InductionDescriptor &IndDes = Induction.second;
7600     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
7601     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7602   }
7603 }
7604 
7605 void LoopVectorizationCostModel::collectInLoopReductions() {
7606   for (auto &Reduction : Legal->getReductionVars()) {
7607     PHINode *Phi = Reduction.first;
7608     RecurrenceDescriptor &RdxDesc = Reduction.second;
7609 
7610     // We don't collect reductions that are type promoted (yet).
7611     if (RdxDesc.getRecurrenceType() != Phi->getType())
7612       continue;
7613 
7614     // If the target would prefer this reduction to happen "in-loop", then we
7615     // want to record it as such.
7616     unsigned Opcode = RdxDesc.getOpcode();
7617     if (!PreferInLoopReductions &&
7618         !TTI.preferInLoopReduction(Opcode, Phi->getType(),
7619                                    TargetTransformInfo::ReductionFlags()))
7620       continue;
7621 
7622     // Check that we can correctly put the reductions into the loop, by
7623     // finding the chain of operations that leads from the phi to the loop
7624     // exit value.
7625     SmallVector<Instruction *, 4> ReductionOperations =
7626         RdxDesc.getReductionOpChain(Phi, TheLoop);
7627     bool InLoop = !ReductionOperations.empty();
7628     if (InLoop) {
7629       InLoopReductionChains[Phi] = ReductionOperations;
7630       // Add the elements to InLoopReductionImmediateChains for cost modelling.
7631       Instruction *LastChain = Phi;
7632       for (auto *I : ReductionOperations) {
7633         InLoopReductionImmediateChains[I] = LastChain;
7634         LastChain = I;
7635       }
7636     }
7637     LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
7638                       << " reduction for phi: " << *Phi << "\n");
7639   }
7640 }
7641 
// TODO: we could return a pair of values that specify the max VF and
// min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
// `buildVPlans(VF, VF)`. We cannot do that yet because VPlan at the
// moment doesn't have a cost model that can choose which plan to
// execute if more than one is generated.
7647 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
7648                                  LoopVectorizationCostModel &CM) {
7649   unsigned WidestType;
7650   std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
7651   return WidestVectorRegBits / WidestType;
7652 }
7653 
7654 VectorizationFactor
7655 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
7656   assert(!UserVF.isScalable() && "scalable vectors not yet supported");
7657   ElementCount VF = UserVF;
  // Outer loop handling: outer loops may require CFG and instruction level
  // transformations before even evaluating whether vectorization is profitable.
7660   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
7661   // the vectorization pipeline.
7662   if (!OrigLoop->isInnermost()) {
7663     // If the user doesn't provide a vectorization factor, determine a
7664     // reasonable one.
7665     if (UserVF.isZero()) {
7666       VF = ElementCount::getFixed(
7667           determineVPlanVF(TTI->getRegisterBitWidth(true /* Vector*/), CM));
7668       LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
7669 
7670       // Make sure we have a VF > 1 for stress testing.
7671       if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
7672         LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
7673                           << "overriding computed VF.\n");
7674         VF = ElementCount::getFixed(4);
7675       }
7676     }
7677     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7678     assert(isPowerOf2_32(VF.getKnownMinValue()) &&
7679            "VF needs to be a power of two");
7680     LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
7681                       << "VF " << VF << " to build VPlans.\n");
7682     buildVPlans(VF, VF);
7683 
7684     // For VPlan build stress testing, we bail out after VPlan construction.
7685     if (VPlanBuildStressTest)
7686       return VectorizationFactor::Disabled();
7687 
7688     return {VF, 0 /*Cost*/};
7689   }
7690 
7691   LLVM_DEBUG(
7692       dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
7693                 "VPlan-native path.\n");
7694   return VectorizationFactor::Disabled();
7695 }
7696 
7697 Optional<VectorizationFactor>
7698 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
7699   assert(OrigLoop->isInnermost() && "Inner loop expected.");
7700   Optional<ElementCount> MaybeMaxVF = CM.computeMaxVF(UserVF, UserIC);
  if (!MaybeMaxVF) // Cases that should not be vectorized nor interleaved.
7702     return None;
7703 
7704   // Invalidate interleave groups if all blocks of loop will be predicated.
7705   if (CM.blockNeedsPredication(OrigLoop->getHeader()) &&
7706       !useMaskedInterleavedAccesses(*TTI)) {
7707     LLVM_DEBUG(
7708         dbgs()
7709         << "LV: Invalidate all interleaved groups due to fold-tail by masking "
7710            "which requires masked-interleaved support.\n");
7711     if (CM.InterleaveInfo.invalidateGroups())
7712       // Invalidating interleave groups also requires invalidating all decisions
7713       // based on them, which includes widening decisions and uniform and scalar
7714       // values.
7715       CM.invalidateCostModelingDecisions();
7716   }
7717 
7718   ElementCount MaxVF = MaybeMaxVF.getValue();
7719   assert(MaxVF.isNonZero() && "MaxVF is zero.");
7720 
7721   bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxVF);
7722   if (!UserVF.isZero() &&
7723       (UserVFIsLegal || (UserVF.isScalable() && MaxVF.isScalable()))) {
    // FIXME: MaxVF is temporarily used in place of UserVF for illegal scalable
    // VFs here; this should be reverted to only use legal UserVFs once the
    // loop below supports scalable VFs.
7727     ElementCount VF = UserVFIsLegal ? UserVF : MaxVF;
7728     LLVM_DEBUG(dbgs() << "LV: Using " << (UserVFIsLegal ? "user" : "max")
7729                       << " VF " << VF << ".\n");
7730     assert(isPowerOf2_32(VF.getKnownMinValue()) &&
7731            "VF needs to be a power of two");
7732     // Collect the instructions (and their associated costs) that will be more
7733     // profitable to scalarize.
7734     CM.selectUserVectorizationFactor(VF);
7735     CM.collectInLoopReductions();
7736     buildVPlansWithVPRecipes(VF, VF);
7737     LLVM_DEBUG(printPlans(dbgs()));
7738     return {{VF, 0}};
7739   }
7740 
7741   assert(!MaxVF.isScalable() &&
7742          "Scalable vectors not yet supported beyond this point");
7743 
7744   for (ElementCount VF = ElementCount::getFixed(1);
7745        ElementCount::isKnownLE(VF, MaxVF); VF *= 2) {
7746     // Collect Uniform and Scalar instructions after vectorization with VF.
7747     CM.collectUniformsAndScalars(VF);
7748 
7749     // Collect the instructions (and their associated costs) that will be more
7750     // profitable to scalarize.
7751     if (VF.isVector())
7752       CM.collectInstsToScalarize(VF);
7753   }
7754 
7755   CM.collectInLoopReductions();
7756 
7757   buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxVF);
7758   LLVM_DEBUG(printPlans(dbgs()));
7759   if (MaxVF.isScalar())
7760     return VectorizationFactor::Disabled();
7761 
7762   // Select the optimal vectorization factor.
7763   return CM.selectVectorizationFactor(MaxVF);
7764 }
7765 
7766 void LoopVectorizationPlanner::setBestPlan(ElementCount VF, unsigned UF) {
7767   LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF
7768                     << '\n');
7769   BestVF = VF;
7770   BestUF = UF;
7771 
7772   erase_if(VPlans, [VF](const VPlanPtr &Plan) {
7773     return !Plan->hasVF(VF);
7774   });
  assert(VPlans.size() == 1 && "Best VF does not have a single VPlan.");
7776 }
7777 
7778 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV,
7779                                            DominatorTree *DT) {
7780   // Perform the actual loop transformation.
7781 
7782   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
7783   assert(BestVF.hasValue() && "Vectorization Factor is missing");
7784   assert(VPlans.size() == 1 && "Not a single VPlan to execute.");
7785 
7786   VPTransformState State{
7787       *BestVF, BestUF, LI, DT, ILV.Builder, &ILV, VPlans.front().get()};
7788   State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
7789   State.TripCount = ILV.getOrCreateTripCount(nullptr);
7790   State.CanonicalIV = ILV.Induction;
7791 
7792   ILV.printDebugTracesAtStart();
7793 
7794   //===------------------------------------------------===//
7795   //
  // Notice: any optimization or new instruction that goes
  // into the code below should also be implemented in
  // the cost-model.
7799   //
7800   //===------------------------------------------------===//
7801 
7802   // 2. Copy and widen instructions from the old loop into the new loop.
7803   VPlans.front()->execute(&State);
7804 
7805   // 3. Fix the vectorized code: take care of header phi's, live-outs,
7806   //    predication, updating analyses.
7807   ILV.fixVectorizedLoop(State);
7808 
7809   ILV.printDebugTracesAtEnd();
7810 }
7811 
7812 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
7813     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
7814 
  // We create new control-flow for the vectorized loop, so the original exit
  // conditions will be dead after vectorization if they are only used by the
  // terminator.
7818   SmallVector<BasicBlock*> ExitingBlocks;
7819   OrigLoop->getExitingBlocks(ExitingBlocks);
7820   for (auto *BB : ExitingBlocks) {
7821     auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
7822     if (!Cmp || !Cmp->hasOneUse())
7823       continue;
7824 
7825     // TODO: we should introduce a getUniqueExitingBlocks on Loop
7826     if (!DeadInstructions.insert(Cmp).second)
7827       continue;
7828 
    // The operands of the icmp are often a dead trunc, used by IndUpdate.
7830     // TODO: can recurse through operands in general
7831     for (Value *Op : Cmp->operands()) {
7832       if (isa<TruncInst>(Op) && Op->hasOneUse())
7833           DeadInstructions.insert(cast<Instruction>(Op));
7834     }
7835   }
7836 
7837   // We create new "steps" for induction variable updates to which the original
7838   // induction variables map. An original update instruction will be dead if
7839   // all its users except the induction variable are dead.
7840   auto *Latch = OrigLoop->getLoopLatch();
7841   for (auto &Induction : Legal->getInductionVars()) {
7842     PHINode *Ind = Induction.first;
7843     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
7844 
    // If the tail is to be folded by masking, the primary induction variable,
    // if it exists, isn't dead: it will be used for masking. Don't kill it.
7847     if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction())
7848       continue;
7849 
7850     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
7851           return U == Ind || DeadInstructions.count(cast<Instruction>(U));
7852         }))
7853       DeadInstructions.insert(IndUpdate);
7854 
7855     // We record as "Dead" also the type-casting instructions we had identified
7856     // during induction analysis. We don't need any handling for them in the
7857     // vectorized loop because we have proven that, under a proper runtime
7858     // test guarding the vectorized loop, the value of the phi, and the casted
7859     // value of the phi, are the same. The last instruction in this casting chain
7860     // will get its scalar/vector/widened def from the scalar/vector/widened def
7861     // of the respective phi node. Any other casts in the induction def-use chain
7862     // have no other uses outside the phi update chain, and will be ignored.
7863     InductionDescriptor &IndDes = Induction.second;
7864     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
7865     DeadInstructions.insert(Casts.begin(), Casts.end());
7866   }
7867 }
7868 
7869 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }
7870 
7871 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
7872 
7873 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
7874                                         Instruction::BinaryOps BinOp) {
7875   // When unrolling and the VF is 1, we only need to add a simple scalar.
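  // For example, with UF = 4 the unrolled parts receive StartIdx 0, 1, 2 and
  // 3, each computing Val + StartIdx * Step.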
7876   Type *Ty = Val->getType();
7877   assert(!Ty->isVectorTy() && "Val must be a scalar");
7878 
7879   if (Ty->isFloatingPointTy()) {
7880     Constant *C = ConstantFP::get(Ty, (double)StartIdx);
7881 
7882     // Floating-point operations inherit FMF via the builder's flags.
7883     Value *MulOp = Builder.CreateFMul(C, Step);
7884     return Builder.CreateBinOp(BinOp, Val, MulOp);
7885   }
7886   Constant *C = ConstantInt::get(Ty, StartIdx);
7887   return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
7888 }
7889 
7890 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
7891   SmallVector<Metadata *, 4> MDs;
7892   // Reserve first location for self reference to the LoopID metadata node.
7893   MDs.push_back(nullptr);
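  // Loop IDs are self-referential, e.g.:
  //   !0 = distinct !{!0, !1}
  //   !1 = !{!"llvm.loop.unroll.runtime.disable"}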
7894   bool IsUnrollMetadata = false;
7895   MDNode *LoopID = L->getLoopID();
7896   if (LoopID) {
7897     // First find existing loop unrolling disable metadata.
7898     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
7899       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
7900       if (MD) {
7901         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
7902         IsUnrollMetadata =
7903             S && S->getString().startswith("llvm.loop.unroll.disable");
7904       }
7905       MDs.push_back(LoopID->getOperand(i));
7906     }
7907   }
7908 
7909   if (!IsUnrollMetadata) {
7910     // Add runtime unroll disable metadata.
7911     LLVMContext &Context = L->getHeader()->getContext();
7912     SmallVector<Metadata *, 1> DisableOperands;
7913     DisableOperands.push_back(
7914         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
7915     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
7916     MDs.push_back(DisableNode);
7917     MDNode *NewLoopID = MDNode::get(Context, MDs);
7918     // Set operand 0 to refer to the loop id itself.
7919     NewLoopID->replaceOperandWith(0, NewLoopID);
7920     L->setLoopID(NewLoopID);
7921   }
7922 }
7923 
7924 //===--------------------------------------------------------------------===//
7925 // EpilogueVectorizerMainLoop
7926 //===--------------------------------------------------------------------===//
7927 
7928 /// This function is partially responsible for generating the control flow
7929 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
7930 BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
7931   MDNode *OrigLoopID = OrigLoop->getLoopID();
7932   Loop *Lp = createVectorLoopSkeleton("");
7933 
7934   // Generate the code to check the minimum iteration count of the vector
7935   // epilogue (see below).
7936   EPI.EpilogueIterationCountCheck =
7937       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true);
7938   EPI.EpilogueIterationCountCheck->setName("iter.check");
7939 
7940   // Generate the code to check any assumptions that we've made for SCEV
7941   // expressions.
7942   EPI.SCEVSafetyCheck = emitSCEVChecks(Lp, LoopScalarPreHeader);
7943 
7944   // Generate the code that checks at runtime if arrays overlap. We put the
7945   // checks into a separate block to make the more common case of few elements
7946   // faster.
7947   EPI.MemSafetyCheck = emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
7948 
7949   // Generate the iteration count check for the main loop, *after* the check
7950   // for the epilogue loop, so that the path-length is shorter for the case
7951   // that goes directly through the vector epilogue. The longer-path length for
  // the main loop is compensated for by the gain from vectorizing the larger
7953   // trip count. Note: the branch will get updated later on when we vectorize
7954   // the epilogue.
7955   EPI.MainLoopIterationCountCheck =
7956       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false);
7957 
7958   // Generate the induction variable.
7959   OldInduction = Legal->getPrimaryInduction();
7960   Type *IdxTy = Legal->getWidestInductionType();
7961   Value *StartIdx = ConstantInt::get(IdxTy, 0);
7962   Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
7963   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
7964   EPI.VectorTripCount = CountRoundDown;
7965   Induction =
7966       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
7967                               getDebugLocFromInstOrOperands(OldInduction));
7968 
  // Skip induction resume value creation here because they will be created in
  // the second pass. If we created them here, they wouldn't be used anyway,
  // because the VPlan in the second pass still contains the inductions from
  // the original loop.
7973 
7974   return completeLoopSkeleton(Lp, OrigLoopID);
7975 }
7976 
7977 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
7978   LLVM_DEBUG({
7979     dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
7980            << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue()
7981            << ", Main Loop UF:" << EPI.MainLoopUF
7982            << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue()
7983            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
7984   });
7985 }
7986 
7987 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
7988   DEBUG_WITH_TYPE(VerboseDebug, {
7989     dbgs() << "intermediate fn:\n" << *Induction->getFunction() << "\n";
7990   });
7991 }
7992 
7993 BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(
7994     Loop *L, BasicBlock *Bypass, bool ForEpilogue) {
7995   assert(L && "Expected valid Loop.");
7996   assert(Bypass && "Expected valid bypass basic block.");
7997   unsigned VFactor =
7998       ForEpilogue ? EPI.EpilogueVF.getKnownMinValue() : VF.getKnownMinValue();
7999   unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF;
8000   Value *Count = getOrCreateTripCount(L);
8001   // Reuse existing vector loop preheader for TC checks.
8002   // Note that new preheader block is generated for vector loop.
8003   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
8004   IRBuilder<> Builder(TCCheckBlock->getTerminator());
8005 
8006   // Generate code to check if the loop's trip count is less than VF * UF of the
8007   // main vector loop.
8008   auto P =
8009       Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
8010 
8011   Value *CheckMinIters = Builder.CreateICmp(
8012       P, Count, ConstantInt::get(Count->getType(), VFactor * UFactor),
8013       "min.iters.check");
8014 
8015   if (!ForEpilogue)
8016     TCCheckBlock->setName("vector.main.loop.iter.check");
8017 
8018   // Create new preheader for vector loop.
8019   LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
8020                                    DT, LI, nullptr, "vector.ph");
8021 
8022   if (ForEpilogue) {
8023     assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
8024                                  DT->getNode(Bypass)->getIDom()) &&
8025            "TC check is expected to dominate Bypass");
8026 
8027     // Update dominator for Bypass & LoopExit.
8028     DT->changeImmediateDominator(Bypass, TCCheckBlock);
8029     DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
8030 
8031     LoopBypassBlocks.push_back(TCCheckBlock);
8032 
8033     // Save the trip count so we don't have to regenerate it in the
8034     // vec.epilog.iter.check. This is safe to do because the trip count
8035     // generated here dominates the vector epilog iter check.
8036     EPI.TripCount = Count;
8037   }
8038 
8039   ReplaceInstWithInst(
8040       TCCheckBlock->getTerminator(),
8041       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8042 
8043   return TCCheckBlock;
8044 }
8045 
8046 //===--------------------------------------------------------------------===//
8047 // EpilogueVectorizerEpilogueLoop
8048 //===--------------------------------------------------------------------===//
8049 
8050 /// This function is partially responsible for generating the control flow
8051 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
8052 BasicBlock *
8053 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
8054   MDNode *OrigLoopID = OrigLoop->getLoopID();
8055   Loop *Lp = createVectorLoopSkeleton("vec.epilog.");
8056 
  // Now, compare the remaining count; if there aren't enough iterations to
  // execute the vectorized epilogue, skip to the scalar part.
8059   BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader;
8060   VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check");
8061   LoopVectorPreHeader =
8062       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
8063                  LI, nullptr, "vec.epilog.ph");
8064   emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader,
8065                                           VecEpilogueIterationCountCheck);
8066 
8067   // Adjust the control flow taking the state info from the main loop
8068   // vectorization into account.
8069   assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck &&
8070          "expected this to be saved from the previous pass.");
8071   EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith(
8072       VecEpilogueIterationCountCheck, LoopVectorPreHeader);
8073 
8074   DT->changeImmediateDominator(LoopVectorPreHeader,
8075                                EPI.MainLoopIterationCountCheck);
8076 
8077   EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
8078       VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8079 
8080   if (EPI.SCEVSafetyCheck)
8081     EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith(
8082         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8083   if (EPI.MemSafetyCheck)
8084     EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith(
8085         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8086 
8087   DT->changeImmediateDominator(
8088       VecEpilogueIterationCountCheck,
8089       VecEpilogueIterationCountCheck->getSinglePredecessor());
8090 
8091   DT->changeImmediateDominator(LoopScalarPreHeader,
8092                                EPI.EpilogueIterationCountCheck);
8093   DT->changeImmediateDominator(LoopExitBlock, EPI.EpilogueIterationCountCheck);
8094 
8095   // Keep track of bypass blocks, as they feed start values to the induction
8096   // phis in the scalar loop preheader.
8097   if (EPI.SCEVSafetyCheck)
8098     LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck);
8099   if (EPI.MemSafetyCheck)
8100     LoopBypassBlocks.push_back(EPI.MemSafetyCheck);
8101   LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck);
8102 
  // Generate a resume induction for the vector epilogue and put it in the
  // vector epilogue preheader.
8105   Type *IdxTy = Legal->getWidestInductionType();
8106   PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
8107                                          LoopVectorPreHeader->getFirstNonPHI());
8108   EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
8109   EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
8110                            EPI.MainLoopIterationCountCheck);
8111 
8112   // Generate the induction variable.
8113   OldInduction = Legal->getPrimaryInduction();
8114   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
8115   Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
8116   Value *StartIdx = EPResumeVal;
8117   Induction =
8118       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
8119                               getDebugLocFromInstOrOperands(OldInduction));
8120 
8121   // Generate induction resume values. These variables save the new starting
8122   // indexes for the scalar loop. They are used to test if there are any tail
8123   // iterations left once the vector loop has completed.
8124   // Note that when the vectorized epilogue is skipped due to iteration count
8125   // check, then the resume value for the induction variable comes from
8126   // the trip count of the main vector loop, hence passing the AdditionalBypass
8127   // argument.
8128   createInductionResumeValues(Lp, CountRoundDown,
8129                               {VecEpilogueIterationCountCheck,
8130                                EPI.VectorTripCount} /* AdditionalBypass */);
8131 
8132   AddRuntimeUnrollDisableMetaData(Lp);
8133   return completeLoopSkeleton(Lp, OrigLoopID);
8134 }
8135 
8136 BasicBlock *
8137 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
8138     Loop *L, BasicBlock *Bypass, BasicBlock *Insert) {
8139 
8140   assert(EPI.TripCount &&
8141          "Expected trip count to have been safed in the first pass.");
8142   assert(
8143       (!isa<Instruction>(EPI.TripCount) ||
8144        DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
8145       "saved trip count does not dominate insertion point.");
8146   Value *TC = EPI.TripCount;
8147   IRBuilder<> Builder(Insert->getTerminator());
8148   Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");
8149 
8150   // Generate code to check if the loop's trip count is less than VF * UF of the
8151   // vector epilogue loop.
8152   auto P =
8153       Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
8154 
8155   Value *CheckMinIters = Builder.CreateICmp(
8156       P, Count,
8157       ConstantInt::get(Count->getType(),
8158                        EPI.EpilogueVF.getKnownMinValue() * EPI.EpilogueUF),
8159       "min.epilog.iters.check");
8160 
8161   ReplaceInstWithInst(
8162       Insert->getTerminator(),
8163       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8164 
8165   LoopBypassBlocks.push_back(Insert);
8166   return Insert;
8167 }
8168 
8169 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
8170   LLVM_DEBUG({
8171     dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
8172            << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue()
8173            << ", Main Loop UF:" << EPI.MainLoopUF
8174            << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue()
8175            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8176   });
8177 }
8178 
8179 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
8180   DEBUG_WITH_TYPE(VerboseDebug, {
8181     dbgs() << "final fn:\n" << *Induction->getFunction() << "\n";
8182   });
8183 }
8184 
8185 bool LoopVectorizationPlanner::getDecisionAndClampRange(
8186     const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
8187   assert(!Range.isEmpty() && "Trying to test an empty VF range.");
8188   bool PredicateAtRangeStart = Predicate(Range.Start);
8189 
8190   for (ElementCount TmpVF = Range.Start * 2;
8191        ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
8192     if (Predicate(TmpVF) != PredicateAtRangeStart) {
8193       Range.End = TmpVF;
8194       break;
8195     }
8196 
8197   return PredicateAtRangeStart;
8198 }
8199 
8200 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
8201 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
8202 /// of VF's starting at a given VF and extending it as much as possible. Each
8203 /// vectorization decision can potentially shorten this sub-range during
8204 /// buildVPlan().
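/// For example (illustrative), with MinVF = 1 and MaxVF = 8 this might create
/// three plans covering the sub-ranges {1}, {2, 4} and {8}, depending on where
/// the per-VF decisions change.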
8205 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
8206                                            ElementCount MaxVF) {
8207   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8208   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8209     VFRange SubRange = {VF, MaxVFPlusOne};
8210     VPlans.push_back(buildVPlan(SubRange));
8211     VF = SubRange.End;
8212   }
8213 }
8214 
8215 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
8216                                          VPlanPtr &Plan) {
8217   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
8218 
8219   // Look for cached value.
8220   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
8221   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
8222   if (ECEntryIt != EdgeMaskCache.end())
8223     return ECEntryIt->second;
8224 
8225   VPValue *SrcMask = createBlockInMask(Src, Plan);
8226 
8227   // The terminator has to be a branch inst!
8228   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
8229   assert(BI && "Unexpected terminator found");
8230 
8231   if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
8232     return EdgeMaskCache[Edge] = SrcMask;
8233 
8234   // If source is an exiting block, we know the exit edge is dynamically dead
8235   // in the vector loop, and thus we don't need to restrict the mask.  Avoid
8236   // adding uses of an otherwise potentially dead instruction.
8237   if (OrigLoop->isLoopExiting(Src))
8238     return EdgeMaskCache[Edge] = SrcMask;
8239 
8240   VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition());
8241   assert(EdgeMask && "No Edge Mask found for condition");
8242 
8243   if (BI->getSuccessor(0) != Dst)
8244     EdgeMask = Builder.createNot(EdgeMask);
8245 
8246   if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND.
8247     // The condition is 'SrcMask && EdgeMask', which is equivalent to
8248     // 'select i1 SrcMask, i1 EdgeMask, i1 false'.
8249     // The select version does not introduce new UB if SrcMask is false and
8250     // EdgeMask is poison. Using 'and' here introduces undefined behavior.
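    // For example, instead of
    //   %mask = and i1 %SrcMask, %EdgeMask
    // we effectively emit
    //   %mask = select i1 %SrcMask, i1 %EdgeMask, i1 false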
8251     VPValue *False = Plan->getOrAddVPValue(
8252         ConstantInt::getFalse(BI->getCondition()->getType()));
8253     EdgeMask = Builder.createSelect(SrcMask, EdgeMask, False);
8254   }
8255 
8256   return EdgeMaskCache[Edge] = EdgeMask;
8257 }
8258 
8259 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
8260   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
8261 
8262   // Look for cached value.
8263   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
8264   if (BCEntryIt != BlockMaskCache.end())
8265     return BCEntryIt->second;
8266 
8267   // All-one mask is modelled as no-mask following the convention for masked
8268   // load/store/gather/scatter. Initialize BlockMask to no-mask.
8269   VPValue *BlockMask = nullptr;
8270 
8271   if (OrigLoop->getHeader() == BB) {
8272     if (!CM.blockNeedsPredication(BB))
8273       return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
8274 
8275     // Create the block in mask as the first non-phi instruction in the block.
8276     VPBuilder::InsertPointGuard Guard(Builder);
8277     auto NewInsertionPoint = Builder.getInsertBlock()->getFirstNonPhi();
8278     Builder.setInsertPoint(Builder.getInsertBlock(), NewInsertionPoint);
8279 
8280     // Introduce the early-exit compare IV <= BTC to form header block mask.
8281     // This is used instead of IV < TC because TC may wrap, unlike BTC.
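    // For example, an i8 loop that runs 256 iterations has TC = 0 after
    // wrapping, while BTC = 255 is still representable, so "IV <= BTC"
    // remains correct.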
8282     // Start by constructing the desired canonical IV.
8283     VPValue *IV = nullptr;
8284     if (Legal->getPrimaryInduction())
8285       IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction());
8286     else {
8287       auto IVRecipe = new VPWidenCanonicalIVRecipe();
8288       Builder.getInsertBlock()->insert(IVRecipe, NewInsertionPoint);
8289       IV = IVRecipe->getVPValue();
8290     }
8291     VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
8292     bool TailFolded = !CM.isScalarEpilogueAllowed();
8293 
8294     if (TailFolded && CM.TTI.emitGetActiveLaneMask()) {
      // While ActiveLaneMask is a binary op that consumes the loop tripcount
      // as a second argument, we only pass the IV here and extract the
      // tripcount from the transform state where codegen of the VP
      // instructions happens.
8299       BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV});
8300     } else {
8301       BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
8302     }
8303     return BlockMaskCache[BB] = BlockMask;
8304   }
8305 
8306   // This is the block mask. We OR all incoming edges.
8307   for (auto *Predecessor : predecessors(BB)) {
8308     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
8309     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
8310       return BlockMaskCache[BB] = EdgeMask;
8311 
8312     if (!BlockMask) { // BlockMask has its initialized nullptr value.
8313       BlockMask = EdgeMask;
8314       continue;
8315     }
8316 
8317     BlockMask = Builder.createOr(BlockMask, EdgeMask);
8318   }
8319 
8320   return BlockMaskCache[BB] = BlockMask;
8321 }
8322 
8323 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I, VFRange &Range,
8324                                                 VPlanPtr &Plan) {
8325   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
8326          "Must be called with either a load or store");
8327 
8328   auto willWiden = [&](ElementCount VF) -> bool {
8329     if (VF.isScalar())
8330       return false;
8331     LoopVectorizationCostModel::InstWidening Decision =
8332         CM.getWideningDecision(I, VF);
8333     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
8334            "CM decision should be taken at this point.");
8335     if (Decision == LoopVectorizationCostModel::CM_Interleave)
8336       return true;
8337     if (CM.isScalarAfterVectorization(I, VF) ||
8338         CM.isProfitableToScalarize(I, VF))
8339       return false;
8340     return Decision != LoopVectorizationCostModel::CM_Scalarize;
8341   };
8342 
8343   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8344     return nullptr;
8345 
8346   VPValue *Mask = nullptr;
8347   if (Legal->isMaskRequired(I))
8348     Mask = createBlockInMask(I->getParent(), Plan);
8349 
8350   VPValue *Addr = Plan->getOrAddVPValue(getLoadStorePointerOperand(I));
8351   if (LoadInst *Load = dyn_cast<LoadInst>(I))
8352     return new VPWidenMemoryInstructionRecipe(*Load, Addr, Mask);
8353 
8354   StoreInst *Store = cast<StoreInst>(I);
8355   VPValue *StoredValue = Plan->getOrAddVPValue(Store->getValueOperand());
8356   return new VPWidenMemoryInstructionRecipe(*Store, Addr, StoredValue, Mask);
8357 }
8358 
8359 VPWidenIntOrFpInductionRecipe *
8360 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi, VPlan &Plan) const {
8361   // Check if this is an integer or fp induction. If so, build the recipe that
8362   // produces its scalar and vector values.
8363   InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
8364   if (II.getKind() == InductionDescriptor::IK_IntInduction ||
8365       II.getKind() == InductionDescriptor::IK_FpInduction) {
8366     VPValue *Start = Plan.getOrAddVPValue(II.getStartValue());
8367     const SmallVectorImpl<Instruction *> &Casts = II.getCastInsts();
8368     return new VPWidenIntOrFpInductionRecipe(
8369         Phi, Start, Casts.empty() ? nullptr : Casts.front());
8370   }
8371 
8372   return nullptr;
8373 }
8374 
8375 VPWidenIntOrFpInductionRecipe *
8376 VPRecipeBuilder::tryToOptimizeInductionTruncate(TruncInst *I, VFRange &Range,
8377                                                 VPlan &Plan) const {
8378   // Optimize the special case where the source is a constant integer
8379   // induction variable. Notice that we can only optimize the 'trunc' case
8380   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
8381   // (c) other casts depend on pointer size.
8382 
8383   // Determine whether \p K is a truncation based on an induction variable that
8384   // can be optimized.
8385   auto isOptimizableIVTruncate =
8386       [&](Instruction *K) -> std::function<bool(ElementCount)> {
8387     return [=](ElementCount VF) -> bool {
8388       return CM.isOptimizableIVTruncate(K, VF);
8389     };
8390   };
8391 
8392   if (LoopVectorizationPlanner::getDecisionAndClampRange(
8393           isOptimizableIVTruncate(I), Range)) {
8394 
8395     InductionDescriptor II =
8396         Legal->getInductionVars().lookup(cast<PHINode>(I->getOperand(0)));
8397     VPValue *Start = Plan.getOrAddVPValue(II.getStartValue());
8398     return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)),
8399                                              Start, nullptr, I);
8400   }
8401   return nullptr;
8402 }
8403 
8404 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi, VPlanPtr &Plan) {
8405   // If all incoming values are equal, the incoming VPValue can be used directly
8406   // instead of creating a new VPBlendRecipe.
8407   Value *FirstIncoming = Phi->getIncomingValue(0);
8408   if (all_of(Phi->incoming_values(), [FirstIncoming](const Value *Inc) {
8409         return FirstIncoming == Inc;
8410       })) {
8411     return Plan->getOrAddVPValue(Phi->getIncomingValue(0));
8412   }
8413 
8414   // We know that all PHIs in non-header blocks are converted into selects, so
8415   // we don't have to worry about the insertion order and we can just use the
8416   // builder. At this point we generate the predication tree. There may be
8417   // duplications since this is a simple recursive scan, but future
8418   // optimizations will clean it up.
8419   SmallVector<VPValue *, 2> Operands;
8420   unsigned NumIncoming = Phi->getNumIncomingValues();
8421 
8422   for (unsigned In = 0; In < NumIncoming; In++) {
8423     VPValue *EdgeMask =
8424       createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
8425     assert((EdgeMask || NumIncoming == 1) &&
8426            "Multiple predecessors with one having a full mask");
8427     Operands.push_back(Plan->getOrAddVPValue(Phi->getIncomingValue(In)));
8428     if (EdgeMask)
8429       Operands.push_back(EdgeMask);
8430   }
8431   return toVPRecipeResult(new VPBlendRecipe(Phi, Operands));
8432 }
8433 
8434 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, VFRange &Range,
8435                                                    VPlan &Plan) const {
8436 
8437   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8438       [this, CI](ElementCount VF) {
8439         return CM.isScalarWithPredication(CI, VF);
8440       },
8441       Range);
8442 
8443   if (IsPredicated)
8444     return nullptr;
8445 
8446   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8447   if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
8448              ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
8449              ID == Intrinsic::pseudoprobe ||
8450              ID == Intrinsic::experimental_noalias_scope_decl))
8451     return nullptr;
8452 
8453   auto willWiden = [&](ElementCount VF) -> bool {
8454     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    // The following case may be scalarized depending on the VF.
    // The flag indicates whether we use an intrinsic or a plain call for the
    // vectorized version of the instruction, based on whether the intrinsic
    // call is cheaper than the library call.
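    //
    // For example (a sketch, assuming suitable TLI mappings exist), a call to
    // sinf may be widened either to a vector library call or to the llvm.sin
    // intrinsic, whichever the cost model finds cheaper; if neither form is
    // profitable, no widening recipe is created and the call is scalarized.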
8459     bool NeedToScalarize = false;
8460     InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
8461     InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0;
8462     bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
8463     assert(IntrinsicCost.isValid() && CallCost.isValid() &&
8464            "Cannot have invalid costs while widening");
8465     return UseVectorIntrinsic || !NeedToScalarize;
8466   };
8467 
8468   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8469     return nullptr;
8470 
8471   return new VPWidenCallRecipe(*CI, Plan.mapToVPValues(CI->arg_operands()));
8472 }
8473 
8474 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
8475   assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
8476          !isa<StoreInst>(I) && "Instruction should have been handled earlier");
  // The instruction should be widened, unless it is scalar after
  // vectorization, scalarization is profitable, or it is predicated.
8479   auto WillScalarize = [this, I](ElementCount VF) -> bool {
8480     return CM.isScalarAfterVectorization(I, VF) ||
8481            CM.isProfitableToScalarize(I, VF) ||
8482            CM.isScalarWithPredication(I, VF);
8483   };
8484   return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
8485                                                              Range);
8486 }
8487 
8488 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, VPlan &Plan) const {
8489   auto IsVectorizableOpcode = [](unsigned Opcode) {
8490     switch (Opcode) {
8491     case Instruction::Add:
8492     case Instruction::And:
8493     case Instruction::AShr:
8494     case Instruction::BitCast:
8495     case Instruction::FAdd:
8496     case Instruction::FCmp:
8497     case Instruction::FDiv:
8498     case Instruction::FMul:
8499     case Instruction::FNeg:
8500     case Instruction::FPExt:
8501     case Instruction::FPToSI:
8502     case Instruction::FPToUI:
8503     case Instruction::FPTrunc:
8504     case Instruction::FRem:
8505     case Instruction::FSub:
8506     case Instruction::ICmp:
8507     case Instruction::IntToPtr:
8508     case Instruction::LShr:
8509     case Instruction::Mul:
8510     case Instruction::Or:
8511     case Instruction::PtrToInt:
8512     case Instruction::SDiv:
8513     case Instruction::Select:
8514     case Instruction::SExt:
8515     case Instruction::Shl:
8516     case Instruction::SIToFP:
8517     case Instruction::SRem:
8518     case Instruction::Sub:
8519     case Instruction::Trunc:
8520     case Instruction::UDiv:
8521     case Instruction::UIToFP:
8522     case Instruction::URem:
8523     case Instruction::Xor:
8524     case Instruction::ZExt:
8525       return true;
8526     }
8527     return false;
8528   };
8529 
8530   if (!IsVectorizableOpcode(I->getOpcode()))
8531     return nullptr;
8532 
8533   // Success: widen this instruction.
8534   return new VPWidenRecipe(*I, Plan.mapToVPValues(I->operands()));
8535 }
8536 
8537 VPBasicBlock *VPRecipeBuilder::handleReplication(
8538     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
8539     DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe,
8540     VPlanPtr &Plan) {
8541   bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
8542       [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
8543       Range);
8544 
8545   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8546       [&](ElementCount VF) { return CM.isScalarWithPredication(I, VF); },
8547       Range);
8548 
8549   auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
8550                                        IsUniform, IsPredicated);
8551   setRecipe(I, Recipe);
8552   Plan->addVPValue(I, Recipe);
8553 
8554   // Find if I uses a predicated instruction. If so, it will use its scalar
8555   // value. Avoid hoisting the insert-element which packs the scalar value into
8556   // a vector value, as that happens iff all users use the vector value.
8557   for (auto &Op : I->operands())
8558     if (auto *PredInst = dyn_cast<Instruction>(Op))
8559       if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end())
8560         PredInst2Recipe[PredInst]->setAlsoPack(false);
8561 
  // Finalize the recipe for Instr, handling the unpredicated case first.
8563   if (!IsPredicated) {
8564     LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
8565     VPBB->appendRecipe(Recipe);
8566     return VPBB;
8567   }
8568   LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
8569   assert(VPBB->getSuccessors().empty() &&
8570          "VPBB has successors when handling predicated replication.");
8571   // Record predicated instructions for above packing optimizations.
8572   PredInst2Recipe[I] = Recipe;
8573   VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
8574   VPBlockUtils::insertBlockAfter(Region, VPBB);
8575   auto *RegSucc = new VPBasicBlock();
8576   VPBlockUtils::insertBlockAfter(RegSucc, Region);
8577   return RegSucc;
8578 }
8579 
8580 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
8581                                                       VPRecipeBase *PredRecipe,
8582                                                       VPlanPtr &Plan) {
8583   // Instructions marked for predication are replicated and placed under an
8584   // if-then construct to prevent side-effects.
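  //
  // The region built below has the following shape (a sketch; for, e.g., a
  // predicated udiv):
  //
  //   pred.udiv.entry:
  //     BRANCH-ON-MASK             ; VPBranchOnMaskRecipe on the block-in mask
  //   pred.udiv.if:
  //     REPLICATE ... = udiv ...   ; PredRecipe, executed per active lane
  //   pred.udiv.continue:
  //     PHI-PREDICATED-INSTRUCTION ; PHIRecipe, if the result is used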
8585 
8586   // Generate recipes to compute the block mask for this region.
8587   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
8588 
8589   // Build the triangular if-then region.
8590   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
8591   assert(Instr->getParent() && "Predicated instruction not in any basic block");
8592   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
8593   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
8594   auto *PHIRecipe = Instr->getType()->isVoidTy()
8595                         ? nullptr
8596                         : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr));
8597   if (PHIRecipe) {
8598     Plan->removeVPValueFor(Instr);
8599     Plan->addVPValue(Instr, PHIRecipe);
8600   }
8601   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
8602   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
8603   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
8604 
8605   // Note: first set Entry as region entry and then connect successors starting
8606   // from it in order, to propagate the "parent" of each VPBasicBlock.
8607   VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
8608   VPBlockUtils::connectBlocks(Pred, Exit);
8609 
8610   return Region;
8611 }
8612 
8613 VPRecipeOrVPValueTy VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
8614                                                             VFRange &Range,
8615                                                             VPlanPtr &Plan) {
8616   // First, check for specific widening recipes that deal with calls, memory
8617   // operations, inductions and Phi nodes.
8618   if (auto *CI = dyn_cast<CallInst>(Instr))
8619     return toVPRecipeResult(tryToWidenCall(CI, Range, *Plan));
8620 
8621   if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8622     return toVPRecipeResult(tryToWidenMemory(Instr, Range, Plan));
8623 
8624   VPRecipeBase *Recipe;
8625   if (auto Phi = dyn_cast<PHINode>(Instr)) {
8626     if (Phi->getParent() != OrigLoop->getHeader())
8627       return tryToBlend(Phi, Plan);
8628     if ((Recipe = tryToOptimizeInductionPHI(Phi, *Plan)))
8629       return toVPRecipeResult(Recipe);
8630 
8631     if (Legal->isReductionVariable(Phi)) {
8632       RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
8633       VPValue *StartV =
8634           Plan->getOrAddVPValue(RdxDesc.getRecurrenceStartValue());
8635       return toVPRecipeResult(new VPWidenPHIRecipe(Phi, RdxDesc, *StartV));
8636     }
8637 
8638     return toVPRecipeResult(new VPWidenPHIRecipe(Phi));
8639   }
8640 
8641   if (isa<TruncInst>(Instr) && (Recipe = tryToOptimizeInductionTruncate(
8642                                     cast<TruncInst>(Instr), Range, *Plan)))
8643     return toVPRecipeResult(Recipe);
8644 
8645   if (!shouldWiden(Instr, Range))
8646     return nullptr;
8647 
8648   if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
8649     return toVPRecipeResult(new VPWidenGEPRecipe(
8650         GEP, Plan->mapToVPValues(GEP->operands()), OrigLoop));
8651 
8652   if (auto *SI = dyn_cast<SelectInst>(Instr)) {
8653     bool InvariantCond =
8654         PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
8655     return toVPRecipeResult(new VPWidenSelectRecipe(
8656         *SI, Plan->mapToVPValues(SI->operands()), InvariantCond));
8657   }
8658 
8659   return toVPRecipeResult(tryToWiden(Instr, *Plan));
8660 }
8661 
8662 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8663                                                         ElementCount MaxVF) {
8664   assert(OrigLoop->isInnermost() && "Inner loop expected.");
8665 
8666   // Collect instructions from the original loop that will become trivially dead
8667   // in the vectorized loop. We don't need to vectorize these instructions. For
8668   // example, original induction update instructions can become dead because we
8669   // separately emit induction "steps" when generating code for the new loop.
8670   // Similarly, we create a new latch condition when setting up the structure
8671   // of the new loop, so the old one can become dead.
8672   SmallPtrSet<Instruction *, 4> DeadInstructions;
8673   collectTriviallyDeadInstructions(DeadInstructions);
8674 
8675   // Add assume instructions we need to drop to DeadInstructions, to prevent
8676   // them from being added to the VPlan.
  // TODO: We only need to drop assumes in blocks that get flattened. If the
8678   // control flow is preserved, we should keep them.
8679   auto &ConditionalAssumes = Legal->getConditionalAssumes();
8680   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
8681 
8682   DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
8683   // Dead instructions do not need sinking. Remove them from SinkAfter.
8684   for (Instruction *I : DeadInstructions)
8685     SinkAfter.erase(I);
8686 
8687   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
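  // Build a VPlan for each sub-range of VFs over which the widening decisions
  // are uniform. For example (a sketch), with MinVF = 2 and MaxVF = 8 the
  // first candidate range is [2, 9); if some decision only holds for VF < 4,
  // getDecisionAndClampRange clamps it to [2, 4) and the next plan is built
  // starting at VF = 4.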
8688   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8689     VFRange SubRange = {VF, MaxVFPlusOne};
8690     VPlans.push_back(
8691         buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
8692     VF = SubRange.End;
8693   }
8694 }
8695 
8696 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
8697     VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
8698     const DenseMap<Instruction *, Instruction *> &SinkAfter) {
8699 
8700   // Hold a mapping from predicated instructions to their recipes, in order to
8701   // fix their AlsoPack behavior if a user is determined to replicate and use a
8702   // scalar instead of vector value.
8703   DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe;
8704 
8705   SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
8706 
8707   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
8708 
8709   // ---------------------------------------------------------------------------
8710   // Pre-construction: record ingredients whose recipes we'll need to further
8711   // process after constructing the initial VPlan.
8712   // ---------------------------------------------------------------------------
8713 
8714   // Mark instructions we'll need to sink later and their targets as
8715   // ingredients whose recipe we'll need to record.
8716   for (auto &Entry : SinkAfter) {
8717     RecipeBuilder.recordRecipeOf(Entry.first);
8718     RecipeBuilder.recordRecipeOf(Entry.second);
8719   }
8720   for (auto &Reduction : CM.getInLoopReductionChains()) {
8721     PHINode *Phi = Reduction.first;
8722     RecurKind Kind = Legal->getReductionVars()[Phi].getRecurrenceKind();
8723     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
8724 
8725     RecipeBuilder.recordRecipeOf(Phi);
8726     for (auto &R : ReductionOperations) {
8727       RecipeBuilder.recordRecipeOf(R);
      // For min/max reductions, where we have a pair of icmp/select, we also
      // need to record the ICmp recipe, so it can be removed later.
8730       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
8731         RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
8732     }
8733   }
8734 
8735   // For each interleave group which is relevant for this (possibly trimmed)
8736   // Range, add it to the set of groups to be later applied to the VPlan and add
8737   // placeholders for its members' Recipes which we'll be replacing with a
8738   // single VPInterleaveRecipe.
8739   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
8740     auto applyIG = [IG, this](ElementCount VF) -> bool {
8741       return (VF.isVector() && // Query is illegal for VF == 1
8742               CM.getWideningDecision(IG->getInsertPos(), VF) ==
8743                   LoopVectorizationCostModel::CM_Interleave);
8744     };
8745     if (!getDecisionAndClampRange(applyIG, Range))
8746       continue;
8747     InterleaveGroups.insert(IG);
8748     for (unsigned i = 0; i < IG->getFactor(); i++)
8749       if (Instruction *Member = IG->getMember(i))
8750         RecipeBuilder.recordRecipeOf(Member);
8751   };
8752 
8753   // ---------------------------------------------------------------------------
8754   // Build initial VPlan: Scan the body of the loop in a topological order to
8755   // visit each basic block after having visited its predecessor basic blocks.
8756   // ---------------------------------------------------------------------------
8757 
8758   // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
8759   auto Plan = std::make_unique<VPlan>();
8760   VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
8761   Plan->setEntry(VPBB);
8762 
8763   // Scan the body of the loop in a topological order to visit each basic block
8764   // after having visited its predecessor basic blocks.
8765   LoopBlocksDFS DFS(OrigLoop);
8766   DFS.perform(LI);
8767 
8768   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    // Relevant instructions from basic block BB will be grouped into VPRecipe
    // ingredients and will fill a new VPBasicBlock.
8771     unsigned VPBBsForBB = 0;
8772     auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
8773     VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
8774     VPBB = FirstVPBBForBB;
8775     Builder.setInsertPoint(VPBB);
8776 
8777     // Introduce each ingredient into VPlan.
    // TODO: Model and preserve debug intrinsics in VPlan.
8779     for (Instruction &I : BB->instructionsWithoutDebug()) {
8780       Instruction *Instr = &I;
8781 
8782       // First filter out irrelevant instructions, to ensure no recipes are
8783       // built for them.
8784       if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
8785         continue;
8786 
8787       if (auto RecipeOrValue =
8788               RecipeBuilder.tryToCreateWidenRecipe(Instr, Range, Plan)) {
8789         // If Instr can be simplified to an existing VPValue, use it.
8790         if (RecipeOrValue.is<VPValue *>()) {
8791           Plan->addVPValue(Instr, RecipeOrValue.get<VPValue *>());
8792           continue;
8793         }
8794         // Otherwise, add the new recipe.
8795         VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>();
8796         for (auto *Def : Recipe->definedValues()) {
8797           auto *UV = Def->getUnderlyingValue();
8798           Plan->addVPValue(UV, Def);
8799         }
8800 
8801         RecipeBuilder.setRecipe(Instr, Recipe);
8802         VPBB->appendRecipe(Recipe);
8803         continue;
8804       }
8805 
      // Otherwise, if all widening options failed, the instruction is to be
      // replicated. This may create a successor for VPBB.
8808       VPBasicBlock *NextVPBB = RecipeBuilder.handleReplication(
8809           Instr, Range, VPBB, PredInst2Recipe, Plan);
8810       if (NextVPBB != VPBB) {
8811         VPBB = NextVPBB;
8812         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
8813                                     : "");
8814       }
8815     }
8816   }
8817 
  // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks
  // may also be empty, such as the last one, VPBB, reflecting original
  // basic blocks with no recipes.
8821   VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
8822   assert(PreEntry->empty() && "Expecting empty pre-entry block.");
8823   VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
8824   VPBlockUtils::disconnectBlocks(PreEntry, Entry);
8825   delete PreEntry;
8826 
8827   // ---------------------------------------------------------------------------
8828   // Transform initial VPlan: Apply previously taken decisions, in order, to
8829   // bring the VPlan to its final state.
8830   // ---------------------------------------------------------------------------
8831 
8832   // Apply Sink-After legal constraints.
8833   for (auto &Entry : SinkAfter) {
8834     VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
8835     VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
8836     // If the target is in a replication region, make sure to move Sink to the
8837     // block after it, not into the replication region itself.
8838     if (auto *Region =
8839             dyn_cast_or_null<VPRegionBlock>(Target->getParent()->getParent())) {
8840       if (Region->isReplicator()) {
8841         assert(Region->getNumSuccessors() == 1 && "Expected SESE region!");
8842         VPBasicBlock *NextBlock =
8843             cast<VPBasicBlock>(Region->getSuccessors().front());
8844         Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi());
8845         continue;
8846       }
8847     }
8848     Sink->moveAfter(Target);
8849   }
8850 
8851   // Interleave memory: for each Interleave Group we marked earlier as relevant
8852   // for this VPlan, replace the Recipes widening its memory instructions with a
8853   // single VPInterleaveRecipe at its insertion point.
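  //
  // For example (a sketch), for a group of two interleaved loads
  //   %l0 = load i32, i32* %p0   ; the group's insert position
  //   %l1 = load i32, i32* %p1
  // both widening recipes are replaced by one VPInterleaveRecipe whose
  // defined values take over all uses of %l0 and %l1.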
8854   for (auto IG : InterleaveGroups) {
8855     auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
8856         RecipeBuilder.getRecipe(IG->getInsertPos()));
8857     SmallVector<VPValue *, 4> StoredValues;
8858     for (unsigned i = 0; i < IG->getFactor(); ++i)
8859       if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i)))
8860         StoredValues.push_back(Plan->getOrAddVPValue(SI->getOperand(0)));
8861 
8862     auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues,
8863                                         Recipe->getMask());
8864     VPIG->insertBefore(Recipe);
8865     unsigned J = 0;
8866     for (unsigned i = 0; i < IG->getFactor(); ++i)
8867       if (Instruction *Member = IG->getMember(i)) {
8868         if (!Member->getType()->isVoidTy()) {
8869           VPValue *OriginalV = Plan->getVPValue(Member);
8870           Plan->removeVPValueFor(Member);
8871           Plan->addVPValue(Member, VPIG->getVPValue(J));
8872           OriginalV->replaceAllUsesWith(VPIG->getVPValue(J));
8873           J++;
8874         }
8875         RecipeBuilder.getRecipe(Member)->eraseFromParent();
8876       }
8877   }
8878 
8879   // Adjust the recipes for any inloop reductions.
8880   if (Range.Start.isVector())
8881     adjustRecipesForInLoopReductions(Plan, RecipeBuilder);
8882 
8883   // Finally, if tail is folded by masking, introduce selects between the phi
8884   // and the live-out instruction of each reduction, at the end of the latch.
8885   if (CM.foldTailByMasking() && !Legal->getReductionVars().empty()) {
8886     Builder.setInsertPoint(VPBB);
8887     auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
8888     for (auto &Reduction : Legal->getReductionVars()) {
8889       if (CM.isInLoopReduction(Reduction.first))
8890         continue;
8891       VPValue *Phi = Plan->getOrAddVPValue(Reduction.first);
8892       VPValue *Red = Plan->getOrAddVPValue(Reduction.second.getLoopExitInstr());
8893       Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi});
8894     }
8895   }
8896 
8897   std::string PlanName;
8898   raw_string_ostream RSO(PlanName);
8899   ElementCount VF = Range.Start;
8900   Plan->addVF(VF);
8901   RSO << "Initial VPlan for VF={" << VF;
8902   for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) {
8903     Plan->addVF(VF);
8904     RSO << "," << VF;
8905   }
8906   RSO << "},UF>=1";
8907   RSO.flush();
8908   Plan->setName(PlanName);
8909 
8910   return Plan;
8911 }
8912 
8913 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
8914   // Outer loop handling: They may require CFG and instruction level
8915   // transformations before even evaluating whether vectorization is profitable.
8916   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
8917   // the vectorization pipeline.
8918   assert(!OrigLoop->isInnermost());
8919   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
8920 
8921   // Create new empty VPlan
8922   auto Plan = std::make_unique<VPlan>();
8923 
8924   // Build hierarchical CFG
8925   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
8926   HCFGBuilder.buildHierarchicalCFG();
8927 
8928   for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
8929        VF *= 2)
8930     Plan->addVF(VF);
8931 
8932   if (EnableVPlanPredication) {
8933     VPlanPredicator VPP(*Plan);
8934     VPP.predicate();
8935 
8936     // Avoid running transformation to recipes until masked code generation in
8937     // VPlan-native path is in place.
8938     return Plan;
8939   }
8940 
8941   SmallPtrSet<Instruction *, 1> DeadInstructions;
8942   VPlanTransforms::VPInstructionsToVPRecipes(
8943       OrigLoop, Plan, Legal->getInductionVars(), DeadInstructions);
8944   return Plan;
8945 }
8946 
// Adjust the recipes for any in-loop reductions. The chain of instructions
// leading from the loop exit instr to the phi needs to be converted to
// reductions, with one operand being vector and the other being the scalar
// reduction chain.
8951 void LoopVectorizationPlanner::adjustRecipesForInLoopReductions(
8952     VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder) {
8953   for (auto &Reduction : CM.getInLoopReductionChains()) {
8954     PHINode *Phi = Reduction.first;
8955     RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
8956     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
8957 
    // ReductionOperations are ordered top-down from the phi's use to the
    // LoopExitValue. We keep track of the previous item (the Chain) to tell
    // which of the two operands will remain scalar and which will be reduced.
    // For minmax the chain will be the select instructions.
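    //
    // For example (a sketch), for an in-loop integer add reduction
    //   %phi  = phi i32 [ %start, %ph ], [ %add2, %latch ]
    //   %add1 = add i32 %phi, %a
    //   %add2 = add i32 %add1, %b
    // the chain is %phi -> %add1 -> %add2; each add is replaced below by a
    // VPReductionRecipe whose ChainOp is the previous chain value and whose
    // VecOp is the remaining operand (%a resp. %b).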
8962     Instruction *Chain = Phi;
8963     for (Instruction *R : ReductionOperations) {
8964       VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
8965       RecurKind Kind = RdxDesc.getRecurrenceKind();
8966 
8967       VPValue *ChainOp = Plan->getVPValue(Chain);
8968       unsigned FirstOpId;
8969       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
8970         assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
8971                "Expected to replace a VPWidenSelectSC");
8972         FirstOpId = 1;
8973       } else {
8974         assert(isa<VPWidenRecipe>(WidenRecipe) &&
8975                "Expected to replace a VPWidenSC");
8976         FirstOpId = 0;
8977       }
8978       unsigned VecOpId =
8979           R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
8980       VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));
8981 
8982       auto *CondOp = CM.foldTailByMasking()
8983                          ? RecipeBuilder.createBlockInMask(R->getParent(), Plan)
8984                          : nullptr;
8985       VPReductionRecipe *RedRecipe = new VPReductionRecipe(
8986           &RdxDesc, R, ChainOp, VecOp, CondOp, TTI);
8987       WidenRecipe->getVPValue()->replaceAllUsesWith(RedRecipe);
8988       Plan->removeVPValueFor(R);
8989       Plan->addVPValue(R, RedRecipe);
8990       WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator());
8992       WidenRecipe->eraseFromParent();
8993 
8994       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
8995         VPRecipeBase *CompareRecipe =
8996             RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
8997         assert(isa<VPWidenRecipe>(CompareRecipe) &&
8998                "Expected to replace a VPWidenSC");
8999         assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 &&
9000                "Expected no remaining users");
9001         CompareRecipe->eraseFromParent();
9002       }
9003       Chain = R;
9004     }
9005   }
9006 }
9007 
9008 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
9009                                VPSlotTracker &SlotTracker) const {
9010   O << Indent << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
9011   IG->getInsertPos()->printAsOperand(O, false);
9012   O << ", ";
9013   getAddr()->printAsOperand(O, SlotTracker);
9014   VPValue *Mask = getMask();
9015   if (Mask) {
9016     O << ", ";
9017     Mask->printAsOperand(O, SlotTracker);
9018   }
9019   for (unsigned i = 0; i < IG->getFactor(); ++i)
9020     if (Instruction *I = IG->getMember(i))
9021       O << "\\l\" +\n" << Indent << "\"  " << VPlanIngredient(I) << " " << i;
9022 }
9023 
9024 void VPWidenCallRecipe::execute(VPTransformState &State) {
9025   State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this,
9026                                   *this, State);
9027 }
9028 
9029 void VPWidenSelectRecipe::execute(VPTransformState &State) {
9030   State.ILV->widenSelectInstruction(*cast<SelectInst>(getUnderlyingInstr()),
9031                                     this, *this, InvariantCond, State);
9032 }
9033 
9034 void VPWidenRecipe::execute(VPTransformState &State) {
9035   State.ILV->widenInstruction(*getUnderlyingInstr(), this, *this, State);
9036 }
9037 
9038 void VPWidenGEPRecipe::execute(VPTransformState &State) {
9039   State.ILV->widenGEP(cast<GetElementPtrInst>(getUnderlyingInstr()), this,
9040                       *this, State.UF, State.VF, IsPtrLoopInvariant,
9041                       IsIndexLoopInvariant, State);
9042 }
9043 
9044 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
9045   assert(!State.Instance && "Int or FP induction being replicated.");
9046   State.ILV->widenIntOrFpInduction(IV, getStartValue()->getLiveInIRValue(),
9047                                    getTruncInst(), getVPValue(0),
9048                                    getCastValue(), State);
9049 }
9050 
9051 void VPWidenPHIRecipe::execute(VPTransformState &State) {
9052   State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), RdxDesc,
9053                                  getStartValue(), this, State);
9054 }
9055 
9056 void VPBlendRecipe::execute(VPTransformState &State) {
9057   State.ILV->setDebugLocFromInst(State.Builder, Phi);
9058   // We know that all PHIs in non-header blocks are converted into
9059   // selects, so we don't have to worry about the insertion order and we
9060   // can just use the builder.
9061   // At this point we generate the predication tree. There may be
9062   // duplications since this is a simple recursive scan, but future
9063   // optimizations will clean it up.
9064 
9065   unsigned NumIncoming = getNumIncomingValues();
9066 
9067   // Generate a sequence of selects of the form:
9068   // SELECT(Mask3, In3,
9069   //        SELECT(Mask2, In2,
9070   //               SELECT(Mask1, In1,
9071   //                      In0)))
  // Note that Mask0 is never used: lanes for which no path reaches this phi,
  // and which are essentially undef, are taken from In0.
9074   InnerLoopVectorizer::VectorParts Entry(State.UF);
9075   for (unsigned In = 0; In < NumIncoming; ++In) {
9076     for (unsigned Part = 0; Part < State.UF; ++Part) {
9077       // We might have single edge PHIs (blocks) - use an identity
9078       // 'select' for the first PHI operand.
9079       Value *In0 = State.get(getIncomingValue(In), Part);
9080       if (In == 0)
9081         Entry[Part] = In0; // Initialize with the first incoming value.
9082       else {
9083         // Select between the current value and the previous incoming edge
9084         // based on the incoming mask.
9085         Value *Cond = State.get(getMask(In), Part);
9086         Entry[Part] =
9087             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
9088       }
9089     }
9090   }
9091   for (unsigned Part = 0; Part < State.UF; ++Part)
9092     State.set(this, Entry[Part], Part);
9093 }
9094 
9095 void VPInterleaveRecipe::execute(VPTransformState &State) {
9096   assert(!State.Instance && "Interleave group being replicated.");
9097   State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(),
9098                                       getStoredValues(), getMask());
9099 }
9100 
9101 void VPReductionRecipe::execute(VPTransformState &State) {
9102   assert(!State.Instance && "Reduction being replicated.");
9103   for (unsigned Part = 0; Part < State.UF; ++Part) {
9104     RecurKind Kind = RdxDesc->getRecurrenceKind();
9105     Value *NewVecOp = State.get(getVecOp(), Part);
9106     if (VPValue *Cond = getCondOp()) {
9107       Value *NewCond = State.get(Cond, Part);
9108       VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
9109       Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
9110           Kind, VecTy->getElementType());
9111       Constant *IdenVec =
9112           ConstantVector::getSplat(VecTy->getElementCount(), Iden);
9113       Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
9114       NewVecOp = Select;
9115     }
9116     Value *NewRed =
9117         createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
9118     Value *PrevInChain = State.get(getChainOp(), Part);
9119     Value *NextInChain;
9120     if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9121       NextInChain =
9122           createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
9123                          NewRed, PrevInChain);
9124     } else {
9125       NextInChain = State.Builder.CreateBinOp(
9126           (Instruction::BinaryOps)getUnderlyingInstr()->getOpcode(), NewRed,
9127           PrevInChain);
9128     }
9129     State.set(this, NextInChain, Part);
9130   }
9131 }
9132 
9133 void VPReplicateRecipe::execute(VPTransformState &State) {
9134   if (State.Instance) { // Generate a single instance.
9135     assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
9136     State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this,
9137                                     *State.Instance, IsPredicated, State);
9138     // Insert scalar instance packing it into a vector.
9139     if (AlsoPack && State.VF.isVector()) {
9140       // If we're constructing lane 0, initialize to start from poison.
9141       if (State.Instance->Lane.isFirstLane()) {
9142         assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
9143         Value *Poison = PoisonValue::get(
9144             VectorType::get(getUnderlyingValue()->getType(), State.VF));
9145         State.set(this, Poison, State.Instance->Part);
9146       }
9147       State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
9148     }
9149     return;
9150   }
9151 
  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane for
  // each of the UF parts.
9155   unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
9156   assert((!State.VF.isScalable() || IsUniform) &&
9157          "Can't scalarize a scalable vector");
9158   for (unsigned Part = 0; Part < State.UF; ++Part)
9159     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
9160       State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this,
9161                                       VPIteration(Part, Lane), IsPredicated,
9162                                       State);
9163 }
9164 
9165 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
9166   assert(State.Instance && "Branch on Mask works only on single instance.");
9167 
9168   unsigned Part = State.Instance->Part;
9169   unsigned Lane = State.Instance->Lane.getKnownLane();
9170 
9171   Value *ConditionBit = nullptr;
9172   VPValue *BlockInMask = getMask();
9173   if (BlockInMask) {
9174     ConditionBit = State.get(BlockInMask, Part);
9175     if (ConditionBit->getType()->isVectorTy())
9176       ConditionBit = State.Builder.CreateExtractElement(
9177           ConditionBit, State.Builder.getInt32(Lane));
9178   } else // Block in mask is all-one.
9179     ConditionBit = State.Builder.getTrue();
9180 
9181   // Replace the temporary unreachable terminator with a new conditional branch,
9182   // whose two destinations will be set later when they are created.
9183   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
9184   assert(isa<UnreachableInst>(CurrentTerminator) &&
9185          "Expected to replace unreachable terminator with conditional branch.");
9186   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
9187   CondBr->setSuccessor(0, nullptr);
9188   ReplaceInstWithInst(CurrentTerminator, CondBr);
9189 }
9190 
9191 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
9192   assert(State.Instance && "Predicated instruction PHI works per instance.");
9193   Instruction *ScalarPredInst =
9194       cast<Instruction>(State.get(getOperand(0), *State.Instance));
9195   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
9196   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
9197   assert(PredicatingBB && "Predicated block has no single predecessor.");
9198   assert(isa<VPReplicateRecipe>(getOperand(0)) &&
9199          "operand must be VPReplicateRecipe");
9200 
9201   // By current pack/unpack logic we need to generate only a single phi node: if
9202   // a vector value for the predicated instruction exists at this point it means
9203   // the instruction has vector users only, and a phi for the vector value is
9204   // needed. In this case the recipe of the predicated instruction is marked to
9205   // also do that packing, thereby "hoisting" the insert-element sequence.
9206   // Otherwise, a phi node for the scalar value is needed.
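  //
  // A sketch of the vector case (names illustrative), packing lane 1:
  //
  //   pred.x.continue:
  //     %vphi = phi <4 x i32> [ %vec,     %pred.x.entry ],
  //                           [ %vec.ins, %pred.x.if ]
  //
  // where %vec.ins = insertelement <4 x i32> %vec, i32 %s, i32 1 was
  // generated in the predicated block.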
9207   unsigned Part = State.Instance->Part;
9208   if (State.hasVectorValue(getOperand(0), Part)) {
9209     Value *VectorValue = State.get(getOperand(0), Part);
9210     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
9211     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
9212     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
9213     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
9214     if (State.hasVectorValue(this, Part))
9215       State.reset(this, VPhi, Part);
9216     else
9217       State.set(this, VPhi, Part);
9218     // NOTE: Currently we need to update the value of the operand, so the next
9219     // predicated iteration inserts its generated value in the correct vector.
9220     State.reset(getOperand(0), VPhi, Part);
9221   } else {
9222     Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType();
9223     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
9224     Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
9225                      PredicatingBB);
9226     Phi->addIncoming(ScalarPredInst, PredicatedBB);
9227     if (State.hasScalarValue(this, *State.Instance))
9228       State.reset(this, Phi, *State.Instance);
9229     else
9230       State.set(this, Phi, *State.Instance);
9231     // NOTE: Currently we need to update the value of the operand, so the next
9232     // predicated iteration inserts its generated value in the correct vector.
9233     State.reset(getOperand(0), Phi, *State.Instance);
9234   }
9235 }
9236 
9237 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
9238   VPValue *StoredValue = isStore() ? getStoredValue() : nullptr;
9239   State.ILV->vectorizeMemoryInstruction(&Ingredient, State,
9240                                         StoredValue ? nullptr : getVPValue(),
9241                                         getAddr(), StoredValue, getMask());
9242 }
9243 
// Determine how to lower the scalar epilogue, which depends on 1) optimizing
// for minimum code-size, 2) compiler options that request predication, 3) loop
// hints forcing predication, and 4) a TTI hook that analyzes whether the loop
// is suitable for predication.
9248 static ScalarEpilogueLowering getScalarEpilogueLowering(
9249     Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
9250     BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
9251     AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
9252     LoopVectorizationLegality &LVL) {
9253   // 1) OptSize takes precedence over all other options, i.e. if this is set,
9254   // don't look at hints or options, and don't request a scalar epilogue.
9255   // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
9256   // LoopAccessInfo (due to code dependency and not being able to reliably get
9257   // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
9258   // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
9259   // versioning when the vectorization is forced, unlike hasOptSize. So revert
9260   // back to the old way and vectorize with versioning when forced. See D81345.)
9261   if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
9262                                                       PGSOQueryType::IRPass) &&
9263                           Hints.getForce() != LoopVectorizeHints::FK_Enabled))
9264     return CM_ScalarEpilogueNotAllowedOptSize;
9265 
9266   // 2) If set, obey the directives
9267   if (PreferPredicateOverEpilogue.getNumOccurrences()) {
9268     switch (PreferPredicateOverEpilogue) {
9269     case PreferPredicateTy::ScalarEpilogue:
9270       return CM_ScalarEpilogueAllowed;
9271     case PreferPredicateTy::PredicateElseScalarEpilogue:
9272       return CM_ScalarEpilogueNotNeededUsePredicate;
9273     case PreferPredicateTy::PredicateOrDontVectorize:
9274       return CM_ScalarEpilogueNotAllowedUsePredicate;
9275     };
9276   }
9277 
9278   // 3) If set, obey the hints
9279   switch (Hints.getPredicate()) {
9280   case LoopVectorizeHints::FK_Enabled:
9281     return CM_ScalarEpilogueNotNeededUsePredicate;
9282   case LoopVectorizeHints::FK_Disabled:
9283     return CM_ScalarEpilogueAllowed;
9284   };
9285 
9286   // 4) if the TTI hook indicates this is profitable, request predication.
9287   if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
9288                                        LVL.getLAI()))
9289     return CM_ScalarEpilogueNotNeededUsePredicate;
9290 
9291   return CM_ScalarEpilogueAllowed;
9292 }
9293 
9294 Value *VPTransformState::get(VPValue *Def, unsigned Part) {
  // If values have been set for this Def, return the one relevant for \p Part.
9296   if (hasVectorValue(Def, Part))
9297     return Data.PerPartOutput[Def][Part];
9298 
9299   if (!hasScalarValue(Def, {Part, 0})) {
9300     Value *IRV = Def->getLiveInIRValue();
9301     Value *B = ILV->getBroadcastInstrs(IRV);
9302     set(Def, B, Part);
9303     return B;
9304   }
9305 
9306   Value *ScalarValue = get(Def, {Part, 0});
9307   // If we aren't vectorizing, we can just copy the scalar map values over
9308   // to the vector map.
9309   if (VF.isScalar()) {
9310     set(Def, ScalarValue, Part);
9311     return ScalarValue;
9312   }
9313 
9314   auto *RepR = dyn_cast<VPReplicateRecipe>(Def);
9315   bool IsUniform = RepR && RepR->isUniform();
9316 
9317   unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
9318   auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
9319 
9320   // Set the insert point after the last scalarized instruction. This
9321   // ensures the insertelement sequence will directly follow the scalar
9322   // definitions.
9323   auto OldIP = Builder.saveIP();
9324   auto NewIP = std::next(BasicBlock::iterator(LastInst));
9325   Builder.SetInsertPoint(&*NewIP);
9326 
9327   // However, if we are vectorizing, we need to construct the vector values.
9328   // If the value is known to be uniform after vectorization, we can just
9329   // broadcast the scalar value corresponding to lane zero for each unroll
9330   // iteration. Otherwise, we construct the vector values using
9331   // insertelement instructions. Since the resulting vectors are stored in
9332   // State, we will only generate the insertelements once.
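  //
  // A sketch of the non-uniform case for VF = 4 (names illustrative):
  //   %v.0 = insertelement <4 x i32> poison, i32 %s0, i32 0
  //   %v.1 = insertelement <4 x i32> %v.0,   i32 %s1, i32 1
  //   ... and so on for lanes 2 and 3.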
9333   Value *VectorValue = nullptr;
9334   if (IsUniform) {
9335     VectorValue = ILV->getBroadcastInstrs(ScalarValue);
9336     set(Def, VectorValue, Part);
9337   } else {
    // Initialize packing with insertelements to start from poison.
    assert(!VF.isScalable() && "VF is assumed to be non scalable.");
    Value *Poison = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
    set(Def, Poison, Part);
9342     for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
9343       ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
9344     VectorValue = get(Def, Part);
9345   }
9346   Builder.restoreIP(OldIP);
9347   return VectorValue;
9348 }
9349 
// Process the loop in the VPlan-native vectorization path. This path builds
// VPlan upfront in the vectorization pipeline, which allows applying
// VPlan-to-VPlan transformations from the very beginning without modifying
// the input LLVM IR.
9354 static bool processLoopInVPlanNativePath(
9355     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
9356     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
9357     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
9358     OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
9359     ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints) {
9360 
9361   if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
9362     LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
9363     return false;
9364   }
9365   assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
9366   Function *F = L->getHeader()->getParent();
9367   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
9368 
9369   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
9370       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
9371 
9372   LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
9373                                 &Hints, IAI);
9374   // Use the planner for outer loop vectorization.
9375   // TODO: CM is not used at this point inside the planner. Turn CM into an
9376   // optional argument if we don't need it in the future.
9377   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE);
9378 
9379   // Get user vectorization factor.
9380   ElementCount UserVF = Hints.getWidth();
9381 
9382   // Plan how to best vectorize, return the best VF and its cost.
9383   const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
9384 
9385   // If we are stress testing VPlan builds, do not attempt to generate vector
9386   // code. Masked vector code generation support will follow soon.
9387   // Also, do not attempt to vectorize if no vector code will be produced.
9388   if (VPlanBuildStressTest || EnableVPlanPredication ||
9389       VectorizationFactor::Disabled() == VF)
9390     return false;
9391 
9392   LVP.setBestPlan(VF.Width, 1);
9393 
9394   {
9395     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
9396                              F->getParent()->getDataLayout());
9397     InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
9398                            &CM, BFI, PSI, Checks);
9399     LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
9400                       << L->getHeader()->getParent()->getName() << "\"\n");
9401     LVP.executePlan(LB, DT);
9402   }
9403 
9404   // Mark the loop as already vectorized to avoid vectorizing again.
9405   Hints.setAlreadyVectorized();
9406   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
9407   return true;
9408 }
9409 
// Emit a remark if there are stores to floats that required a floating point
// extension. If the vectorized loop contains such conversions there will be a
// performance penalty from the conversion overhead and the change in the
// vector width.
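//
// For example (a sketch), a loop body containing
//   %e = fpext float %a to double
//   %m = fmul double %e, %d
//   %t = fptrunc double %m to float
//   store float %t, float* %p
// does part of its work in <VF x double>, effectively halving the usable
// vector width compared to keeping the whole computation in float.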
9414 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
9415   SmallVector<Instruction *, 4> Worklist;
9416   for (BasicBlock *BB : L->getBlocks()) {
9417     for (Instruction &Inst : *BB) {
9418       if (auto *S = dyn_cast<StoreInst>(&Inst)) {
9419         if (S->getValueOperand()->getType()->isFloatTy())
9420           Worklist.push_back(S);
9421       }
9422     }
9423   }
9424 
  // Traverse upwards from the floating point stores, searching for floating
  // point conversions.
9427   SmallPtrSet<const Instruction *, 4> Visited;
9428   SmallPtrSet<const Instruction *, 4> EmittedRemark;
9429   while (!Worklist.empty()) {
9430     auto *I = Worklist.pop_back_val();
9431     if (!L->contains(I))
9432       continue;
9433     if (!Visited.insert(I).second)
9434       continue;
9435 
9436     // Emit a remark if the floating point store required a floating
9437     // point conversion.
9438     // TODO: More work could be done to identify the root cause such as a
9439     // constant or a function return type and point the user to it.
9440     if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
9441       ORE->emit([&]() {
9442         return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
9443                                           I->getDebugLoc(), L->getHeader())
9444                << "floating point conversion changes vector width. "
9445                << "Mixed floating point precision requires an up/down "
9446                << "cast that will negatively impact performance.";
9447       });
9448 
9449     for (Use &Op : I->operands())
9450       if (auto *OpI = dyn_cast<Instruction>(Op))
9451         Worklist.push_back(OpI);
9452   }
9453 }
9454 
9455 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
9456     : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
9457                                !EnableLoopInterleaving),
9458       VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
9459                               !EnableLoopVectorization) {}
9460 
9461 bool LoopVectorizePass::processLoop(Loop *L) {
9462   assert((EnableVPlanNativePath || L->isInnermost()) &&
9463          "VPlan-native path is not enabled. Only process inner loops.");
9464 
9465 #ifndef NDEBUG
9466   const std::string DebugLocStr = getDebugLocString(L);
9467 #endif /* NDEBUG */
9468 
9469   LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
9470                     << L->getHeader()->getParent()->getName() << "\" from "
9471                     << DebugLocStr << "\n");
9472 
9473   LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE);
9474 
9475   LLVM_DEBUG(
9476       dbgs() << "LV: Loop hints:"
9477              << " force="
9478              << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
9479                      ? "disabled"
9480                      : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
9481                             ? "enabled"
9482                             : "?"))
9483              << " width=" << Hints.getWidth()
9484              << " unroll=" << Hints.getInterleave() << "\n");
9485 
9486   // Function containing loop
9487   Function *F = L->getHeader()->getParent();
9488 
9489   // Looking at the diagnostic output is the only way to determine if a loop
9490   // was vectorized (other than looking at the IR or machine code), so it
9491   // is important to generate an optimization remark for each loop. Most of
9492   // these messages are generated as OptimizationRemarkAnalysis. Remarks
  // generated as OptimizationRemark and OptimizationRemarkMissed are
  // less verbose, reporting vectorized loops and unvectorized loops that may
  // benefit from vectorization, respectively.
9496 
9497   if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
9498     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
9499     return false;
9500   }
9501 
9502   PredicatedScalarEvolution PSE(*SE, *L);
9503 
9504   // Check if it is legal to vectorize the loop.
9505   LoopVectorizationRequirements Requirements(*ORE);
9506   LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
9507                                 &Requirements, &Hints, DB, AC, BFI, PSI);
9508   if (!LVL.canVectorize(EnableVPlanNativePath)) {
9509     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
9510     Hints.emitRemarkWithHints();
9511     return false;
9512   }
9513 
9514   // Check the function attributes and profiles to find out if this function
9515   // should be optimized for size.
9516   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
9517       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);
9518 
9519   // Entrance to the VPlan-native vectorization path. Outer loops are processed
9520   // here. They may require CFG and instruction level transformations before
9521   // even evaluating whether vectorization is profitable. Since we cannot modify
9522   // the incoming IR, we need to build VPlan upfront in the vectorization
9523   // pipeline.
9524   if (!L->isInnermost())
9525     return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
9526                                         ORE, BFI, PSI, Hints);
9527 
9528   assert(L->isInnermost() && "Inner loop expected.");
9529 
9530   // Check the loop for a trip count threshold: vectorize loops with a tiny trip
9531   // count by optimizing for size, to minimize overheads.
9532   auto ExpectedTC = getSmallBestKnownTC(*SE, L);
9533   if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
9534     LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
9535                       << "This loop is worth vectorizing only if no scalar "
9536                       << "iteration overheads are incurred.");
9537     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
9538       LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
9539     else {
9540       LLVM_DEBUG(dbgs() << "\n");
9541       SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
9542     }
9543   }
9544 
9545   // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem right: what if the loop is an integer loop
  // and the vector instructions selected are purely integer vector
  // instructions?
9549   if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
9550     reportVectorizationFailure(
9551         "Can't vectorize when the NoImplicitFloat attribute is used",
9552         "loop not vectorized due to NoImplicitFloat attribute",
9553         "NoImplicitFloat", ORE, L);
9554     Hints.emitRemarkWithHints();
9555     return false;
9556   }
9557 
9558   // Check if the target supports potentially unsafe FP vectorization.
9559   // FIXME: Add a check for the type of safety issue (denormal, signaling)
9560   // for the target we're vectorizing for, to make sure none of the
9561   // additional fp-math flags can help.
9562   if (Hints.isPotentiallyUnsafe() &&
9563       TTI->isFPVectorizationPotentiallyUnsafe()) {
9564     reportVectorizationFailure(
9565         "Potentially unsafe FP op prevents vectorization",
9566         "loop not vectorized due to unsafe FP support.",
9567         "UnsafeFP", ORE, L);
9568     Hints.emitRemarkWithHints();
9569     return false;
9570   }
9571 
9572   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
9573   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
9574 
9575   // If an override option has been passed in for interleaved accesses, use it.
9576   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
9577     UseInterleaved = EnableInterleavedMemAccesses;
9578 
9579   // Analyze interleaved memory accesses.
9580   if (UseInterleaved) {
9581     IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
9582   }
9583 
9584   // Use the cost model.
9585   LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
9586                                 F, &Hints, IAI);
9587   CM.collectValuesToIgnore();
9588 
9589   // Use the planner for vectorization.
9590   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE);
9591 
9592   // Get user vectorization factor and interleave count.
9593   ElementCount UserVF = Hints.getWidth();
9594   unsigned UserIC = Hints.getInterleave();
9595 
9596   // Plan how to best vectorize, return the best VF and its cost.
9597   Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
9598 
9599   VectorizationFactor VF = VectorizationFactor::Disabled();
9600   unsigned IC = 1;

  if (MaybeVF) {
    VF = *MaybeVF;
    // Select the interleave count.
    IC = CM.selectInterleaveCount(VF.Width, VF.Cost);
  }

  // Identify the diagnostic messages that should be produced.
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (Requirements.doesNotMeet(F, L, Hints)) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
                         "requirements.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  if (VF.Width.isScalar()) {
    LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }

  if (!MaybeVF && UserIC > 1) {
    // Tell the user interleaving was avoided up-front, despite being explicitly
    // requested.
    LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
                         "interleaving should be avoided up front\n");
    IntDiagMsg = std::make_pair(
        "InterleavingAvoided",
        "Ignoring UserIC, because interleaving was avoided up front");
    InterleaveLoop = false;
  } else if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    LLVM_DEBUG(dbgs() << "LV: Interleaving is beneficial but is explicitly "
                         "disabled.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;
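  // E.g. "#pragma clang loop interleave_count(4)" yields UserIC == 4 here and
  // overrides the cost model's choice.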

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
    ORE->emit([&]() {
      return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
    ORE->emit([&]() {
      return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
  } else if (VectorizeLoop && !InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
  } else if (VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

  bool DisableRuntimeUnroll = false;
  MDNode *OrigLoopID = L->getLoopID();
  {
    // Optimistically generate runtime checks. Drop them if they turn out to
    // not be profitable. Limit the scope of Checks, so the cleanup happens
    // immediately after vector code generation is done.
    GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
                             F->getParent()->getDataLayout());
    if (!VF.Width.isScalar() || IC > 1)
      Checks.Create(L, *LVL.getLAI(), PSE.getUnionPredicate());
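    // The check blocks hold the SCEV predicate checks and the runtime memory
    // aliasing checks; they are only wired into the CFG if a vector or
    // interleaved plan is actually executed, and are cleaned up otherwise.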
    LVP.setBestPlan(VF.Width, IC);

    using namespace ore;
    if (!VectorizeLoop) {
      assert(IC > 1 && "interleave count should not be 1 or 0");
      // If we decided that vectorizing the loop is not profitable, then
      // interleave it.
      InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
                                 &CM, BFI, PSI, Checks);
      LVP.executePlan(Unroller, DT);

      ORE->emit([&]() {
        return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                  L->getHeader())
               << "interleaved loop (interleaved count: "
               << NV("InterleaveCount", IC) << ")";
      });
    } else {
      // If we decided that it is *profitable* to vectorize the loop, then do
      // it.

      // Consider vectorizing the epilogue too if it's profitable.
      VectorizationFactor EpilogueVF =
          CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
      if (EpilogueVF.Width.isVector()) {
        // The first pass vectorizes the main loop and creates a scalar
        // epilogue to be vectorized by executing the plan (potentially with a
        // different factor) again shortly afterwards.
        EpilogueLoopVectorizationInfo EPI(VF.Width.getKnownMinValue(), IC,
                                          EpilogueVF.Width.getKnownMinValue(),
                                          1);
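        // E.g. a main loop selected at VF=8 may get its scalar remainder
        // re-vectorized at VF=4; the epilogue is currently always given an
        // unroll factor of 1.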
        EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE,
                                           EPI, &LVL, &CM, BFI, PSI, Checks);

        LVP.setBestPlan(EPI.MainLoopVF, EPI.MainLoopUF);
        LVP.executePlan(MainILV, DT);
        ++LoopsVectorized;

        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
        formLCSSARecursively(*L, *DT, LI, SE);
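        // The first pass restructured the CFG, so re-establish simplified and
        // LCSSA form before vectorizing the remainder loop.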

        // Second pass vectorizes the epilogue and adjusts the control flow
        // edges from the first pass.
        LVP.setBestPlan(EPI.EpilogueVF, EPI.EpilogueUF);
        EPI.MainLoopVF = EPI.EpilogueVF;
        EPI.MainLoopUF = EPI.EpilogueUF;
        EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
                                                 ORE, EPI, &LVL, &CM, BFI, PSI,
                                                 Checks);
        LVP.executePlan(EpilogILV, DT);
        ++LoopsEpilogueVectorized;

        if (!MainILV.areSafetyChecksAdded())
          DisableRuntimeUnroll = true;
      } else {
        InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                               &LVL, &CM, BFI, PSI, Checks);
        LVP.executePlan(LB, DT);
        ++LoopsVectorized;

        // Add metadata to disable runtime unrolling a scalar loop when there
        // are no runtime checks about strides and memory. A scalar loop that is
        // rarely used is not worth unrolling.
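        // (AddRuntimeUnrollDisableMetaData below attaches
        // "llvm.loop.unroll.runtime.disable" to the remainder loop.)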
        if (!LB.areSafetyChecksAdded())
          DisableRuntimeUnroll = true;
      }
      // Report the vectorization decision.
      ORE->emit([&]() {
        return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                  L->getHeader())
               << "vectorized loop (vectorization width: "
               << NV("VectorizationFactor", VF.Width)
               << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
      });
    }

    if (ORE->allowExtraAnalysis(LV_NAME))
      checkMixedPrecision(L, ORE);
  }

  Optional<MDNode *> RemainderLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupEpilogue});
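  // Followup metadata (e.g. "llvm.loop.vectorize.followup_epilogue") lets the
  // user specify the loop attributes the remainder loop should carry; if none
  // is given, fall back to the default handling below.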
  if (RemainderLoopID.hasValue()) {
    L->setLoopID(RemainderLoopID.getValue());
  } else {
    if (DisableRuntimeUnroll)
      AddRuntimeUnrollDisableMetaData(L);

    // Mark the loop as already vectorized to avoid vectorizing again.
    Hints.setAlreadyVectorized();
  }

  assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
  return true;
}

LoopVectorizeResult LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;
  PSI = PSI_;

  // Don't attempt if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
      TTI->getMaxInterleaveFactor(1) < 2)
    return LoopVectorizeResult(false, false);

  bool Changed = false, CFGChanged = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
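  // (Simplified form: the loop has a preheader, a single backedge, and
  // dedicated exit blocks.)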
  for (auto &L : *LI)
    Changed |= CFGChanged |=
        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);

  // Build up a worklist of inner-loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    collectSupportedLoops(*L, LI, ORE, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= CFGChanged |= processLoop(L);
  }

  return LoopVectorizeResult(Changed, CFGChanged);
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  MemorySSA *MSSA = EnableMSSALoopDependency
                        ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA()
                        : nullptr;

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,  SE,
                                      TLI, TTI, nullptr, MSSA};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
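  // GetLAA computes LoopAccessInfo lazily, per loop, through the inner loop
  // analysis manager, so the (expensive) memory-dependence analysis only runs
  // for loops that reach the legality checks.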
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve LoopInfo/dominator analyses with outer loop
  // vectorization. Until this is addressed, mark these analyses as preserved
  // only for non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  if (!Result.MadeCFGChange)
    PA.preserveSet<CFGAnalyses>();
  return PA;
}