//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

#ifndef NDEBUG
const char VerboseDebug[] = DEBUG_TYPE "-verbose";
#endif

/// @{
/// Metadata attribute names
const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
const char LLVMLoopVectorizeFollowupVectorized[] =
    "llvm.loop.vectorize.followup_vectorized";
const char LLVMLoopVectorizeFollowupEpilogue[] =
    "llvm.loop.vectorize.followup_epilogue";
/// @}

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");

static cl::opt<bool> EnableEpilogueVectorization(
    "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
    cl::desc("Enable vectorization of epilogue loops."));

static cl::opt<unsigned> EpilogueVectorizationForceVF(
    "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
    cl::desc("When epilogue vectorization is enabled, and a value greater than "
             "1 is specified, forces the given VF for all applicable epilogue "
             "loops."));

static cl::opt<unsigned> EpilogueVectorizationMinVF(
    "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
    cl::desc("Only loops with vectorization factor equal to or larger than "
             "the specified value are considered for epilogue vectorization."));

/// Loops with a known constant trip count below this number are vectorized only
/// if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

// The option prefer-predicate-over-epilogue indicates that an epilogue is
// undesired and that predication is preferred; the values below list the
// choices. I.e., the vectorizer will try to fold the tail loop (epilogue) into
// the vector body and predicate the instructions accordingly. If tail-folding
// fails, there are different fallback strategies depending on these values:
namespace PreferPredicateTy {
  enum Option {
    ScalarEpilogue = 0,
    PredicateElseScalarEpilogue,
    PredicateOrDontVectorize
  };
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "Prefer tail-folding, create scalar epilogue if tail "
                          "folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "Prefer tail-folding, don't attempt vectorization if "
                          "tail-folding fails.")));
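// A minimal usage sketch (assuming a typical standalone 'opt' invocation, not
// taken from this file):
//   opt -loop-vectorize \
//       -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue ...
// selects tail-folding with a scalar-epilogue fallback if folding fails.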

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor, which "
             "will be determined by the smallest type in the loop."));
242 
243 static cl::opt<bool> EnableInterleavedMemAccesses(
244     "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
245     cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
246 
247 /// An interleave-group may need masking if it resides in a block that needs
248 /// predication, or in order to mask away gaps.
249 static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
250     "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
251     cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));
252 
static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<bool> ForceTargetSupportsScalableVectors(
    "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
    cl::desc(
        "Pretend that scalable vectors are supported, even if the target does "
        "not support them. This flag should only be used for testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// Interleave small loops with scalar reductions.
static cl::opt<bool> InterleaveSmallLoopScalarReduction(
    "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
    cl::desc("Enable interleaving for loops with small iteration counts that "
             "contain scalar reductions to expose ILP."));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if-predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<bool>
    PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
                           cl::Hidden,
                           cl::desc("Prefer in-loop vector reductions, "
                                    "overriding the target's preference."));

cl::opt<bool> EnableStrictReductions(
    "enable-strict-reductions", cl::init(false), cl::Hidden,
    cl::desc("Enable the vectorization of loops with in-order (strict) "
             "FP reductions"));

static cl::opt<bool> PreferPredicatedReductionSelect(
    "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
    cl::desc(
        "Prefer predicating a reduction operation over an after-loop select."));

cl::opt<bool> EnableVPlanNativePath(
    "enable-vplan-native-path", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

// FIXME: Remove this switch once we have divergence analysis. Currently we
// assume divergent non-backedge branches when this switch is true.
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

cl::opt<bool> PrintVPlansInDotFormat(
    "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
    cl::desc("Use dot format instead of plain text when dumping VPlans"));

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type.
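/// For example (illustrative): on a target whose data layout allocates
/// x86_fp80 in 96- or 128-bit slots while its type size is 80 bits, an array
/// of x86_fp80 is not bitcast-compatible with a vector of x86_fp80, so the
/// type is irregular; i32 (32-bit size, 32-bit alloc size) is regular.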
static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
  // Determine if an array of N elements of type Ty is "bitcast compatible"
  // with a <N x Ty> vector.
  // This is only true if there is no padding between the array elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
///       we always assume predicated blocks have a 50% chance of executing.
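/// For example, the current return value of 2 means cost-model callers treat
/// a predicated block as executing on every other iteration, dividing its
/// cost by 2 (a sketch of how this value is consumed).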
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that returns an integer or floating-point constant with
/// value C.
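/// For instance (illustrative), passing an i32 type with C = -1 yields the
/// ConstantInt i32 -1 via ConstantInt::getSigned, while passing a float type
/// yields the ConstantFP -1.0 via ConstantFP::get.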
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
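/// For example (a sketch): for a loop SCEV proves runs exactly 100 times,
/// step 1 returns 100; if the trip count is unknown to SCEV but profile data
/// estimates roughly 1000 iterations, step 2 returns that estimate instead.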
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

// Forward declare GeneratedRTChecks.
class GeneratedRTChecks;

namespace llvm {

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found for the given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
        PSI(PSI), RTChecks(RTChecks) {
    // Query this against the original loop and save it here because the profile
    // of the original loop header may change as the transformation happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop.
  /// In the case of epilogue vectorization, this function is overridden to
  /// handle the more complex control flow around the loops.
  virtual BasicBlock *createVectorizedLoopSkeleton();

  /// Widen a single instruction within the innermost loop.
  void widenInstruction(Instruction &I, VPValue *Def, VPUser &Operands,
                        VPTransformState &State);

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Widen a single select instruction within the innermost loop.
  void widenSelectInstruction(SelectInst &I, VPValue *VPDef, VPUser &Operands,
                              bool InvariantCond, VPTransformState &State);

  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
  void fixVectorizedLoop(VPTransformState &State);

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single GetElementPtrInst based on information gathered and
  /// decisions taken during planning.
  void widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, VPUser &Indices,
                unsigned UF, ElementCount VF, bool IsPtrLoopInvariant,
                SmallBitVector &IsIndexLoopInvariant, VPTransformState &State);

  /// Vectorize a single first-order recurrence or pointer induction PHINode in
  /// a block. This method handles the induction variable canonicalization. It
  /// supports both VF = 1 for unrolled loops and arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR,
                           VPTransformState &State);

  /// A helper function to scalarize a single Instruction in the innermost loop.
  /// Generates a sequence of scalar instances for each lane between \p MinLane
  /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
  /// inclusive. Uses the VPValue operands from \p Operands instead of \p
  /// Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPValue *Def, VPUser &Operands,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, Value *Start, TruncInst *Trunc,
                             VPValue *Def, VPValue *CastDef,
                             VPTransformState &State);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
                                 VPTransformState &State);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                ArrayRef<VPValue *> VPDefs,
                                VPTransformState &State, VPValue *Addr,
                                ArrayRef<VPValue *> StoredValues,
                                VPValue *BlockInMask = nullptr);

  /// Vectorize Load and Store instructions with the base address given in \p
  /// Addr, optionally masking the vector operations if \p BlockInMask is
  /// non-null. Use \p State to translate given VPValues to IR values in the
  /// vectorized loop.
  void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
                                  VPValue *Def, VPValue *Addr,
                                  VPValue *StoredValue, VPValue *BlockInMask);

  /// Set the debug location in the builder \p CustomBuilder using the debug
  /// location in \p V. If \p CustomBuilder is None then it uses the class
  /// member's Builder.
  void setDebugLocFromInst(const Value *V,
                           Optional<IRBuilder<> *> CustomBuilder = None);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(VPTransformState &State);

  /// Returns true if the reordering of FP operations is not allowed, but we are
  /// able to vectorize with strict in-order reductions for the given RdxDesc.
  bool useOrderedReductions(RecurrenceDescriptor &RdxDesc);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop-invariant values and for the induction
  /// value. If this is the induction variable, then we extend it to N, N+1,
  /// ...; this is needed because each iteration in the loop corresponds to a
  /// SIMD element.
  virtual Value *getBroadcastInstrs(Value *V);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs(VPTransformState &State);

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(VPWidenPHIRecipe *PhiR, VPTransformState &State);

  /// Fix a reduction cross-iteration phi. This is the second phase of
  /// vectorizing this phi node.
  void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
                               VPTransformState &State);

  /// Fix up the LCSSA phi nodes in the unique exit block.  This simply
  /// means we need to add the appropriate incoming value from the middle
  /// block as exiting edges from the scalar epilogue loop (if present) are
  /// already in place, and we exit the vector loop exclusively to the middle
  /// block.
  void fixLCSSAPHIs(VPTransformState &State);

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths(VPTransformState &State);

  /// This function adds
  /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
  /// to each vector element of Val. The sequence starts at StartIdx.
  /// \p Opcode is relevant for FP induction variables.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                               Instruction::BinaryOpsEnd);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable - it
  /// can also be a truncate instruction.
  void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
                        const InductionDescriptor &ID, VPValue *Def,
                        VPValue *CastDef, VPTransformState &State);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Value *Start,
                                       Instruction *EntryVal, VPValue *Def,
                                       VPValue *CastDef,
                                       VPTransformState &State);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// If there is a cast involved in the induction variable \p ID, which should
  /// be ignored in the vectorized loop body, this function records the
  /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
  /// cast. We had already proved that the casted Phi is equal to the uncasted
  /// Phi in the vectorized loop (under a runtime guard), and therefore
  /// there is no need to vectorize the cast - the same value can be used in the
  /// vector loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
  /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
  ///
  /// \p EntryVal is the value from the original loop that maps to the vector
  /// phi node and is used to distinguish what is the IV currently being
  /// processed - original one (if \p EntryVal is a phi corresponding to the
  /// original IV) or the "newly-created" one based on the proof mentioned above
  /// (see also buildScalarSteps() and createVectorIntOrFPInductionPHI()). In the
  /// latter case \p EntryVal is a TruncInst and we must not record anything for
  /// that IV, but it's error-prone to expect callers of this routine to care
  /// about that, hence this explicit parameter.
  void recordVectorLoopValueForInductionCast(
      const InductionDescriptor &ID, const Instruction *EntryVal,
      Value *VectorLoopValue, VPValue *CastDef, VPTransformState &State,
      unsigned Part, unsigned Lane = UINT_MAX);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct. Returns the block containing the checks or
  /// nullptr if no checks have been added.
  BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  /// Returns the block containing the checks or nullptr if no checks have been
  /// added.
  BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Compute the transformed value of Index at offset StartValue using step
  /// StepValue.
  /// For integer induction, returns StartValue + Index * StepValue.
  /// For pointer induction, returns StartValue[Index * StepValue].
  /// FIXME: The newly created binary instructions should contain nsw/nuw
  /// flags, which can be found from the original scalar operations.
  Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
                              const DataLayout &DL,
                              const InductionDescriptor &ID) const;

  /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  /// vector loop preheader, middle block and scalar preheader. Also
  /// allocate a loop object for the new vector loop and return it.
  Loop *createVectorLoopSkeleton(StringRef Prefix);

  /// Create new phi nodes for the induction variables to resume iteration count
  /// in the scalar epilogue, from where the vectorized loop left off (given by
  /// \p VectorTripCount).
  /// In cases where the loop skeleton is more complicated (e.g. epilogue
  /// vectorization) and the resume values can come from an additional bypass
  /// block, the \p AdditionalBypass pair provides information about the bypass
  /// block and the end value on the edge from bypass to this loop.
  void createInductionResumeValues(
      Loop *L, Value *VectorTripCount,
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Take in the vector loop \p L as argument, and return
  /// the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks.  Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata).  Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart() {}
  virtual void printDebugTracesAtEnd() {}

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning.  It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks.  The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The (unique) ExitBlock of the scalar loop.  Note that
  /// there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction = nullptr;

  /// The induction variable of the old basic block.
  PHINode *OldInduction = nullptr;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
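  /// For example (illustrative): with TripCount = 103, VF = 8 and UF = 2, the
  /// widened loop executes 103 - 103 % 16 = 96 iterations and the remaining 7
  /// are left to the scalar epilogue.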
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile guided
  // size optimizations.
  bool OptForSizeBasedOnProfile;

  /// Structure to hold information about generated runtime checks, responsible
  /// for cleaning up the checks, if vectorization turns out unprofitable.
  GeneratedRTChecks &RTChecks;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI, Check) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                       Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  BasicBlock *SCEVSafetyCheck = nullptr;
  BasicBlock *MemSafetyCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;

  EpilogueLoopVectorizationInfo(unsigned MVF, unsigned MUF, unsigned EVF,
                                unsigned EUF)
      : MainLoopVF(ElementCount::getFixed(MVF)), MainLoopUF(MUF),
        EpilogueVF(ElementCount::getFixed(EVF)), EpilogueUF(EUF) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};
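// For example (illustrative), EpilogueLoopVectorizationInfo(8, 2, 4, 1)
// requests a main loop with VF = 8 and UF = 2 plus an epilogue loop with
// VF = 4 and UF = 1; the assert above enforces that the epilogue UF is 1.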

/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, first to set up the
/// skeleton and vectorize the main loop, and second to complete the skeleton
/// from the first step and vectorize the epilogue.  This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
                            Checks),
        EPI(EPI) {}

  // Override this function to handle the more complex control flow around the
  // three loops.
  BasicBlock *createVectorizedLoopSkeleton() final override {
    return createEpilogueVectorizedLoopSkeleton();
  }

  /// The interface for creating a vectorized skeleton using one of two
  /// different strategies, each corresponding to one execution of the vplan
  /// as described above.
  virtual BasicBlock *createEpilogueVectorizedLoopSkeleton() = 0;

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and their
/// epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Check)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Check) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e. the first pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
                                             bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *epilogue* loops in the process of vectorizing loops and
/// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Checks) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e. the second pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
                                                      BasicBlock *Bypass,
                                                      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (Use &Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(
    const Value *V, Optional<IRBuilder<> *> CustomBuilder) {
  IRBuilder<> *B = (CustomBuilder == None) ? &Builder : *CustomBuilder;
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(V)) {
    const DILocation *DIL = Inst->getDebugLoc();

    // When an FSDiscriminator is enabled, we don't need to add the multiply
    // factors to the discriminators.
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
      // FIXME: For scalable vectors, assume vscale=1.
      auto NewDIL =
          DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
      if (NewDIL)
        B->SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs()
                   << "Failed to create new discriminator: "
                   << DIL->getFilename() << " Line: " << DIL->getLine()
                   << '\n');
    } else
      B->SetCurrentDebugLocation(DIL);
  } else
    B->SetCurrentDebugLocation(DebugLoc());
}

/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
/// is passed, the message relates to that particular instruction.
#ifndef NDEBUG
static void debugVectorizationMessage(const StringRef Prefix,
                                      const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: " << Prefix << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint).  \p
/// RemarkName is the identifier for the remark.  If \p I is passed it is an
/// instruction that prevents vectorization.  Otherwise \p TheLoop is used for
/// the location of the remark.  \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
    StringRef RemarkName, Loop *TheLoop, Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
}

/// Return a value for Step multiplied by VF.
static Value *createStepForVF(IRBuilder<> &B, Constant *Step, ElementCount VF) {
  assert(isa<ConstantInt>(Step) && "Expected an integer step");
  Constant *StepVal = ConstantInt::get(
      Step->getType(),
      cast<ConstantInt>(Step)->getSExtValue() * VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
}
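// For example (a sketch): with Step = i64 2 and a fixed VF of 4, this folds
// to the constant i64 8; with a scalable VF of vscale x 4, it instead emits
// a runtime value of 8 * vscale via IRBuilder::CreateVScale.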

namespace llvm {

/// Return the runtime value for VF.
Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF) {
  Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(EC) : EC;
}
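// E.g. (illustrative), getRuntimeVF(B, Int32Ty, ElementCount::getFixed(8))
// returns the constant i32 8, while ElementCount::getScalable(8) yields an
// i32 value computing 8 * vscale at runtime.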

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << "loop not vectorized: " << OREMsg);
}

void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
                             OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                             Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << Msg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

namespace llvm {

// Describes how the scalar epilogue loop should be lowered; used as a hint
// by the loop vectorization cost model.
1190 enum ScalarEpilogueLowering {
1191 
1192   // The default: allowing scalar epilogues.
1193   CM_ScalarEpilogueAllowed,
1194 
1195   // Vectorization with OptForSize: don't allow epilogues.
1196   CM_ScalarEpilogueNotAllowedOptSize,
1197 
  // A special case of vectorization with OptForSize: loops with a very small
1199   // trip count are considered for vectorization under OptForSize, thereby
1200   // making sure the cost of their loop body is dominant, free of runtime
1201   // guards and scalar iteration overheads.
1202   CM_ScalarEpilogueNotAllowedLowTripLoop,
1203 
1204   // Loop hint predicate indicating an epilogue is undesired.
1205   CM_ScalarEpilogueNotNeededUsePredicate,
1206 
  // Directive indicating we must either tail fold or not vectorize.
1208   CM_ScalarEpilogueNotAllowedUsePredicate
1209 };
1210 
1211 /// ElementCountComparator creates a total ordering for ElementCount
1212 /// for the purposes of using it in a set structure.
1213 struct ElementCountComparator {
1214   bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
1215     return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
1216            std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
1217   }
1218 };
1219 using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;
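
// For example, under this ordering every fixed element count compares less
// than every scalable one:
//   1 < 2 < 4 < ... < vscale x 1 < vscale x 2 < vscale x 4 < ...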
1220 
/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable, for a number of reasons. In
/// this class we mainly attempt to predict the expected speedup or slowdown
/// due to the supported instruction set. We use the TargetTransformInfo to
/// query the different backends for the cost of different operations.
1228 class LoopVectorizationCostModel {
1229 public:
1230   LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
1231                              PredicatedScalarEvolution &PSE, LoopInfo *LI,
1232                              LoopVectorizationLegality *Legal,
1233                              const TargetTransformInfo &TTI,
1234                              const TargetLibraryInfo *TLI, DemandedBits *DB,
1235                              AssumptionCache *AC,
1236                              OptimizationRemarkEmitter *ORE, const Function *F,
1237                              const LoopVectorizeHints *Hints,
1238                              InterleavedAccessInfo &IAI)
1239       : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
1240         TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
1241         Hints(Hints), InterleaveInfo(IAI) {}
1242 
1243   /// \return An upper bound for the vectorization factors (both fixed and
1244   /// scalable). If the factors are 0, vectorization and interleaving should be
1245   /// avoided up front.
1246   FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);
1247 
1248   /// \return True if runtime checks are required for vectorization, and false
1249   /// otherwise.
1250   bool runtimeChecksRequired();
1251 
1252   /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every VF in \p CandidateVFs. If UserVF is not zero,
  /// that vectorization factor will be selected, provided vectorization is
  /// possible.
1256   VectorizationFactor
1257   selectVectorizationFactor(const ElementCountSet &CandidateVFs);
1258 
1259   VectorizationFactor
1260   selectEpilogueVectorizationFactor(const ElementCount MaxVF,
1261                                     const LoopVectorizationPlanner &LVP);
1262 
1263   /// Setup cost-based decisions for user vectorization factor.
1264   void selectUserVectorizationFactor(ElementCount UserVF) {
1265     collectUniformsAndScalars(UserVF);
1266     collectInstsToScalarize(UserVF);
1267   }
1268 
  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar, such as
  /// 64-bit loop indices.
1272   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1273 
1274   /// \return The desired interleave count.
1275   /// If interleave count has been specified by metadata it will be returned.
1276   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1277   /// are the selected vectorization factor and the cost of the selected VF.
1278   unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);
1279 
  /// A memory access instruction may be vectorized in more than one way, and
  /// the form it takes after vectorization depends on cost. This function
  /// makes cost-based decisions for Load/Store instructions and collects them
  /// in a map. The decision map is used for building the lists of loop-uniform
  /// and loop-scalar instructions. The calculated cost is saved with the
  /// widening decision in order to avoid redundant calculations.
1287   void setCostBasedWideningDecision(ElementCount VF);
1288 
1289   /// A struct that represents some properties of the register usage
1290   /// of a loop.
1291   struct RegisterUsage {
1292     /// Holds the number of loop invariant values that are used in the loop.
1293     /// The key is ClassID of target-provided register class.
1294     SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
1295     /// Holds the maximum number of concurrent live intervals in the loop.
1296     /// The key is ClassID of target-provided register class.
1297     SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
1298   };
1299 
  /// \return Information about the register usage of the loop for the
  /// given vectorization factors.
1302   SmallVector<RegisterUsage, 8>
1303   calculateRegisterUsage(ArrayRef<ElementCount> VFs);
1304 
1305   /// Collect values we want to ignore in the cost model.
1306   void collectValuesToIgnore();
1307 
1308   /// Collect all element types in the loop for which widening is needed.
1309   void collectElementTypesForWidening();
1310 
  /// Split reductions into those that happen in the loop, and those that
  /// happen outside. In-loop reductions are collected into
  /// InLoopReductionChains.
1313   void collectInLoopReductions();
1314 
1315   /// Returns true if we should use strict in-order reductions for the given
1316   /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
1317   /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
1318   /// of FP operations.
1319   bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) {
1320     return EnableStrictReductions && !Hints->allowReordering() &&
1321            RdxDesc.isOrdered();
1322   }
1323 
1324   /// \returns The smallest bitwidth each instruction can be represented with.
1325   /// The vector equivalents of these instructions should be truncated to this
1326   /// type.
1327   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1328     return MinBWs;
1329   }
1330 
1331   /// \returns True if it is more profitable to scalarize instruction \p I for
1332   /// vectorization factor \p VF.
1333   bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
1334     assert(VF.isVector() &&
1335            "Profitable to scalarize relevant only for VF > 1.");
1336 
1337     // Cost model is not run in the VPlan-native path - return conservative
1338     // result until this changes.
1339     if (EnableVPlanNativePath)
1340       return false;
1341 
1342     auto Scalars = InstsToScalarize.find(VF);
1343     assert(Scalars != InstsToScalarize.end() &&
1344            "VF not yet analyzed for scalarization profitability");
1345     return Scalars->second.find(I) != Scalars->second.end();
1346   }
1347 
1348   /// Returns true if \p I is known to be uniform after vectorization.
1349   bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
1350     if (VF.isScalar())
1351       return true;
1352 
1353     // Cost model is not run in the VPlan-native path - return conservative
1354     // result until this changes.
1355     if (EnableVPlanNativePath)
1356       return false;
1357 
1358     auto UniformsPerVF = Uniforms.find(VF);
1359     assert(UniformsPerVF != Uniforms.end() &&
1360            "VF not yet analyzed for uniformity");
1361     return UniformsPerVF->second.count(I);
1362   }
1363 
1364   /// Returns true if \p I is known to be scalar after vectorization.
1365   bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
1366     if (VF.isScalar())
1367       return true;
1368 
1369     // Cost model is not run in the VPlan-native path - return conservative
1370     // result until this changes.
1371     if (EnableVPlanNativePath)
1372       return false;
1373 
1374     auto ScalarsPerVF = Scalars.find(VF);
1375     assert(ScalarsPerVF != Scalars.end() &&
1376            "Scalar values are not calculated for VF");
1377     return ScalarsPerVF->second.count(I);
1378   }
1379 
1380   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1381   /// for vectorization factor \p VF.
1382   bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
1383     return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
1384            !isProfitableToScalarize(I, VF) &&
1385            !isScalarAfterVectorization(I, VF);
1386   }
1387 
1388   /// Decision that was taken during cost calculation for memory instruction.
1389   enum InstWidening {
1390     CM_Unknown,
1391     CM_Widen,         // For consecutive accesses with stride +1.
1392     CM_Widen_Reverse, // For consecutive accesses with stride -1.
1393     CM_Interleave,
1394     CM_GatherScatter,
1395     CM_Scalarize
1396   };
1397 
1398   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1399   /// instruction \p I and vector width \p VF.
1400   void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
1401                            InstructionCost Cost) {
1402     assert(VF.isVector() && "Expected VF >=2");
1403     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1404   }
1405 
1406   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1407   /// interleaving group \p Grp and vector width \p VF.
1408   void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
1409                            ElementCount VF, InstWidening W,
1410                            InstructionCost Cost) {
1411     assert(VF.isVector() && "Expected VF >=2");
    // Broadcast this decision to all instructions inside the group,
    // but assign the cost to one instruction only.
1414     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1415       if (auto *I = Grp->getMember(i)) {
1416         if (Grp->getInsertPos() == I)
1417           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1418         else
1419           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1420       }
1421     }
1422   }
1423 
1424   /// Return the cost model decision for the given instruction \p I and vector
1425   /// width \p VF. Return CM_Unknown if this instruction did not pass
1426   /// through the cost modeling.
1427   InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
1428     assert(VF.isVector() && "Expected VF to be a vector VF");
1429     // Cost model is not run in the VPlan-native path - return conservative
1430     // result until this changes.
1431     if (EnableVPlanNativePath)
1432       return CM_GatherScatter;
1433 
1434     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1435     auto Itr = WideningDecisions.find(InstOnVF);
1436     if (Itr == WideningDecisions.end())
1437       return CM_Unknown;
1438     return Itr->second.first;
1439   }
1440 
1441   /// Return the vectorization cost for the given instruction \p I and vector
1442   /// width \p VF.
1443   InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
1444     assert(VF.isVector() && "Expected VF >=2");
1445     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1446     assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1447            "The cost is not calculated");
1448     return WideningDecisions[InstOnVF].second;
1449   }
1450 
1451   /// Return True if instruction \p I is an optimizable truncate whose operand
1452   /// is an induction variable. Such a truncate will be removed by adding a new
1453   /// induction variable with the destination type.
1454   bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
1455     // If the instruction is not a truncate, return false.
1456     auto *Trunc = dyn_cast<TruncInst>(I);
1457     if (!Trunc)
1458       return false;
1459 
1460     // Get the source and destination types of the truncate.
1461     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1462     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1463 
1464     // If the truncate is free for the given types, return false. Replacing a
1465     // free truncate with an induction variable would add an induction variable
1466     // update instruction to each iteration of the loop. We exclude from this
1467     // check the primary induction variable since it will need an update
1468     // instruction regardless.
1469     Value *Op = Trunc->getOperand(0);
1470     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1471       return false;
1472 
1473     // If the truncated value is not an induction variable, return false.
1474     return Legal->isInductionPhi(Op);
1475   }
1476 
1477   /// Collects the instructions to scalarize for each predicated instruction in
1478   /// the loop.
1479   void collectInstsToScalarize(ElementCount VF);
1480 
1481   /// Collect Uniform and Scalar values for the given \p VF.
1482   /// The sets depend on CM decision for Load/Store instructions
1483   /// that may be vectorized as interleave, gather-scatter or scalarized.
1484   void collectUniformsAndScalars(ElementCount VF) {
1485     // Do the analysis once.
1486     if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
1487       return;
1488     setCostBasedWideningDecision(VF);
1489     collectLoopUniforms(VF);
1490     collectLoopScalars(VF);
1491   }
1492 
1493   /// Returns true if the target machine supports masked store operation
1494   /// for the given \p DataType and kind of access to \p Ptr.
1495   bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const {
1496     return Legal->isConsecutivePtr(Ptr) &&
1497            TTI.isLegalMaskedStore(DataType, Alignment);
1498   }
1499 
1500   /// Returns true if the target machine supports masked load operation
1501   /// for the given \p DataType and kind of access to \p Ptr.
1502   bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const {
1503     return Legal->isConsecutivePtr(Ptr) &&
1504            TTI.isLegalMaskedLoad(DataType, Alignment);
1505   }
1506 
1507   /// Returns true if the target machine can represent \p V as a masked gather
1508   /// or scatter operation.
1509   bool isLegalGatherOrScatter(Value *V) {
1510     bool LI = isa<LoadInst>(V);
1511     bool SI = isa<StoreInst>(V);
1512     if (!LI && !SI)
1513       return false;
1514     auto *Ty = getLoadStoreType(V);
1515     Align Align = getLoadStoreAlignment(V);
1516     return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
1517            (SI && TTI.isLegalMaskedScatter(Ty, Align));
1518   }
1519 
1520   /// Returns true if the target machine supports all of the reduction
1521   /// variables found for the given VF.
1522   bool canVectorizeReductions(ElementCount VF) const {
1523     return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
1524       const RecurrenceDescriptor &RdxDesc = Reduction.second;
1525       return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
1526     }));
1527   }
1528 
1529   /// Returns true if \p I is an instruction that will be scalarized with
1530   /// predication. Such instructions include conditional stores and
1531   /// instructions that may divide by zero.
1532   /// If a non-zero VF has been calculated, we check if I will be scalarized
  /// with predication for that VF.
1534   bool isScalarWithPredication(Instruction *I) const;
1535 
1536   // Returns true if \p I is an instruction that will be predicated either
1537   // through scalar predication or masked load/store or masked gather/scatter.
1538   // Superset of instructions that return true for isScalarWithPredication.
1539   bool isPredicatedInst(Instruction *I) {
1540     if (!blockNeedsPredication(I->getParent()))
1541       return false;
1542     // Loads and stores that need some form of masked operation are predicated
1543     // instructions.
1544     if (isa<LoadInst>(I) || isa<StoreInst>(I))
1545       return Legal->isMaskRequired(I);
1546     return isScalarWithPredication(I);
1547   }
1548 
1549   /// Returns true if \p I is a memory instruction with consecutive memory
1550   /// access that can be widened.
1551   bool
1552   memoryInstructionCanBeWidened(Instruction *I,
1553                                 ElementCount VF = ElementCount::getFixed(1));
1554 
1555   /// Returns true if \p I is a memory instruction in an interleaved-group
1556   /// of memory accesses that can be vectorized with wide vector loads/stores
1557   /// and shuffles.
1558   bool
1559   interleavedAccessCanBeWidened(Instruction *I,
1560                                 ElementCount VF = ElementCount::getFixed(1));
1561 
1562   /// Check if \p Instr belongs to any interleaved access group.
1563   bool isAccessInterleaved(Instruction *Instr) {
1564     return InterleaveInfo.isInterleaved(Instr);
1565   }
1566 
1567   /// Get the interleaved access group that \p Instr belongs to.
1568   const InterleaveGroup<Instruction> *
1569   getInterleavedAccessGroup(Instruction *Instr) {
1570     return InterleaveInfo.getInterleaveGroup(Instr);
1571   }
1572 
1573   /// Returns true if we're required to use a scalar epilogue for at least
1574   /// the final iteration of the original loop.
1575   bool requiresScalarEpilogue(ElementCount VF) const {
1576     if (!isScalarEpilogueAllowed())
1577       return false;
    // If we might exit from anywhere but the latch, we must run the exiting
    // iteration in scalar form.
1580     if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
1581       return true;
1582     return VF.isVector() && InterleaveInfo.requiresScalarEpilogue();
1583   }
1584 
1585   /// Returns true if a scalar epilogue is not allowed due to optsize or a
1586   /// loop hint annotation.
1587   bool isScalarEpilogueAllowed() const {
1588     return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1589   }
1590 
1591   /// Returns true if all loop blocks should be masked to fold tail loop.
1592   bool foldTailByMasking() const { return FoldTailByMasking; }
1593 
1594   bool blockNeedsPredication(BasicBlock *BB) const {
1595     return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1596   }
1597 
1598   /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
1599   /// nodes to the chain of instructions representing the reductions. Uses a
1600   /// MapVector to ensure deterministic iteration order.
1601   using ReductionChainMap =
1602       SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;
1603 
1604   /// Return the chain of instructions representing an inloop reduction.
1605   const ReductionChainMap &getInLoopReductionChains() const {
1606     return InLoopReductionChains;
1607   }
1608 
1609   /// Returns true if the Phi is part of an inloop reduction.
1610   bool isInLoopReduction(PHINode *Phi) const {
1611     return InLoopReductionChains.count(Phi);
1612   }
1613 
1614   /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1615   /// with factor VF.  Return the cost of the instruction, including
1616   /// scalarization overhead if it's needed.
1617   InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;
1618 
1619   /// Estimate cost of a call instruction CI if it were vectorized with factor
1620   /// VF. Return the cost of the instruction, including scalarization overhead
1621   /// if it's needed. The flag NeedToScalarize shows if the call needs to be
1622   /// scalarized -
1623   /// i.e. either vector version isn't available, or is too expensive.
1624   InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
1625                                     bool &NeedToScalarize) const;
1626 
1627   /// Returns true if the per-lane cost of VectorizationFactor A is lower than
1628   /// that of B.
1629   bool isMoreProfitable(const VectorizationFactor &A,
1630                         const VectorizationFactor &B) const;
1631 
1632   /// Invalidates decisions already taken by the cost model.
1633   void invalidateCostModelingDecisions() {
1634     WideningDecisions.clear();
1635     Uniforms.clear();
1636     Scalars.clear();
1637   }
1638 
1639 private:
1640   unsigned NumPredStores = 0;
1641 
1642   /// \return An upper bound for the vectorization factors for both
1643   /// fixed and scalable vectorization, where the minimum-known number of
1644   /// elements is a power-of-2 larger than zero. If scalable vectorization is
1645   /// disabled or unsupported, then the scalable part will be equal to
1646   /// ElementCount::getScalable(0).
1647   FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
1648                                            ElementCount UserVF);
1649 
  /// \return the maximized element count based on the target's vector
  /// registers and the loop trip-count, but limited to a maximum safe VF.
1652   /// This is a helper function of computeFeasibleMaxVF.
1653   /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
1654   /// issue that occurred on one of the buildbots which cannot be reproduced
  /// without having access to the proprietary compiler (see comments on
1656   /// D98509). The issue is currently under investigation and this workaround
1657   /// will be removed as soon as possible.
1658   ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
1659                                        unsigned SmallestType,
1660                                        unsigned WidestType,
1661                                        const ElementCount &MaxSafeVF);
1662 
1663   /// \return the maximum legal scalable VF, based on the safe max number
1664   /// of elements.
1665   ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1666 
1667   /// The vectorization cost is a combination of the cost itself and a boolean
1668   /// indicating whether any of the contributing operations will actually
1669   /// operate on vector values after type legalization in the backend. If this
1670   /// latter value is false, then all operations will be scalarized (i.e. no
1671   /// vectorization has actually taken place).
1672   using VectorizationCostTy = std::pair<InstructionCost, bool>;
1673 
1674   /// Returns the expected execution cost. The unit of the cost does
1675   /// not matter because we use the 'cost' units to compare different
1676   /// vector widths. The cost that is returned is *not* normalized by
1677   /// the factor width.
1678   VectorizationCostTy expectedCost(ElementCount VF);
1679 
1680   /// Returns the execution time cost of an instruction for a given vector
1681   /// width. Vector width of one means scalar.
1682   VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1683 
1684   /// The cost-computation logic from getInstructionCost which provides
1685   /// the vector type as an output parameter.
1686   InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
1687                                      Type *&VectorTy);
1688 
1689   /// Return the cost of instructions in an inloop reduction pattern, if I is
1690   /// part of that pattern.
1691   InstructionCost getReductionPatternCost(Instruction *I, ElementCount VF,
1692                                           Type *VectorTy,
1693                                           TTI::TargetCostKind CostKind);
1694 
1695   /// Calculate vectorization cost of memory instruction \p I.
1696   InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1697 
1698   /// The cost computation for scalarized memory instruction.
1699   InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1700 
1701   /// The cost computation for interleaving group of memory instructions.
1702   InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1703 
1704   /// The cost computation for Gather/Scatter instruction.
1705   InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1706 
1707   /// The cost computation for widening instruction \p I with consecutive
1708   /// memory access.
1709   InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1710 
1711   /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1712   /// Load: scalar load + broadcast.
1713   /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1714   /// element)
1715   InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1716 
1717   /// Estimate the overhead of scalarizing an instruction. This is a
1718   /// convenience wrapper for the type-based getScalarizationOverhead API.
1719   InstructionCost getScalarizationOverhead(Instruction *I,
1720                                            ElementCount VF) const;
1721 
  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
1724   bool isConsecutiveLoadOrStore(Instruction *I);
1725 
1726   /// Returns true if an artificially high cost for emulated masked memrefs
1727   /// should be used.
1728   bool useEmulatedMaskMemRefHack(Instruction *I);
1729 
1730   /// Map of scalar integer values to the smallest bitwidth they can be legally
1731   /// represented as. The vector equivalents of these values should be truncated
1732   /// to this type.
1733   MapVector<Instruction *, uint64_t> MinBWs;
1734 
1735   /// A type representing the costs for instructions if they were to be
1736   /// scalarized rather than vectorized. The entries are Instruction-Cost
1737   /// pairs.
1738   using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1739 
  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
1742   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1743 
1744   /// Records whether it is allowed to have the original scalar loop execute at
1745   /// least once. This may be needed as a fallback loop in case runtime
1746   /// aliasing/dependence checks fail, or to handle the tail/remainder
  /// iterations when the trip count is unknown or is not divisible by the VF,
1748   /// or as a peel-loop to handle gaps in interleave-groups.
1749   /// Under optsize and when the trip count is very small we don't allow any
1750   /// iterations to execute in the scalar loop.
1751   ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1752 
1753   /// All blocks of loop are to be masked to fold tail of scalar iterations.
1754   bool FoldTailByMasking = false;
1755 
1756   /// A map holding scalar costs for different vectorization factors. The
1757   /// presence of a cost for an instruction in the mapping indicates that the
1758   /// instruction will be scalarized when vectorizing with the associated
1759   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1760   DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1761 
1762   /// Holds the instructions known to be uniform after vectorization.
1763   /// The data is collected per VF.
1764   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1765 
1766   /// Holds the instructions known to be scalar after vectorization.
1767   /// The data is collected per VF.
1768   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1769 
1770   /// Holds the instructions (address computations) that are forced to be
1771   /// scalarized.
1772   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1773 
  /// PHINodes of the reductions that should be expanded in-loop along with
  /// their associated chains of reduction operations, in program order from
  /// top (PHI) to bottom.
1777   ReductionChainMap InLoopReductionChains;
1778 
1779   /// A Map of inloop reduction operations and their immediate chain operand.
  /// FIXME: This can be removed once reductions can be costed correctly in
  /// VPlan. This was added to allow quick lookup of the in-loop operations,
  /// without having to loop through InLoopReductionChains.
1783   DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1784 
1785   /// Returns the expected difference in cost from scalarizing the expression
1786   /// feeding a predicated instruction \p PredInst. The instructions to
1787   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1788   /// non-negative return value implies the expression will be scalarized.
1789   /// Currently, only single-use chains are considered for scalarization.
1790   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1791                               ElementCount VF);
1792 
1793   /// Collect the instructions that are uniform after vectorization. An
1794   /// instruction is uniform if we represent it with a single scalar value in
1795   /// the vectorized loop corresponding to each vector iteration. Examples of
1796   /// uniform instructions include pointer operands of consecutive or
1797   /// interleaved memory accesses. Note that although uniformity implies an
1798   /// instruction will be scalar, the reverse is not true. In general, a
1799   /// scalarized instruction will be represented by VF scalar values in the
1800   /// vectorized loop, each corresponding to an iteration of the original
1801   /// scalar loop.
1802   void collectLoopUniforms(ElementCount VF);
1803 
1804   /// Collect the instructions that are scalar after vectorization. An
1805   /// instruction is scalar if it is known to be uniform or will be scalarized
1806   /// during vectorization. Non-uniform scalarized instructions will be
1807   /// represented by VF values in the vectorized loop, each corresponding to an
1808   /// iteration of the original scalar loop.
1809   void collectLoopScalars(ElementCount VF);
1810 
1811   /// Keeps cost model vectorization decision and cost for instructions.
1812   /// Right now it is used for memory instructions only.
1813   using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1814                                 std::pair<InstWidening, InstructionCost>>;
1815 
1816   DecisionList WideningDecisions;
1817 
1818   /// Returns true if \p V is expected to be vectorized and it needs to be
1819   /// extracted.
1820   bool needsExtract(Value *V, ElementCount VF) const {
1821     Instruction *I = dyn_cast<Instruction>(V);
1822     if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1823         TheLoop->isLoopInvariant(I))
1824       return false;
1825 
1826     // Assume we can vectorize V (and hence we need extraction) if the
1827     // scalars are not computed yet. This can happen, because it is called
1828     // via getScalarizationOverhead from setCostBasedWideningDecision, before
1829     // the scalars are collected. That should be a safe assumption in most
1830     // cases, because we check if the operands have vectorizable types
1831     // beforehand in LoopVectorizationLegality.
1832     return Scalars.find(VF) == Scalars.end() ||
1833            !isScalarAfterVectorization(I, VF);
1834   };
1835 
1836   /// Returns a range containing only operands needing to be extracted.
1837   SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1838                                                    ElementCount VF) const {
1839     return SmallVector<Value *, 4>(make_filter_range(
1840         Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
1841   }
1842 
1843   /// Determines if we have the infrastructure to vectorize loop \p L and its
1844   /// epilogue, assuming the main loop is vectorized by \p VF.
1845   bool isCandidateForEpilogueVectorization(const Loop &L,
1846                                            const ElementCount VF) const;
1847 
1848   /// Returns true if epilogue vectorization is considered profitable, and
1849   /// false otherwise.
1850   /// \p VF is the vectorization factor chosen for the original loop.
1851   bool isEpilogueVectorizationProfitable(const ElementCount VF) const;
1852 
1853 public:
1854   /// The loop that we evaluate.
1855   Loop *TheLoop;
1856 
1857   /// Predicated scalar evolution analysis.
1858   PredicatedScalarEvolution &PSE;
1859 
1860   /// Loop Info analysis.
1861   LoopInfo *LI;
1862 
1863   /// Vectorization legality.
1864   LoopVectorizationLegality *Legal;
1865 
1866   /// Vector target information.
1867   const TargetTransformInfo &TTI;
1868 
1869   /// Target Library Info.
1870   const TargetLibraryInfo *TLI;
1871 
1872   /// Demanded bits analysis.
1873   DemandedBits *DB;
1874 
1875   /// Assumption cache.
1876   AssumptionCache *AC;
1877 
1878   /// Interface to emit optimization remarks.
1879   OptimizationRemarkEmitter *ORE;
1880 
1881   const Function *TheFunction;
1882 
1883   /// Loop Vectorize Hint.
1884   const LoopVectorizeHints *Hints;
1885 
1886   /// The interleave access information contains groups of interleaved accesses
1887   /// with the same stride and close to each other.
1888   InterleavedAccessInfo &InterleaveInfo;
1889 
1890   /// Values to ignore in the cost model.
1891   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1892 
1893   /// Values to ignore in the cost model when VF > 1.
1894   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1895 
1896   /// All element types found in the loop.
1897   SmallPtrSet<Type *, 16> ElementTypesInLoop;
1898 
1899   /// Profitable vector factors.
1900   SmallVector<VectorizationFactor, 8> ProfitableVFs;
1901 };
1902 } // end namespace llvm
1903 
1904 /// Helper struct to manage generating runtime checks for vectorization.
1905 ///
/// The runtime checks are created up-front in temporary blocks, un-linked from
/// the existing IR, to allow better cost estimation. After deciding to
/// vectorize, the checks are moved back. If deciding not to vectorize, the
/// temporary blocks are completely removed.
1910 class GeneratedRTChecks {
1911   /// Basic block which contains the generated SCEV checks, if any.
1912   BasicBlock *SCEVCheckBlock = nullptr;
1913 
1914   /// The value representing the result of the generated SCEV checks. If it is
1915   /// nullptr, either no SCEV checks have been generated or they have been used.
1916   Value *SCEVCheckCond = nullptr;
1917 
1918   /// Basic block which contains the generated memory runtime checks, if any.
1919   BasicBlock *MemCheckBlock = nullptr;
1920 
1921   /// The value representing the result of the generated memory runtime checks.
1922   /// If it is nullptr, either no memory runtime checks have been generated or
1923   /// they have been used.
1924   Instruction *MemRuntimeCheckCond = nullptr;
1925 
1926   DominatorTree *DT;
1927   LoopInfo *LI;
1928 
1929   SCEVExpander SCEVExp;
1930   SCEVExpander MemCheckExp;
1931 
1932 public:
1933   GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI,
1934                     const DataLayout &DL)
1935       : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"),
1936         MemCheckExp(SE, DL, "scev.check") {}
1937 
  /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
  /// accurately estimate the cost of the runtime checks. The blocks are
  /// un-linked from the IR and are added back during vector code generation.
  /// If there is no vector code generation, the check blocks are removed
  /// completely.
1943   void Create(Loop *L, const LoopAccessInfo &LAI,
1944               const SCEVUnionPredicate &UnionPred) {
1945 
1946     BasicBlock *LoopHeader = L->getHeader();
1947     BasicBlock *Preheader = L->getLoopPreheader();
1948 
1949     // Use SplitBlock to create blocks for SCEV & memory runtime checks to
1950     // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
1951     // may be used by SCEVExpander. The blocks will be un-linked from their
1952     // predecessors and removed from LI & DT at the end of the function.
1953     if (!UnionPred.isAlwaysTrue()) {
1954       SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
1955                                   nullptr, "vector.scevcheck");
1956 
1957       SCEVCheckCond = SCEVExp.expandCodeForPredicate(
1958           &UnionPred, SCEVCheckBlock->getTerminator());
1959     }
1960 
1961     const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
1962     if (RtPtrChecking.Need) {
1963       auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
1964       MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
1965                                  "vector.memcheck");
1966 
1967       std::tie(std::ignore, MemRuntimeCheckCond) =
1968           addRuntimeChecks(MemCheckBlock->getTerminator(), L,
1969                            RtPtrChecking.getChecks(), MemCheckExp);
1970       assert(MemRuntimeCheckCond &&
1971              "no RT checks generated although RtPtrChecking "
1972              "claimed checks are required");
1973     }
1974 
1975     if (!MemCheckBlock && !SCEVCheckBlock)
1976       return;
1977 
    // Unhook the temporary blocks containing the checks and update the
    // affected analyses accordingly.
1980     if (SCEVCheckBlock)
1981       SCEVCheckBlock->replaceAllUsesWith(Preheader);
1982     if (MemCheckBlock)
1983       MemCheckBlock->replaceAllUsesWith(Preheader);
1984 
1985     if (SCEVCheckBlock) {
1986       SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
1987       new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
1988       Preheader->getTerminator()->eraseFromParent();
1989     }
1990     if (MemCheckBlock) {
1991       MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
1992       new UnreachableInst(Preheader->getContext(), MemCheckBlock);
1993       Preheader->getTerminator()->eraseFromParent();
1994     }
1995 
1996     DT->changeImmediateDominator(LoopHeader, Preheader);
1997     if (MemCheckBlock) {
1998       DT->eraseNode(MemCheckBlock);
1999       LI->removeBlock(MemCheckBlock);
2000     }
2001     if (SCEVCheckBlock) {
2002       DT->eraseNode(SCEVCheckBlock);
2003       LI->removeBlock(SCEVCheckBlock);
2004     }
2005   }
2006 
2007   /// Remove the created SCEV & memory runtime check blocks & instructions, if
2008   /// unused.
2009   ~GeneratedRTChecks() {
2010     SCEVExpanderCleaner SCEVCleaner(SCEVExp, *DT);
2011     SCEVExpanderCleaner MemCheckCleaner(MemCheckExp, *DT);
2012     if (!SCEVCheckCond)
2013       SCEVCleaner.markResultUsed();
2014 
2015     if (!MemRuntimeCheckCond)
2016       MemCheckCleaner.markResultUsed();
2017 
2018     if (MemRuntimeCheckCond) {
2019       auto &SE = *MemCheckExp.getSE();
2020       // Memory runtime check generation creates compares that use expanded
2021       // values. Remove them before running the SCEVExpanderCleaners.
2022       for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
2023         if (MemCheckExp.isInsertedInstruction(&I))
2024           continue;
2025         SE.forgetValue(&I);
2026         SE.eraseValueFromMap(&I);
2027         I.eraseFromParent();
2028       }
2029     }
2030     MemCheckCleaner.cleanup();
2031     SCEVCleaner.cleanup();
2032 
2033     if (SCEVCheckCond)
2034       SCEVCheckBlock->eraseFromParent();
2035     if (MemRuntimeCheckCond)
2036       MemCheckBlock->eraseFromParent();
2037   }
2038 
2039   /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and
2040   /// adjusts the branches to branch to the vector preheader or \p Bypass,
2041   /// depending on the generated condition.
2042   BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass,
2043                              BasicBlock *LoopVectorPreHeader,
2044                              BasicBlock *LoopExitBlock) {
2045     if (!SCEVCheckCond)
2046       return nullptr;
2047     if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond))
2048       if (C->isZero())
2049         return nullptr;
2050 
2051     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2052 
2053     BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock);
    // If the vector loop preheader is itself inside a loop, add the SCEV
    // check block to that loop as well.
2055     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2056       PL->addBasicBlockToLoop(SCEVCheckBlock, *LI);
2057 
2058     SCEVCheckBlock->getTerminator()->eraseFromParent();
2059     SCEVCheckBlock->moveBefore(LoopVectorPreHeader);
2060     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2061                                                 SCEVCheckBlock);
2062 
2063     DT->addNewBlock(SCEVCheckBlock, Pred);
2064     DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock);
2065 
2066     ReplaceInstWithInst(
2067         SCEVCheckBlock->getTerminator(),
2068         BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond));
2069     // Mark the check as used, to prevent it from being removed during cleanup.
2070     SCEVCheckCond = nullptr;
2071     return SCEVCheckBlock;
2072   }
2073 
2074   /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts
2075   /// the branches to branch to the vector preheader or \p Bypass, depending on
2076   /// the generated condition.
2077   BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass,
2078                                    BasicBlock *LoopVectorPreHeader) {
    // Check if we generated code that checks at runtime whether arrays
    // overlap.
2080     if (!MemRuntimeCheckCond)
2081       return nullptr;
2082 
2083     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2084     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2085                                                 MemCheckBlock);
2086 
2087     DT->addNewBlock(MemCheckBlock, Pred);
2088     DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock);
2089     MemCheckBlock->moveBefore(LoopVectorPreHeader);
2090 
2091     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2092       PL->addBasicBlockToLoop(MemCheckBlock, *LI);
2093 
2094     ReplaceInstWithInst(
2095         MemCheckBlock->getTerminator(),
2096         BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond));
2097     MemCheckBlock->getTerminator()->setDebugLoc(
2098         Pred->getTerminator()->getDebugLoc());
2099 
2100     // Mark the check as used, to prevent it from being removed during cleanup.
2101     MemRuntimeCheckCond = nullptr;
2102     return MemCheckBlock;
2103   }
2104 };
2105 
2106 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
2107 // vectorization. The loop needs to be annotated with #pragma omp simd
2108 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the
2109 // vector length information is not provided, vectorization is not considered
2110 // explicit. Interleave hints are not allowed either. These limitations will be
2111 // relaxed in the future.
// Please note that we are currently forced to abuse the pragma 'clang
2113 // vectorize' semantics. This pragma provides *auto-vectorization hints*
2114 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
2115 // provides *explicit vectorization hints* (LV can bypass legal checks and
2116 // assume that vectorization is legal). However, both hints are implemented
2117 // using the same metadata (llvm.loop.vectorize, processed by
2118 // LoopVectorizeHints). This will be fixed in the future when the native IR
2119 // representation for pragma 'omp simd' is introduced.
2120 static bool isExplicitVecOuterLoop(Loop *OuterLp,
2121                                    OptimizationRemarkEmitter *ORE) {
2122   assert(!OuterLp->isInnermost() && "This is not an outer loop");
2123   LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
2124 
2125   // Only outer loops with an explicit vectorization hint are supported.
2126   // Unannotated outer loops are ignored.
2127   if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
2128     return false;
2129 
2130   Function *Fn = OuterLp->getHeader()->getParent();
2131   if (!Hints.allowVectorization(Fn, OuterLp,
2132                                 true /*VectorizeOnlyWhenForced*/)) {
2133     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
2134     return false;
2135   }
2136 
2137   if (Hints.getInterleave() > 1) {
2138     // TODO: Interleave support is future work.
2139     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
2140                          "outer loops.\n");
2141     Hints.emitRemarkWithHints();
2142     return false;
2143   }
2144 
2145   return true;
2146 }
2147 
2148 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
2149                                   OptimizationRemarkEmitter *ORE,
2150                                   SmallVectorImpl<Loop *> &V) {
2151   // Collect inner loops and outer loops without irreducible control flow. For
2152   // now, only collect outer loops that have explicit vectorization hints. If we
2153   // are stress testing the VPlan H-CFG construction, we collect the outermost
2154   // loop of every loop nest.
2155   if (L.isInnermost() || VPlanBuildStressTest ||
2156       (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
2157     LoopBlocksRPO RPOT(&L);
2158     RPOT.perform(LI);
2159     if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
2160       V.push_back(&L);
2161       // TODO: Collect inner loops inside marked outer loops in case
2162       // vectorization fails for the outer loop. Do not invoke
2163       // 'containsIrreducibleCFG' again for inner loops when the outer loop is
2164       // already known to be reducible. We can use an inherited attribute for
2165       // that.
2166       return;
2167     }
2168   }
2169   for (Loop *InnerL : L)
2170     collectSupportedLoops(*InnerL, LI, ORE, V);
2171 }
2172 
2173 namespace {
2174 
2175 /// The LoopVectorize Pass.
2176 struct LoopVectorize : public FunctionPass {
2177   /// Pass identification, replacement for typeid
2178   static char ID;
2179 
2180   LoopVectorizePass Impl;
2181 
2182   explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
2183                          bool VectorizeOnlyWhenForced = false)
2184       : FunctionPass(ID),
2185         Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
2186     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
2187   }
2188 
2189   bool runOnFunction(Function &F) override {
2190     if (skipFunction(F))
2191       return false;
2192 
2193     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2194     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2195     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
2196     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2197     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
2198     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
2199     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
2200     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2201     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2202     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
2203     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
2204     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2205     auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
2206 
2207     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
2208         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
2209 
2210     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
2211                         GetLAA, *ORE, PSI).MadeAnyChange;
2212   }
2213 
2214   void getAnalysisUsage(AnalysisUsage &AU) const override {
2215     AU.addRequired<AssumptionCacheTracker>();
2216     AU.addRequired<BlockFrequencyInfoWrapperPass>();
2217     AU.addRequired<DominatorTreeWrapperPass>();
2218     AU.addRequired<LoopInfoWrapperPass>();
2219     AU.addRequired<ScalarEvolutionWrapperPass>();
2220     AU.addRequired<TargetTransformInfoWrapperPass>();
2221     AU.addRequired<AAResultsWrapperPass>();
2222     AU.addRequired<LoopAccessLegacyAnalysis>();
2223     AU.addRequired<DemandedBitsWrapperPass>();
2224     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2225     AU.addRequired<InjectTLIMappingsLegacy>();
2226 
    // We currently do not preserve LoopInfo/dominator analyses with outer loop
    // vectorization. Until this is addressed, mark these analyses as preserved
    // only for the non-VPlan-native path.
2230     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
2231     if (!EnableVPlanNativePath) {
2232       AU.addPreserved<LoopInfoWrapperPass>();
2233       AU.addPreserved<DominatorTreeWrapperPass>();
2234     }
2235 
2236     AU.addPreserved<BasicAAWrapperPass>();
2237     AU.addPreserved<GlobalsAAWrapperPass>();
2238     AU.addRequired<ProfileSummaryInfoWrapperPass>();
2239   }
2240 };
2241 
2242 } // end anonymous namespace
2243 
2244 //===----------------------------------------------------------------------===//
2245 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
2246 // LoopVectorizationCostModel and LoopVectorizationPlanner.
2247 //===----------------------------------------------------------------------===//
2248 
2249 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
  // We need to place the broadcast of invariant variables outside the loop,
  // but only if it's proven safe to do so. Otherwise, the broadcast will be
  // placed inside the vector loop body.
2253   Instruction *Instr = dyn_cast<Instruction>(V);
2254   bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
2255                      (!Instr ||
2256                       DT->dominates(Instr->getParent(), LoopVectorPreHeader));
2257   // Place the code for broadcasting invariant variables in the new preheader.
2258   IRBuilder<>::InsertPointGuard Guard(Builder);
2259   if (SafeToHoist)
2260     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2261 
2262   // Broadcast the scalar into all locations in the vector.
2263   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
2264 
2265   return Shuf;
2266 }
2267 
2268 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
2269     const InductionDescriptor &II, Value *Step, Value *Start,
2270     Instruction *EntryVal, VPValue *Def, VPValue *CastDef,
2271     VPTransformState &State) {
2272   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
2273          "Expected either an induction phi-node or a truncate of it!");
2274 
2275   // Construct the initial value of the vector IV in the vector loop preheader
2276   auto CurrIP = Builder.saveIP();
2277   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2278   if (isa<TruncInst>(EntryVal)) {
2279     assert(Start->getType()->isIntegerTy() &&
2280            "Truncation requires an integer type");
2281     auto *TruncType = cast<IntegerType>(EntryVal->getType());
2282     Step = Builder.CreateTrunc(Step, TruncType);
2283     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
2284   }
2285   Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
2286   Value *SteppedStart =
2287       getStepVector(SplatStart, 0, Step, II.getInductionOpcode());
2288 
2289   // We create vector phi nodes for both integer and floating-point induction
2290   // variables. Here, we determine the kind of arithmetic we will perform.
2291   Instruction::BinaryOps AddOp;
2292   Instruction::BinaryOps MulOp;
2293   if (Step->getType()->isIntegerTy()) {
2294     AddOp = Instruction::Add;
2295     MulOp = Instruction::Mul;
2296   } else {
2297     AddOp = II.getInductionOpcode();
2298     MulOp = Instruction::FMul;
2299   }
2300 
2301   // Multiply the vectorization factor by the step using integer or
2302   // floating-point arithmetic as appropriate.
2303   Type *StepType = Step->getType();
2304   if (Step->getType()->isFloatingPointTy())
2305     StepType = IntegerType::get(StepType->getContext(),
2306                                 StepType->getScalarSizeInBits());
2307   Value *RuntimeVF = getRuntimeVF(Builder, StepType, VF);
2308   if (Step->getType()->isFloatingPointTy())
2309     RuntimeVF = Builder.CreateSIToFP(RuntimeVF, Step->getType());
2310   Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF);
2311 
2312   // Create a vector splat to use in the induction update.
2313   //
2314   // FIXME: If the step is non-constant, we create the vector splat with
2315   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
2316   //        handle a constant vector splat.
2317   Value *SplatVF = isa<Constant>(Mul)
2318                        ? ConstantVector::getSplat(VF, cast<Constant>(Mul))
2319                        : Builder.CreateVectorSplat(VF, Mul);
2320   Builder.restoreIP(CurrIP);
2321 
2322   // We may need to add the step a number of times, depending on the unroll
2323   // factor. The last of those goes into the PHI.
2324   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
2325                                     &*LoopVectorBody->getFirstInsertionPt());
2326   VecInd->setDebugLoc(EntryVal->getDebugLoc());
2327   Instruction *LastInduction = VecInd;
2328   for (unsigned Part = 0; Part < UF; ++Part) {
2329     State.set(Def, LastInduction, Part);
2330 
2331     if (isa<TruncInst>(EntryVal))
2332       addMetadata(LastInduction, EntryVal);
2333     recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, CastDef,
2334                                           State, Part);
2335 
2336     LastInduction = cast<Instruction>(
2337         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"));
2338     LastInduction->setDebugLoc(EntryVal->getDebugLoc());
2339   }
2340 
2341   // Move the last step to the end of the latch block. This ensures consistent
2342   // placement of all induction updates.
2343   auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
2344   auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
2345   auto *ICmp = cast<Instruction>(Br->getCondition());
2346   LastInduction->moveBefore(ICmp);
2347   LastInduction->setName("vec.ind.next");
2348 
2349   VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
2350   VecInd->addIncoming(LastInduction, LoopVectorLatch);
2351 }
2352 
2353 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
2354   return Cost->isScalarAfterVectorization(I, VF) ||
2355          Cost->isProfitableToScalarize(I, VF);
2356 }
2357 
2358 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
2359   if (shouldScalarizeInstruction(IV))
2360     return true;
2361   auto isScalarInst = [&](User *U) -> bool {
2362     auto *I = cast<Instruction>(U);
2363     return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
2364   };
2365   return llvm::any_of(IV->users(), isScalarInst);
2366 }
2367 
2368 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast(
2369     const InductionDescriptor &ID, const Instruction *EntryVal,
2370     Value *VectorLoopVal, VPValue *CastDef, VPTransformState &State,
2371     unsigned Part, unsigned Lane) {
2372   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
2373          "Expected either an induction phi-node or a truncate of it!");
2374 
  // This induction variable is not the phi from the original loop but the
  // newly-created IV based on the proof that the casted Phi is equal to the
  // uncasted Phi in the vectorized loop (possibly under a runtime guard). It
  // re-uses the same InductionDescriptor as the original IV, but we don't have
  // to do any recording in this case - that is done when the original IV is
  // processed.
2381   if (isa<TruncInst>(EntryVal))
2382     return;
2383 
2384   const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
2385   if (Casts.empty())
2386     return;
  // Only the first Cast instruction in the Casts vector is of interest. The
  // rest of the Casts (if any exist) have no uses outside the induction
  // update chain itself.
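  // For illustration (names are placeholders), the original loop might
  // contain:
  //   %iv = phi i64 [ 0, %preheader ], [ %iv.next, %latch ]
  //   %iv.trunc = trunc i64 %iv to i32
  //   %iv.cast = sext i32 %iv.trunc to i64 ; proven equal to %iv by a guard
  // Here %iv.cast is the recorded cast instruction, and the widened value
  // computed for %iv is recorded for CastDef as well.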
2390   if (Lane < UINT_MAX)
2391     State.set(CastDef, VectorLoopVal, VPIteration(Part, Lane));
2392   else
2393     State.set(CastDef, VectorLoopVal, Part);
2394 }
2395 
2396 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, Value *Start,
2397                                                 TruncInst *Trunc, VPValue *Def,
2398                                                 VPValue *CastDef,
2399                                                 VPTransformState &State) {
2400   assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
2401          "Primary induction variable must have an integer type");
2402 
2403   auto II = Legal->getInductionVars().find(IV);
2404   assert(II != Legal->getInductionVars().end() && "IV is not an induction");
2405 
2406   auto ID = II->second;
2407   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
2408 
2409   // The value from the original loop to which we are mapping the new induction
2410   // variable.
2411   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
2412 
2413   auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
2414 
  // Generate code for the induction step. Note that induction steps are
  // required to be loop-invariant.
2417   auto CreateStepValue = [&](const SCEV *Step) -> Value * {
2418     assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) &&
2419            "Induction step should be loop invariant");
2420     if (PSE.getSE()->isSCEVable(IV->getType())) {
2421       SCEVExpander Exp(*PSE.getSE(), DL, "induction");
2422       return Exp.expandCodeFor(Step, Step->getType(),
2423                                LoopVectorPreHeader->getTerminator());
2424     }
2425     return cast<SCEVUnknown>(Step)->getValue();
2426   };
2427 
2428   // The scalar value to broadcast. This is derived from the canonical
2429   // induction variable. If a truncation type is given, truncate the canonical
2430   // induction variable and step. Otherwise, derive these values from the
2431   // induction descriptor.
2432   auto CreateScalarIV = [&](Value *&Step) -> Value * {
2433     Value *ScalarIV = Induction;
2434     if (IV != OldInduction) {
2435       ScalarIV = IV->getType()->isIntegerTy()
2436                      ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
2437                      : Builder.CreateCast(Instruction::SIToFP, Induction,
2438                                           IV->getType());
2439       ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID);
2440       ScalarIV->setName("offset.idx");
2441     }
2442     if (Trunc) {
2443       auto *TruncType = cast<IntegerType>(Trunc->getType());
2444       assert(Step->getType()->isIntegerTy() &&
2445              "Truncation requires an integer step");
2446       ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
2447       Step = Builder.CreateTrunc(Step, TruncType);
2448     }
2449     return ScalarIV;
2450   };
2451 
  // Create the vector values from the scalar IV, for the case in which no
  // vector IV is created.
2454   auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) {
2455     Value *Broadcasted = getBroadcastInstrs(ScalarIV);
2456     for (unsigned Part = 0; Part < UF; ++Part) {
2457       assert(!VF.isScalable() && "scalable vectors not yet supported.");
2458       Value *EntryPart =
2459           getStepVector(Broadcasted, VF.getKnownMinValue() * Part, Step,
2460                         ID.getInductionOpcode());
2461       State.set(Def, EntryPart, Part);
2462       if (Trunc)
2463         addMetadata(EntryPart, Trunc);
2464       recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, CastDef,
2465                                             State, Part);
2466     }
2467   };
2468 
2469   // Fast-math-flags propagate from the original induction instruction.
2470   IRBuilder<>::FastMathFlagGuard FMFG(Builder);
2471   if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp()))
2472     Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags());
2473 
2474   // Now do the actual transformations, and start with creating the step value.
2475   Value *Step = CreateStepValue(ID.getStep());
2476   if (VF.isZero() || VF.isScalar()) {
2477     Value *ScalarIV = CreateScalarIV(Step);
2478     CreateSplatIV(ScalarIV, Step);
2479     return;
2480   }
2481 
2482   // Determine if we want a scalar version of the induction variable. This is
2483   // true if the induction variable itself is not widened, or if it has at
2484   // least one user in the loop that is not widened.
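  // For illustration, a user such as the address computation of a predicated
  // (and thus scalarized) memory access still needs per-lane scalar IV values
  // even when the IV itself is widened.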
2485   auto NeedsScalarIV = needsScalarInduction(EntryVal);
2486   if (!NeedsScalarIV) {
2487     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
2488                                     State);
2489     return;
2490   }
2491 
2492   // Try to create a new independent vector induction variable. If we can't
2493   // create the phi node, we will splat the scalar induction variable in each
2494   // loop iteration.
2495   if (!shouldScalarizeInstruction(EntryVal)) {
2496     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
2497                                     State);
2498     Value *ScalarIV = CreateScalarIV(Step);
2499     // Create scalar steps that can be used by instructions we will later
2500     // scalarize. Note that the addition of the scalar steps will not increase
2501     // the number of instructions in the loop in the common case prior to
2502     // InstCombine. We will be trading one vector extract for each scalar step.
2503     buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
2504     return;
2505   }
2506 
  // All IV users are scalar instructions, so only emit a scalar IV, not a
  // vectorized IV, except when we tail-fold: then the splat IV feeds the
  // predicate used by the masked loads/stores.
2510   Value *ScalarIV = CreateScalarIV(Step);
2511   if (!Cost->isScalarEpilogueAllowed())
2512     CreateSplatIV(ScalarIV, Step);
2513   buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
2514 }
2515 
2516 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
2517                                           Instruction::BinaryOps BinOp) {
2518   // Create and check the types.
2519   auto *ValVTy = cast<VectorType>(Val->getType());
2520   ElementCount VLen = ValVTy->getElementCount();
2521 
2522   Type *STy = Val->getType()->getScalarType();
2523   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
2524          "Induction Step must be an integer or FP");
2525   assert(Step->getType() == STy && "Step has wrong type");
2526 
2527   SmallVector<Constant *, 8> Indices;
2528 
2529   // Create a vector of consecutive numbers from zero to VF.
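  // For integer inductions the result is Val + (StartIdx + <0, 1, ..., VF-1>)
  // * Step. For illustration, with VF = 4, StartIdx = 4 and Step = 2 this
  // yields Val + <8, 10, 12, 14>.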
2530   VectorType *InitVecValVTy = ValVTy;
2531   Type *InitVecValSTy = STy;
2532   if (STy->isFloatingPointTy()) {
2533     InitVecValSTy =
2534         IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
2535     InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
2536   }
2537   Value *InitVec = Builder.CreateStepVector(InitVecValVTy);
2538 
2539   // Add on StartIdx
2540   Value *StartIdxSplat = Builder.CreateVectorSplat(
2541       VLen, ConstantInt::get(InitVecValSTy, StartIdx));
2542   InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);
2543 
2544   if (STy->isIntegerTy()) {
2545     Step = Builder.CreateVectorSplat(VLen, Step);
2546     assert(Step->getType() == Val->getType() && "Invalid step vec");
    // FIXME: The newly created binary instructions should contain nsw/nuw
    //        flags, which can be found in the original scalar operations.
2549     Step = Builder.CreateMul(InitVec, Step);
2550     return Builder.CreateAdd(Val, Step, "induction");
2551   }
2552 
2553   // Floating point induction.
2554   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
2555          "Binary Opcode should be specified for FP induction");
2556   InitVec = Builder.CreateUIToFP(InitVec, ValVTy);
2557   Step = Builder.CreateVectorSplat(VLen, Step);
2558   Value *MulOp = Builder.CreateFMul(InitVec, Step);
2559   return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
2560 }
2561 
2562 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
2563                                            Instruction *EntryVal,
2564                                            const InductionDescriptor &ID,
2565                                            VPValue *Def, VPValue *CastDef,
2566                                            VPTransformState &State) {
2567   // We shouldn't have to build scalar steps if we aren't vectorizing.
2568   assert(VF.isVector() && "VF should be greater than one");
  // Get the value type and ensure it and the step have the same type.
2570   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
2571   assert(ScalarIVTy == Step->getType() &&
2572          "Val and Step should have the same type");
2573 
2574   // We build scalar steps for both integer and floating-point induction
2575   // variables. Here, we determine the kind of arithmetic we will perform.
2576   Instruction::BinaryOps AddOp;
2577   Instruction::BinaryOps MulOp;
2578   if (ScalarIVTy->isIntegerTy()) {
2579     AddOp = Instruction::Add;
2580     MulOp = Instruction::Mul;
2581   } else {
2582     AddOp = ID.getInductionOpcode();
2583     MulOp = Instruction::FMul;
2584   }
2585 
2586   // Determine the number of scalars we need to generate for each unroll
2587   // iteration. If EntryVal is uniform, we only need to generate the first
2588   // lane. Otherwise, we generate all VF values.
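  // For illustration, with fixed VF = 4 and UF = 2, a non-uniform EntryVal
  // gets eight scalar steps, ScalarIV + (Part * 4 + Lane) * Step for Part in
  // [0, 1] and Lane in [0, 3]; a uniform EntryVal only gets the two lane-0
  // values.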
2589   bool IsUniform =
2590       Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF);
2591   unsigned Lanes = IsUniform ? 1 : VF.getKnownMinValue();
2592   // Compute the scalar steps and save the results in State.
2593   Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
2594                                      ScalarIVTy->getScalarSizeInBits());
2595   Type *VecIVTy = nullptr;
2596   Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr;
2597   if (!IsUniform && VF.isScalable()) {
2598     VecIVTy = VectorType::get(ScalarIVTy, VF);
2599     UnitStepVec = Builder.CreateStepVector(VectorType::get(IntStepTy, VF));
2600     SplatStep = Builder.CreateVectorSplat(VF, Step);
2601     SplatIV = Builder.CreateVectorSplat(VF, ScalarIV);
2602   }
2603 
2604   for (unsigned Part = 0; Part < UF; ++Part) {
2605     Value *StartIdx0 =
2606         createStepForVF(Builder, ConstantInt::get(IntStepTy, Part), VF);
2607 
2608     if (!IsUniform && VF.isScalable()) {
2609       auto *SplatStartIdx = Builder.CreateVectorSplat(VF, StartIdx0);
2610       auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec);
2611       if (ScalarIVTy->isFloatingPointTy())
2612         InitVec = Builder.CreateSIToFP(InitVec, VecIVTy);
2613       auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep);
2614       auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul);
2615       State.set(Def, Add, Part);
2616       recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State,
2617                                             Part);
      // It's useful to record the lane values too, for the known minimum
      // number of elements, so we also compute those below. This improves the
      // code quality when extracting the first element, for example.
2621     }
2622 
2623     if (ScalarIVTy->isFloatingPointTy())
2624       StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy);
2625 
2626     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2627       Value *StartIdx = Builder.CreateBinOp(
2628           AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane));
2629       // The step returned by `createStepForVF` is a runtime-evaluated value
2630       // when VF is scalable. Otherwise, it should be folded into a Constant.
2631       assert((VF.isScalable() || isa<Constant>(StartIdx)) &&
2632              "Expected StartIdx to be folded to a constant when VF is not "
2633              "scalable");
2634       auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
2635       auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul);
2636       State.set(Def, Add, VPIteration(Part, Lane));
2637       recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State,
2638                                             Part, Lane);
2639     }
2640   }
2641 }
2642 
2643 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def,
2644                                                     const VPIteration &Instance,
2645                                                     VPTransformState &State) {
2646   Value *ScalarInst = State.get(Def, Instance);
2647   Value *VectorValue = State.get(Def, Instance.Part);
2648   VectorValue = Builder.CreateInsertElement(
2649       VectorValue, ScalarInst,
2650       Instance.Lane.getAsRuntimeExpr(State.Builder, VF));
2651   State.set(Def, VectorValue, Instance.Part);
2652 }
2653 
2654 Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
2655   assert(Vec->getType()->isVectorTy() && "Invalid type");
2656   return Builder.CreateVectorReverse(Vec, "reverse");
2657 }
2658 
2659 // Return whether we allow using masked interleave-groups (for dealing with
2660 // strided loads/stores that reside in predicated blocks, or for dealing
2661 // with gaps).
2662 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2663   // If an override option has been passed in for interleaved accesses, use it.
2664   if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2665     return EnableMaskedInterleavedMemAccesses;
2666 
2667   return TTI.enableMaskedInterleavedAccessVectorization();
2668 }
2669 
2670 // Try to vectorize the interleave group that \p Instr belongs to.
2671 //
// E.g. Translate the following interleaved load group (factor = 3):
2673 //   for (i = 0; i < N; i+=3) {
2674 //     R = Pic[i];             // Member of index 0
2675 //     G = Pic[i+1];           // Member of index 1
2676 //     B = Pic[i+2];           // Member of index 2
2677 //     ... // do something to R, G, B
2678 //   }
2679 // To:
2680 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2681 //   %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9>   ; R elements
2682 //   %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10>  ; G elements
2683 //   %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11>  ; B elements
2684 //
// Or translate the following interleaved store group (factor = 3):
2686 //   for (i = 0; i < N; i+=3) {
2687 //     ... do something to R, G, B
2688 //     Pic[i]   = R;           // Member of index 0
2689 //     Pic[i+1] = G;           // Member of index 1
2690 //     Pic[i+2] = B;           // Member of index 2
2691 //   }
2692 // To:
2693 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2694 //   %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
2695 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2696 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2697 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2698 void InnerLoopVectorizer::vectorizeInterleaveGroup(
2699     const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
2700     VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
2701     VPValue *BlockInMask) {
2702   Instruction *Instr = Group->getInsertPos();
2703   const DataLayout &DL = Instr->getModule()->getDataLayout();
2704 
2705   // Prepare for the vector type of the interleaved load/store.
2706   Type *ScalarTy = getLoadStoreType(Instr);
2707   unsigned InterleaveFactor = Group->getFactor();
2708   assert(!VF.isScalable() && "scalable vectors not yet supported.");
2709   auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);
2710 
2711   // Prepare for the new pointers.
2712   SmallVector<Value *, 2> AddrParts;
2713   unsigned Index = Group->getIndex(Instr);
2714 
2715   // TODO: extend the masked interleaved-group support to reversed access.
2716   assert((!BlockInMask || !Group->isReverse()) &&
2717          "Reversed masked interleave-group not supported.");
2718 
2719   // If the group is reverse, adjust the index to refer to the last vector lane
2720   // instead of the first. We adjust the index from the first vector lane,
2721   // rather than directly getting the pointer for lane VF - 1, because the
2722   // pointer operand of the interleaved access is supposed to be uniform. For
2723   // uniform instructions, we're only required to generate a value for the
2724   // first vector lane in each unroll iteration.
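  // For illustration, with fixed VF = 4 and an interleave factor of 3, the
  // index grows by (4 - 1) * 3 = 9 elements, moving the base address back by
  // 3 tuples so the wide access starts at the tuple read by the last vector
  // lane.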
2725   if (Group->isReverse())
2726     Index += (VF.getKnownMinValue() - 1) * Group->getFactor();
2727 
2728   for (unsigned Part = 0; Part < UF; Part++) {
2729     Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
2730     setDebugLocFromInst(AddrPart);
2731 
    // Note that the current instruction could be at any member index, so we
    // need to adjust the address to the member of index 0.
    //
    // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
    //       b = A[i];       // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
    //
    // E.g.  A[i+1] = a;     // Member of index 1
    //       A[i]   = b;     // Member of index 0
    //       A[i+2] = c;     // Member of index 2 (Current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].
2743 
2744     bool InBounds = false;
2745     if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
2746       InBounds = gep->isInBounds();
2747     AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2748     cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2749 
2750     // Cast to the vector pointer type.
2751     unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
2752     Type *PtrTy = VecTy->getPointerTo(AddressSpace);
2753     AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
2754   }
2755 
2756   setDebugLocFromInst(Instr);
2757   Value *PoisonVec = PoisonValue::get(VecTy);
2758 
2759   Value *MaskForGaps = nullptr;
2760   if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
2761     MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2762     assert(MaskForGaps && "Mask for Gaps is required but it is null");
2763   }
2764 
2765   // Vectorize the interleaved load group.
2766   if (isa<LoadInst>(Instr)) {
2767     // For each unroll part, create a wide load for the group.
2768     SmallVector<Value *, 2> NewLoads;
2769     for (unsigned Part = 0; Part < UF; Part++) {
2770       Instruction *NewLoad;
2771       if (BlockInMask || MaskForGaps) {
2772         assert(useMaskedInterleavedAccesses(*TTI) &&
2773                "masked interleaved groups are not allowed.");
2774         Value *GroupMask = MaskForGaps;
2775         if (BlockInMask) {
2776           Value *BlockInMaskPart = State.get(BlockInMask, Part);
2777           Value *ShuffledMask = Builder.CreateShuffleVector(
2778               BlockInMaskPart,
2779               createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2780               "interleaved.mask");
2781           GroupMask = MaskForGaps
2782                           ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2783                                                 MaskForGaps)
2784                           : ShuffledMask;
2785         }
2786         NewLoad =
2787             Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(),
2788                                      GroupMask, PoisonVec, "wide.masked.vec");
      } else
        NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
                                            Group->getAlign(), "wide.vec");
2793       Group->addMetadata(NewLoad);
2794       NewLoads.push_back(NewLoad);
2795     }
2796 
2797     // For each member in the group, shuffle out the appropriate data from the
2798     // wide loads.
2799     unsigned J = 0;
2800     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2801       Instruction *Member = Group->getMember(I);
2802 
2803       // Skip the gaps in the group.
2804       if (!Member)
2805         continue;
2806 
2807       auto StrideMask =
2808           createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
2809       for (unsigned Part = 0; Part < UF; Part++) {
2810         Value *StridedVec = Builder.CreateShuffleVector(
2811             NewLoads[Part], StrideMask, "strided.vec");
2812 
        // If this member has a different type, cast the result.
2814         if (Member->getType() != ScalarTy) {
2815           assert(!VF.isScalable() && "VF is assumed to be non scalable.");
2816           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2817           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2818         }
2819 
2820         if (Group->isReverse())
2821           StridedVec = reverseVector(StridedVec);
2822 
2823         State.set(VPDefs[J], StridedVec, Part);
2824       }
2825       ++J;
2826     }
2827     return;
2828   }
2829 
  // The sub vector type for the current instruction.
2831   auto *SubVT = VectorType::get(ScalarTy, VF);
2832 
2833   // Vectorize the interleaved store group.
2834   for (unsigned Part = 0; Part < UF; Part++) {
2835     // Collect the stored vector from each member.
2836     SmallVector<Value *, 4> StoredVecs;
2837     for (unsigned i = 0; i < InterleaveFactor; i++) {
      // Interleaved store groups don't allow gaps, so each index has a member.
      assert(Group->getMember(i) &&
             "Failed to get a member from an interleaved store group");
2840 
2841       Value *StoredVec = State.get(StoredValues[i], Part);
2842 
2843       if (Group->isReverse())
2844         StoredVec = reverseVector(StoredVec);
2845 
      // If this member has a different type, cast it to a unified type.
      if (StoredVec->getType() != SubVT)
        StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2850 
2851       StoredVecs.push_back(StoredVec);
2852     }
2853 
2854     // Concatenate all vectors into a wide vector.
2855     Value *WideVec = concatenateVectors(Builder, StoredVecs);
2856 
2857     // Interleave the elements in the wide vector.
2858     Value *IVec = Builder.CreateShuffleVector(
2859         WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor),
2860         "interleaved.vec");
2861 
2862     Instruction *NewStoreInstr;
2863     if (BlockInMask) {
2864       Value *BlockInMaskPart = State.get(BlockInMask, Part);
2865       Value *ShuffledMask = Builder.CreateShuffleVector(
2866           BlockInMaskPart,
2867           createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2868           "interleaved.mask");
2869       NewStoreInstr = Builder.CreateMaskedStore(
2870           IVec, AddrParts[Part], Group->getAlign(), ShuffledMask);
    } else
      NewStoreInstr =
          Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());
2875 
2876     Group->addMetadata(NewStoreInstr);
2877   }
2878 }
2879 
2880 void InnerLoopVectorizer::vectorizeMemoryInstruction(
2881     Instruction *Instr, VPTransformState &State, VPValue *Def, VPValue *Addr,
2882     VPValue *StoredValue, VPValue *BlockInMask) {
2883   // Attempt to issue a wide load.
2884   LoadInst *LI = dyn_cast<LoadInst>(Instr);
2885   StoreInst *SI = dyn_cast<StoreInst>(Instr);
2886 
2887   assert((LI || SI) && "Invalid Load/Store instruction");
2888   assert((!SI || StoredValue) && "No stored value provided for widened store");
2889   assert((!LI || !StoredValue) && "Stored value provided for widened load");
2890 
2891   LoopVectorizationCostModel::InstWidening Decision =
2892       Cost->getWideningDecision(Instr, VF);
2893   assert((Decision == LoopVectorizationCostModel::CM_Widen ||
2894           Decision == LoopVectorizationCostModel::CM_Widen_Reverse ||
2895           Decision == LoopVectorizationCostModel::CM_GatherScatter) &&
2896          "CM decision is not to widen the memory instruction");
2897 
2898   Type *ScalarDataTy = getLoadStoreType(Instr);
2899 
2900   auto *DataTy = VectorType::get(ScalarDataTy, VF);
2901   const Align Alignment = getLoadStoreAlignment(Instr);
2902 
2903   // Determine if the pointer operand of the access is either consecutive or
2904   // reverse consecutive.
2905   bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse);
2906   bool ConsecutiveStride =
2907       Reverse || (Decision == LoopVectorizationCostModel::CM_Widen);
2908   bool CreateGatherScatter =
2909       (Decision == LoopVectorizationCostModel::CM_GatherScatter);
2910 
2911   // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector
2912   // gather/scatter. Otherwise Decision should have been to Scalarize.
2913   assert((ConsecutiveStride || CreateGatherScatter) &&
2914          "The instruction should be scalarized");
2915   (void)ConsecutiveStride;
2916 
2917   VectorParts BlockInMaskParts(UF);
2918   bool isMaskRequired = BlockInMask;
2919   if (isMaskRequired)
2920     for (unsigned Part = 0; Part < UF; ++Part)
2921       BlockInMaskParts[Part] = State.get(BlockInMask, Part);
2922 
2923   const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
2924     // Calculate the pointer for the specific unroll-part.
2925     GetElementPtrInst *PartPtr = nullptr;
2926 
2927     bool InBounds = false;
2928     if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
2929       InBounds = gep->isInBounds();
2930     if (Reverse) {
      // If the address is consecutive but reversed, then the
      // wide store needs to start at the last vector element.
      // RunTimeVF = VScale * VF.getKnownMinValue()
      // For fixed-width, VScale is 1, so RunTimeVF = VF.getKnownMinValue().
2935       Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), VF);
2936       // NumElt = -Part * RunTimeVF
2937       Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF);
2938       // LastLane = 1 - RunTimeVF
2939       Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF);
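      // For illustration, with fixed-width VF = 4 and Part = 1: RunTimeVF = 4,
      // NumElt = -4 and LastLane = -3, so the part pointer is &Ptr[-7] and the
      // wide access covers Ptr[-7] .. Ptr[-4], matching the reversed vector.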
2940       PartPtr =
2941           cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt));
2942       PartPtr->setIsInBounds(InBounds);
2943       PartPtr = cast<GetElementPtrInst>(
2944           Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane));
2945       PartPtr->setIsInBounds(InBounds);
2946       if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
2947         BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]);
2948     } else {
2949       Value *Increment = createStepForVF(Builder, Builder.getInt32(Part), VF);
2950       PartPtr = cast<GetElementPtrInst>(
2951           Builder.CreateGEP(ScalarDataTy, Ptr, Increment));
2952       PartPtr->setIsInBounds(InBounds);
2953     }
2954 
2955     unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
2956     return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
2957   };
2958 
2959   // Handle Stores:
2960   if (SI) {
2961     setDebugLocFromInst(SI);
2962 
2963     for (unsigned Part = 0; Part < UF; ++Part) {
2964       Instruction *NewSI = nullptr;
2965       Value *StoredVal = State.get(StoredValue, Part);
2966       if (CreateGatherScatter) {
2967         Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2968         Value *VectorGep = State.get(Addr, Part);
2969         NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
2970                                             MaskPart);
2971       } else {
2972         if (Reverse) {
2973           // If we store to reverse consecutive memory locations, then we need
2974           // to reverse the order of elements in the stored value.
2975           StoredVal = reverseVector(StoredVal);
2976           // We don't want to update the value in the map as it might be used in
2977           // another expression. So don't call resetVectorValue(StoredVal).
2978         }
2979         auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0)));
2980         if (isMaskRequired)
2981           NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
2982                                             BlockInMaskParts[Part]);
2983         else
2984           NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
2985       }
2986       addMetadata(NewSI, SI);
2987     }
2988     return;
2989   }
2990 
2991   // Handle loads.
2992   assert(LI && "Must have a load instruction");
2993   setDebugLocFromInst(LI);
2994   for (unsigned Part = 0; Part < UF; ++Part) {
2995     Value *NewLI;
2996     if (CreateGatherScatter) {
2997       Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2998       Value *VectorGep = State.get(Addr, Part);
2999       NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart,
3000                                          nullptr, "wide.masked.gather");
3001       addMetadata(NewLI, LI);
3002     } else {
3003       auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0)));
3004       if (isMaskRequired)
3005         NewLI = Builder.CreateMaskedLoad(
3006             DataTy, VecPtr, Alignment, BlockInMaskParts[Part],
3007             PoisonValue::get(DataTy), "wide.masked.load");
3008       else
3009         NewLI =
3010             Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
3011 
      // Add metadata to the load, but set the recorded vector value to the
      // reverse shuffle.
3013       addMetadata(NewLI, LI);
3014       if (Reverse)
3015         NewLI = reverseVector(NewLI);
3016     }
3017 
3018     State.set(Def, NewLI, Part);
3019   }
3020 }
3021 
3022 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, VPValue *Def,
3023                                                VPUser &User,
3024                                                const VPIteration &Instance,
3025                                                bool IfPredicateInstr,
3026                                                VPTransformState &State) {
3027   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
3028 
3029   // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for
3030   // the first lane and part.
3031   if (isa<NoAliasScopeDeclInst>(Instr))
3032     if (!Instance.isFirstIteration())
3033       return;
3034 
3035   setDebugLocFromInst(Instr);
3036 
  // Does this instruction return a value?
3038   bool IsVoidRetTy = Instr->getType()->isVoidTy();
3039 
3040   Instruction *Cloned = Instr->clone();
3041   if (!IsVoidRetTy)
3042     Cloned->setName(Instr->getName() + ".cloned");
3043 
3044   State.Builder.SetInsertPoint(Builder.GetInsertBlock(),
3045                                Builder.GetInsertPoint());
3046   // Replace the operands of the cloned instructions with their scalar
3047   // equivalents in the new loop.
3048   for (unsigned op = 0, e = User.getNumOperands(); op != e; ++op) {
3049     auto *Operand = dyn_cast<Instruction>(Instr->getOperand(op));
3050     auto InputInstance = Instance;
3051     if (!Operand || !OrigLoop->contains(Operand) ||
3052         (Cost->isUniformAfterVectorization(Operand, State.VF)))
3053       InputInstance.Lane = VPLane::getFirstLane();
3054     auto *NewOp = State.get(User.getOperand(op), InputInstance);
3055     Cloned->setOperand(op, NewOp);
3056   }
3057   addNewMetadata(Cloned, Instr);
3058 
3059   // Place the cloned scalar in the new loop.
3060   Builder.Insert(Cloned);
3061 
3062   State.set(Def, Cloned, Instance);
3063 
  // If we just cloned a new assumption, add it to the assumption cache.
3065   if (auto *II = dyn_cast<AssumeInst>(Cloned))
3066     AC->registerAssumption(II);
3067 
3068   // End if-block.
3069   if (IfPredicateInstr)
3070     PredicatedInstructions.push_back(Cloned);
3071 }
3072 
3073 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
3074                                                       Value *End, Value *Step,
3075                                                       Instruction *DL) {
3076   BasicBlock *Header = L->getHeader();
3077   BasicBlock *Latch = L->getLoopLatch();
  // As we're just creating this loop, it's possible no latch exists
  // yet. If so, use the header, as this will be a single-block loop.
3080   if (!Latch)
3081     Latch = Header;
3082 
3083   IRBuilder<> B(&*Header->getFirstInsertionPt());
3084   Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
3085   setDebugLocFromInst(OldInst, &B);
3086   auto *Induction = B.CreatePHI(Start->getType(), 2, "index");
3087 
3088   B.SetInsertPoint(Latch->getTerminator());
3089   setDebugLocFromInst(OldInst, &B);
3090 
3091   // Create i+1 and fill the PHINode.
3092   //
3093   // If the tail is not folded, we know that End - Start >= Step (either
3094   // statically or through the minimum iteration checks). We also know that both
3095   // Start % Step == 0 and End % Step == 0. We exit the vector loop if %IV +
3096   // %Step == %End. Hence we must exit the loop before %IV + %Step unsigned
3097   // overflows and we can mark the induction increment as NUW.
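  // For illustration, with Start = 0, End = 16 and Step = 8, the induction
  // takes the values 0 and 8, and the loop exits once index.next reaches 16,
  // so the increment never wraps.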
3098   Value *Next = B.CreateAdd(Induction, Step, "index.next",
3099                             /*NUW=*/!Cost->foldTailByMasking(), /*NSW=*/false);
3100   Induction->addIncoming(Start, L->getLoopPreheader());
3101   Induction->addIncoming(Next, Latch);
3102   // Create the compare.
3103   Value *ICmp = B.CreateICmpEQ(Next, End);
3104   B.CreateCondBr(ICmp, L->getUniqueExitBlock(), Header);
3105 
3106   // Now we have two terminators. Remove the old one from the block.
3107   Latch->getTerminator()->eraseFromParent();
3108 
3109   return Induction;
3110 }
3111 
3112 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
3113   if (TripCount)
3114     return TripCount;
3115 
3116   assert(L && "Create Trip Count for null loop.");
3117   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3118   // Find the loop boundaries.
3119   ScalarEvolution *SE = PSE.getSE();
3120   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
3121   assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
3122          "Invalid loop count");
3123 
3124   Type *IdxTy = Legal->getWidestInductionType();
3125   assert(IdxTy && "No type for induction");
3126 
  // The exit count might have the type i64, while the phi is i32. This can
  // happen if we have an induction variable that is sign-extended before the
  // compare. The only way we get a backedge-taken count is if the induction
  // variable was signed and as such will not overflow. In such a case,
  // truncation is legal.
3132   if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
3133       IdxTy->getPrimitiveSizeInBits())
3134     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
3135   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
3136 
3137   // Get the total trip count from the count by adding 1.
3138   const SCEV *ExitCount = SE->getAddExpr(
3139       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
3140 
3141   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
3142 
3143   // Expand the trip count and place the new instructions in the preheader.
3144   // Notice that the pre-header does not change, only the loop body.
3145   SCEVExpander Exp(*SE, DL, "induction");
3146 
3147   // Count holds the overall loop count (N).
3148   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
3149                                 L->getLoopPreheader()->getTerminator());
3150 
3151   if (TripCount->getType()->isPointerTy())
3152     TripCount =
3153         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
3154                                     L->getLoopPreheader()->getTerminator());
3155 
3156   return TripCount;
3157 }
3158 
3159 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
3160   if (VectorTripCount)
3161     return VectorTripCount;
3162 
3163   Value *TC = getOrCreateTripCount(L);
3164   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3165 
3166   Type *Ty = TC->getType();
3167   // This is where we can make the step a runtime constant.
3168   Value *Step = createStepForVF(Builder, ConstantInt::get(Ty, UF), VF);
3169 
3170   // If the tail is to be folded by masking, round the number of iterations N
3171   // up to a multiple of Step instead of rounding down. This is done by first
3172   // adding Step-1 and then rounding down. Note that it's ok if this addition
3173   // overflows: the vector induction variable will eventually wrap to zero given
3174   // that it starts at zero and its Step is a power of two; the loop will then
3175   // exit, with the last early-exit vector comparison also producing all-true.
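  // For illustration, with N = 10, VF = 8 and UF = 1 (Step = 8): N is bumped
  // to 17 and rounded down to a vector trip count of 16, so the masked vector
  // loop runs two iterations and the final 6 lanes of the last iteration are
  // inactive.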
3176   if (Cost->foldTailByMasking()) {
3177     assert(isPowerOf2_32(VF.getKnownMinValue() * UF) &&
3178            "VF*UF must be a power of 2 when folding tail by masking");
3179     assert(!VF.isScalable() &&
3180            "Tail folding not yet supported for scalable vectors");
3181     TC = Builder.CreateAdd(
3182         TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up");
3183   }
3184 
3185   // Now we need to generate the expression for the part of the loop that the
3186   // vectorized body will execute. This is equal to N - (N % Step) if scalar
3187   // iterations are not required for correctness, or N - Step, otherwise. Step
3188   // is equal to the vectorization factor (number of SIMD elements) times the
3189   // unroll factor (number of SIMD instructions).
3190   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
3191 
3192   // There are cases where we *must* run at least one iteration in the remainder
3193   // loop.  See the cost model for when this can happen.  If the step evenly
3194   // divides the trip count, we set the remainder to be equal to the step. If
3195   // the step does not evenly divide the trip count, no adjustment is necessary
3196   // since there will already be scalar iterations. Note that the minimum
3197   // iterations check ensures that N >= Step.
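  // For illustration, with N = 16 and Step = 8, R would be 0; we instead set
  // R = 8, so the vector loop runs a single iteration and the remaining 8
  // iterations execute in the scalar epilogue loop.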
3198   if (Cost->requiresScalarEpilogue(VF)) {
3199     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
3200     R = Builder.CreateSelect(IsZero, Step, R);
3201   }
3202 
3203   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
3204 
3205   return VectorTripCount;
3206 }
3207 
3208 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
3209                                                    const DataLayout &DL) {
  // Verify that V is a vector type with the same number of elements as
  // DstVTy.
3211   auto *DstFVTy = cast<FixedVectorType>(DstVTy);
3212   unsigned VF = DstFVTy->getNumElements();
3213   auto *SrcVecTy = cast<FixedVectorType>(V->getType());
  assert((VF == SrcVecTy->getNumElements()) &&
         "Vector dimensions do not match");
3215   Type *SrcElemTy = SrcVecTy->getElementType();
3216   Type *DstElemTy = DstFVTy->getElementType();
3217   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
3218          "Vector elements must have same size");
3219 
3220   // Do a direct cast if element types are castable.
3221   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
3222     return Builder.CreateBitOrPointerCast(V, DstFVTy);
3223   }
  // V cannot be directly cast to the desired vector type. This may happen
  // when V is a floating-point vector but DstVTy is a vector of pointers, or
  // vice-versa. Handle this using a two-step bitcast through an intermediate
  // integer type, i.e. Ptr <-> Int <-> Float.
3228   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
3229          "Only one type should be a pointer type");
3230   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
3231          "Only one type should be a floating point type");
3232   Type *IntTy =
3233       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
3234   auto *VecIntTy = FixedVectorType::get(IntTy, VF);
3235   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
3236   return Builder.CreateBitOrPointerCast(CastVal, DstFVTy);
3237 }
3238 
3239 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
3240                                                          BasicBlock *Bypass) {
3241   Value *Count = getOrCreateTripCount(L);
  // Reuse the existing vector loop preheader for TC checks.
  // Note that a new preheader block is generated for the vector loop.
3244   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
3245   IRBuilder<> Builder(TCCheckBlock->getTerminator());
3246 
  // Generate code to check if the loop's trip count is less than VF * UF, or
  // equal to it in case a scalar epilogue is required; this implies that the
  // vector trip count is zero. This check also covers the case where adding
  // one to the backedge-taken count overflowed, leading to an incorrect trip
  // count of zero. In this case we will also jump to the scalar loop.
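  // For illustration, with VF = 4 and UF = 2 the bound is 8: we branch to the
  // scalar loop when the trip count is below 8, or equal to 8 when a scalar
  // epilogue is required.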
3252   auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE
3253                                             : ICmpInst::ICMP_ULT;
3254 
  // If the tail is to be folded, the vector loop takes care of all
  // iterations.
3256   Value *CheckMinIters = Builder.getFalse();
3257   if (!Cost->foldTailByMasking()) {
3258     Value *Step =
3259         createStepForVF(Builder, ConstantInt::get(Count->getType(), UF), VF);
3260     CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
3261   }
3262   // Create new preheader for vector loop.
3263   LoopVectorPreHeader =
3264       SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
3265                  "vector.ph");
3266 
3267   assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
3268                                DT->getNode(Bypass)->getIDom()) &&
3269          "TC check is expected to dominate Bypass");
3270 
3271   // Update dominator for Bypass & LoopExit.
3272   DT->changeImmediateDominator(Bypass, TCCheckBlock);
3273   DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
3274 
3275   ReplaceInstWithInst(
3276       TCCheckBlock->getTerminator(),
3277       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
3278   LoopBypassBlocks.push_back(TCCheckBlock);
3279 }
3280 
3281 BasicBlock *InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
3283   BasicBlock *const SCEVCheckBlock =
3284       RTChecks.emitSCEVChecks(L, Bypass, LoopVectorPreHeader, LoopExitBlock);
3285   if (!SCEVCheckBlock)
3286     return nullptr;
3287 
3288   assert(!(SCEVCheckBlock->getParent()->hasOptSize() ||
3289            (OptForSizeBasedOnProfile &&
3290             Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) &&
3291          "Cannot SCEV check stride or overflow when optimizing for size");
3292 
  // Update dominator only if this is the first RT check.
3295   if (LoopBypassBlocks.empty()) {
3296     DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
3297     DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
3298   }
3299 
3300   LoopBypassBlocks.push_back(SCEVCheckBlock);
3301   AddedSafetyChecks = true;
3302   return SCEVCheckBlock;
3303 }
3304 
3305 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L,
3306                                                       BasicBlock *Bypass) {
3307   // VPlan-native path does not do any analysis for runtime checks currently.
3308   if (EnableVPlanNativePath)
3309     return nullptr;
3310 
3311   BasicBlock *const MemCheckBlock =
3312       RTChecks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader);
3313 
  // Check if we generated code that checks at runtime whether arrays overlap.
  // We put the checks into a separate block to make the more common case of
  // few elements faster.
3317   if (!MemCheckBlock)
3318     return nullptr;
3319 
3320   if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
3321     assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
3322            "Cannot emit memory checks when optimizing for size, unless forced "
3323            "to vectorize.");
3324     ORE->emit([&]() {
3325       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
3326                                         L->getStartLoc(), L->getHeader())
3327              << "Code-size may be reduced by not forcing "
3328                 "vectorization, or by source-code modifications "
3329                 "eliminating the need for runtime checks "
3330                 "(e.g., adding 'restrict').";
3331     });
3332   }
3333 
3334   LoopBypassBlocks.push_back(MemCheckBlock);
3335 
3336   AddedSafetyChecks = true;
3337 
3338   // We currently don't use LoopVersioning for the actual loop cloning but we
3339   // still use it to add the noalias metadata.
3340   LVer = std::make_unique<LoopVersioning>(
3341       *Legal->getLAI(),
3342       Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
3343       DT, PSE.getSE());
3344   LVer->prepareNoAliasMetadata();
3345   return MemCheckBlock;
3346 }
3347 
3348 Value *InnerLoopVectorizer::emitTransformedIndex(
3349     IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
3350     const InductionDescriptor &ID) const {
3351 
3352   SCEVExpander Exp(*SE, DL, "induction");
3353   auto Step = ID.getStep();
3354   auto StartValue = ID.getStartValue();
3355   assert(Index->getType()->getScalarType() == Step->getType() &&
3356          "Index scalar type does not match StepValue type");
3357 
  // Note: the IR at this point is broken. We cannot use SE to create any new
  // SCEV and then expand it, hoping that SCEV's simplification will give us
  // more optimal code. Unfortunately, attempting to do so on invalid IR may
  // lead to various SCEV crashes. So all we can do is use the builder and
  // rely on InstCombine for future simplifications. Here we handle some
  // trivial cases only.
3364   auto CreateAdd = [&B](Value *X, Value *Y) {
3365     assert(X->getType() == Y->getType() && "Types don't match!");
3366     if (auto *CX = dyn_cast<ConstantInt>(X))
3367       if (CX->isZero())
3368         return Y;
3369     if (auto *CY = dyn_cast<ConstantInt>(Y))
3370       if (CY->isZero())
3371         return X;
3372     return B.CreateAdd(X, Y);
3373   };
3374 
3375   // We allow X to be a vector type, in which case Y will potentially be
3376   // splatted into a vector with the same element count.
3377   auto CreateMul = [&B](Value *X, Value *Y) {
3378     assert(X->getType()->getScalarType() == Y->getType() &&
3379            "Types don't match!");
3380     if (auto *CX = dyn_cast<ConstantInt>(X))
3381       if (CX->isOne())
3382         return Y;
3383     if (auto *CY = dyn_cast<ConstantInt>(Y))
3384       if (CY->isOne())
3385         return X;
3386     VectorType *XVTy = dyn_cast<VectorType>(X->getType());
3387     if (XVTy && !isa<VectorType>(Y->getType()))
3388       Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
3389     return B.CreateMul(X, Y);
3390   };
3391 
3392   // Get a suitable insert point for SCEV expansion. For blocks in the vector
3393   // loop, choose the end of the vector loop header (=LoopVectorBody), because
3394   // the DomTree is not kept up-to-date for additional blocks generated in the
3395   // vector loop. By using the header as insertion point, we guarantee that the
3396   // expanded instructions dominate all their uses.
3397   auto GetInsertPoint = [this, &B]() {
3398     BasicBlock *InsertBB = B.GetInsertPoint()->getParent();
3399     if (InsertBB != LoopVectorBody &&
3400         LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB))
3401       return LoopVectorBody->getTerminator();
3402     return &*B.GetInsertPoint();
3403   };
3404 
3405   switch (ID.getKind()) {
3406   case InductionDescriptor::IK_IntInduction: {
3407     assert(!isa<VectorType>(Index->getType()) &&
3408            "Vector indices not supported for integer inductions yet");
3409     assert(Index->getType() == StartValue->getType() &&
3410            "Index type does not match StartValue type");
3411     if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
3412       return B.CreateSub(StartValue, Index);
3413     auto *Offset = CreateMul(
3414         Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()));
3415     return CreateAdd(StartValue, Offset);
3416   }
3417   case InductionDescriptor::IK_PtrInduction: {
3418     assert(isa<SCEVConstant>(Step) &&
3419            "Expected constant step for pointer induction");
3420     return B.CreateGEP(
3421         StartValue->getType()->getPointerElementType(), StartValue,
3422         CreateMul(Index,
3423                   Exp.expandCodeFor(Step, Index->getType()->getScalarType(),
3424                                     GetInsertPoint())));
3425   }
3426   case InductionDescriptor::IK_FpInduction: {
3427     assert(!isa<VectorType>(Index->getType()) &&
3428            "Vector indices not supported for FP inductions yet");
3429     assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
3430     auto InductionBinOp = ID.getInductionBinOp();
3431     assert(InductionBinOp &&
3432            (InductionBinOp->getOpcode() == Instruction::FAdd ||
3433             InductionBinOp->getOpcode() == Instruction::FSub) &&
3434            "Original bin op should be defined for FP induction");
3435 
3436     Value *StepValue = cast<SCEVUnknown>(Step)->getValue();
3437     Value *MulExp = B.CreateFMul(StepValue, Index);
3438     return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
3439                          "induction");
3440   }
3441   case InductionDescriptor::IK_NoInduction:
3442     return nullptr;
3443   }
3444   llvm_unreachable("invalid enum");
3445 }
3446 
3447 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
3448   LoopScalarBody = OrigLoop->getHeader();
3449   LoopVectorPreHeader = OrigLoop->getLoopPreheader();
3450   LoopExitBlock = OrigLoop->getUniqueExitBlock();
3451   assert(LoopExitBlock && "Must have an exit block");
3452   assert(LoopVectorPreHeader && "Invalid loop structure");
3453 
3454   LoopMiddleBlock =
3455       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3456                  LI, nullptr, Twine(Prefix) + "middle.block");
3457   LoopScalarPreHeader =
3458       SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
3459                  nullptr, Twine(Prefix) + "scalar.ph");
3460 
3461   // Set up branch from middle block to the exit and scalar preheader blocks.
3462   // completeLoopSkeleton will update the condition to use an iteration check,
3463   // if required to decide whether to execute the remainder.
3464   BranchInst *BrInst =
3465       BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, Builder.getTrue());
3466   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3467   BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3468   ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
3469 
  // We intentionally don't let SplitBlock update LoopInfo, since
  // LoopVectorBody should belong to a different loop than LoopVectorPreHeader.
  // LoopVectorBody is explicitly added to the correct place a few lines later.
3473   LoopVectorBody =
3474       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3475                  nullptr, nullptr, Twine(Prefix) + "vector.body");
3476 
3477   // Update dominator for loop exit.
3478   DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
3479 
3480   // Create and register the new vector loop.
3481   Loop *Lp = LI->AllocateLoop();
3482   Loop *ParentLoop = OrigLoop->getParentLoop();
3483 
3484   // Insert the new loop into the loop nest and register the new basic blocks
3485   // before calling any utilities such as SCEV that require valid LoopInfo.
3486   if (ParentLoop) {
3487     ParentLoop->addChildLoop(Lp);
3488   } else {
3489     LI->addTopLevelLoop(Lp);
3490   }
3491   Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
3492   return Lp;
3493 }
3494 
3495 void InnerLoopVectorizer::createInductionResumeValues(
3496     Loop *L, Value *VectorTripCount,
3497     std::pair<BasicBlock *, Value *> AdditionalBypass) {
3498   assert(VectorTripCount && L && "Expected valid arguments");
3499   assert(((AdditionalBypass.first && AdditionalBypass.second) ||
3500           (!AdditionalBypass.first && !AdditionalBypass.second)) &&
3501          "Inconsistent information about additional bypass.");
3502   // We are going to resume the execution of the scalar loop.
3503   // Go over all of the induction variables that we found and fix the
3504   // PHIs that are left in the scalar version of the loop.
3505   // The starting values of PHI nodes depend on the counter of the last
3506   // iteration in the vectorized loop.
3507   // If we come from a bypass edge then we need to start from the original
3508   // start value.
3509   for (auto &InductionEntry : Legal->getInductionVars()) {
3510     PHINode *OrigPhi = InductionEntry.first;
3511     InductionDescriptor II = InductionEntry.second;
3512 
    // Create phi nodes to merge from the backedge-taken check block.
3514     PHINode *BCResumeVal =
3515         PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
3516                         LoopScalarPreHeader->getTerminator());
3517     // Copy original phi DL over to the new one.
3518     BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
3519     Value *&EndValue = IVEndValues[OrigPhi];
3520     Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
3521     if (OrigPhi == OldInduction) {
3522       // We know what the end value is.
3523       EndValue = VectorTripCount;
3524     } else {
3525       IRBuilder<> B(L->getLoopPreheader()->getTerminator());
3526 
3527       // Fast-math-flags propagate from the original induction instruction.
3528       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3529         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3530 
3531       Type *StepType = II.getStep()->getType();
3532       Instruction::CastOps CastOp =
3533           CastInst::getCastOpcode(VectorTripCount, true, StepType, true);
3534       Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd");
3535       const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout();
3536       EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
3537       EndValue->setName("ind.end");
3538 
3539       // Compute the end value for the additional bypass (if applicable).
3540       if (AdditionalBypass.first) {
3541         B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt()));
3542         CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true,
3543                                          StepType, true);
3544         CRD =
3545             B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd");
3546         EndValueFromAdditionalBypass =
3547             emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
3548         EndValueFromAdditionalBypass->setName("ind.end");
3549       }
3550     }
3551     // The new PHI merges the original incoming value, in case of a bypass,
3552     // or the value at the end of the vectorized loop.
3553     BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);
3554 
3555     // Fix the scalar body counter (PHI node).
3556     // The old induction's phi node in the scalar body needs the truncated
3557     // value.
3558     for (BasicBlock *BB : LoopBypassBlocks)
3559       BCResumeVal->addIncoming(II.getStartValue(), BB);
3560 
3561     if (AdditionalBypass.first)
3562       BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first,
3563                                             EndValueFromAdditionalBypass);
3564 
3565     OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
3566   }
3567 }
3568 
3569 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L,
3570                                                       MDNode *OrigLoopID) {
3571   assert(L && "Expected valid loop.");
3572 
3573   // The trip counts should be cached by now.
3574   Value *Count = getOrCreateTripCount(L);
3575   Value *VectorTripCount = getOrCreateVectorTripCount(L);
3576 
3577   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3578 
3579   // Add a check in the middle block to see if we have completed
3580   // all of the iterations in the first vector loop.
3581   // If (N - N%VF) == N, then we *don't* need to run the remainder.
3582   // If tail is to be folded, we know we don't need to run the remainder.
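  // E.g. with N = 10 and VF = 4, N - N % VF = 8 != 10, so two remainder
  // iterations run in the scalar loop; with N = 8 the compare folds to true
  // and the remainder is skipped entirely.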
3583   if (!Cost->foldTailByMasking()) {
3584     Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
3585                                         Count, VectorTripCount, "cmp.n",
3586                                         LoopMiddleBlock->getTerminator());
3587 
3588     // Here we use the same DebugLoc as the scalar loop latch terminator instead
3589     // of the corresponding compare because they may have ended up with
    // different line numbers and we want to avoid awkward line stepping while
    // debugging, e.g. if the compare has a line number inside the loop.
3592     CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3593     cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN);
3594   }
3595 
3596   // Get ready to start creating new instructions into the vectorized body.
3597   assert(LoopVectorPreHeader == L->getLoopPreheader() &&
3598          "Inconsistent vector loop preheader");
3599   Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
3600 
3601   Optional<MDNode *> VectorizedLoopID =
3602       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
3603                                       LLVMLoopVectorizeFollowupVectorized});
3604   if (VectorizedLoopID.hasValue()) {
3605     L->setLoopID(VectorizedLoopID.getValue());
3606 
3607     // Do not setAlreadyVectorized if loop attributes have been defined
3608     // explicitly.
3609     return LoopVectorPreHeader;
3610   }
3611 
3612   // Keep all loop hints from the original loop on the vector loop (we'll
3613   // replace the vectorizer-specific hints below).
3614   if (MDNode *LID = OrigLoop->getLoopID())
3615     L->setLoopID(LID);
3616 
3617   LoopVectorizeHints Hints(L, true, *ORE);
3618   Hints.setAlreadyVectorized();
3619 
3620 #ifdef EXPENSIVE_CHECKS
3621   assert(DT->verify(DominatorTree::VerificationLevel::Fast));
3622   LI->verify(*DT);
3623 #endif
3624 
3625   return LoopVectorPreHeader;
3626 }
3627 
3628 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
3629   /*
3630    In this function we generate a new loop. The new loop will contain
3631    the vectorized instructions while the old loop will continue to run the
3632    scalar remainder.
3633 
3634        [ ] <-- loop iteration number check.
3635     /   |
3636    /    v
3637   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
3638   |  /  |
3639   | /   v
3640   ||   [ ]     <-- vector pre header.
3641   |/    |
3642   |     v
3643   |    [  ] \
3644   |    [  ]_|   <-- vector loop.
3645   |     |
3646   |     v
3647   |   -[ ]   <--- middle-block.
3648   |  /  |
3649   | /   v
3650   -|- >[ ]     <--- new preheader.
3651    |    |
3652    |    v
3653    |   [ ] \
3654    |   [ ]_|   <-- old scalar loop to handle remainder.
3655     \   |
3656      \  v
3657       >[ ]     <-- exit block.
3658    ...
3659    */
3660 
3661   // Get the metadata of the original loop before it gets modified.
3662   MDNode *OrigLoopID = OrigLoop->getLoopID();
3663 
3664   // Workaround!  Compute the trip count of the original loop and cache it
3665   // before we start modifying the CFG.  This code has a systemic problem
3666   // wherein it tries to run analysis over partially constructed IR; this is
3667   // wrong, and not simply for SCEV.  The trip count of the original loop
3668   // simply happens to be prone to hitting this in practice.  In theory, we
3669   // can hit the same issue for any SCEV, or ValueTracking query done during
3670   // mutation.  See PR49900.
3671   getOrCreateTripCount(OrigLoop);
3672 
3673   // Create an empty vector loop, and prepare basic blocks for the runtime
3674   // checks.
3675   Loop *Lp = createVectorLoopSkeleton("");
3676 
  // Now, compare the new count to zero. If it is zero, skip the vector loop
  // and jump to the scalar loop. This check also covers the case where the
3679   // backedge-taken count is uint##_max: adding one to it will overflow leading
3680   // to an incorrect trip count of zero. In this (rare) case we will also jump
3681   // to the scalar loop.
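  // E.g. with an i8 trip count, a backedge-taken count of 255 yields
  // 255 + 1 == 0 after wrapping, so the check conservatively branches to
  // the scalar loop.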
3682   emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader);
3683 
3684   // Generate the code to check any assumptions that we've made for SCEV
3685   // expressions.
3686   emitSCEVChecks(Lp, LoopScalarPreHeader);
3687 
3688   // Generate the code that checks in runtime if arrays overlap. We put the
3689   // checks into a separate block to make the more common case of few elements
3690   // faster.
3691   emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
3692 
  // Some loops have a single integer induction variable, while other loops
  // don't. One example is C++ iterators that often have multiple pointer
  // induction variables. In the code below we also support a case where we
  // don't have a single induction variable.
3697   //
3698   // We try to obtain an induction variable from the original loop as hard
3699   // as possible. However if we don't find one that:
3700   //   - is an integer
3701   //   - counts from zero, stepping by one
3702   //   - is the size of the widest induction variable type
3703   // then we create a new one.
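  // The canonical IV created below has this shape (illustrative, for VF = 4
  // and UF = 2):
  //   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  //   %index.next = add i64 %index, 8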
3704   OldInduction = Legal->getPrimaryInduction();
3705   Type *IdxTy = Legal->getWidestInductionType();
3706   Value *StartIdx = ConstantInt::get(IdxTy, 0);
3707   // The loop step is equal to the vectorization factor (num of SIMD elements)
3708   // times the unroll factor (num of SIMD instructions).
3709   Builder.SetInsertPoint(&*Lp->getHeader()->getFirstInsertionPt());
3710   Value *Step = createStepForVF(Builder, ConstantInt::get(IdxTy, UF), VF);
3711   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
3712   Induction =
3713       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
3714                               getDebugLocFromInstOrOperands(OldInduction));
3715 
3716   // Emit phis for the new starting index of the scalar loop.
3717   createInductionResumeValues(Lp, CountRoundDown);
3718 
3719   return completeLoopSkeleton(Lp, OrigLoopID);
3720 }
3721 
3722 // Fix up external users of the induction variable. At this point, we are
3723 // in LCSSA form, with all external PHIs that use the IV having one input value,
3724 // coming from the remainder loop. We need those PHIs to also have a correct
3725 // value for the IV when arriving directly from the middle block.
3726 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3727                                        const InductionDescriptor &II,
3728                                        Value *CountRoundDown, Value *EndValue,
3729                                        BasicBlock *MiddleBlock) {
3730   // There are two kinds of external IV usages - those that use the value
3731   // computed in the last iteration (the PHI) and those that use the penultimate
3732   // value (the value that feeds into the phi from the loop latch).
3733   // We allow both, but they, obviously, have different values.
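  // For example (illustrative):
  //   %iv = phi i64 [ 0, %preheader ], [ %iv.next, %latch ]
  //   %iv.next = add i64 %iv, 1
  // An exit user of %iv.next must see the end value, while an exit user of
  // %iv must see the end value minus one step.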
3734 
3735   assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");
3736 
3737   DenseMap<Value *, Value *> MissingVals;
3738 
3739   // An external user of the last iteration's value should see the value that
3740   // the remainder loop uses to initialize its own IV.
3741   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3742   for (User *U : PostInc->users()) {
3743     Instruction *UI = cast<Instruction>(U);
3744     if (!OrigLoop->contains(UI)) {
3745       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3746       MissingVals[UI] = EndValue;
3747     }
3748   }
3749 
  // An external user of the penultimate value needs to see EndValue - Step.
3751   // The simplest way to get this is to recompute it from the constituent SCEVs,
3752   // that is Start + (Step * (CRD - 1)).
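  // E.g. with Start = 0, Step = 2 and CRD = 8 the escaping user sees
  // 0 + 2 * (8 - 1) = 14, one step short of the end value 16.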
3753   for (User *U : OrigPhi->users()) {
3754     auto *UI = cast<Instruction>(U);
3755     if (!OrigLoop->contains(UI)) {
3756       const DataLayout &DL =
3757           OrigLoop->getHeader()->getModule()->getDataLayout();
3758       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3759 
3760       IRBuilder<> B(MiddleBlock->getTerminator());
3761 
3762       // Fast-math-flags propagate from the original induction instruction.
3763       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3764         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3765 
3766       Value *CountMinusOne = B.CreateSub(
3767           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3768       Value *CMO =
3769           !II.getStep()->getType()->isIntegerTy()
3770               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3771                              II.getStep()->getType())
3772               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3773       CMO->setName("cast.cmo");
3774       Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II);
3775       Escape->setName("ind.escape");
3776       MissingVals[UI] = Escape;
3777     }
3778   }
3779 
3780   for (auto &I : MissingVals) {
3781     PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
3783     // that is %IV2 = phi [...], [ %IV1, %latch ]
3784     // In this case, if IV1 has an external use, we need to avoid adding both
3785     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3786     // don't already have an incoming value for the middle block.
3787     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3788       PHI->addIncoming(I.second, MiddleBlock);
3789   }
3790 }
3791 
3792 namespace {
3793 
3794 struct CSEDenseMapInfo {
3795   static bool canHandle(const Instruction *I) {
3796     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3797            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3798   }
3799 
3800   static inline Instruction *getEmptyKey() {
3801     return DenseMapInfo<Instruction *>::getEmptyKey();
3802   }
3803 
3804   static inline Instruction *getTombstoneKey() {
3805     return DenseMapInfo<Instruction *>::getTombstoneKey();
3806   }
3807 
3808   static unsigned getHashValue(const Instruction *I) {
3809     assert(canHandle(I) && "Unknown instruction!");
3810     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3811                                                            I->value_op_end()));
3812   }
3813 
3814   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3815     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3816         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3817       return LHS == RHS;
3818     return LHS->isIdenticalTo(RHS);
3819   }
3820 };
3821 
3822 } // end anonymous namespace
3823 
/// Perform CSE of induction variable instructions.
static void cse(BasicBlock *BB) {
  // Perform simple CSE.
3827   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3828   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
3829     Instruction *In = &*I++;
3830 
3831     if (!CSEDenseMapInfo::canHandle(In))
3832       continue;
3833 
3834     // Check if we can replace this instruction with any of the
3835     // visited instructions.
3836     if (Instruction *V = CSEMap.lookup(In)) {
3837       In->replaceAllUsesWith(V);
3838       In->eraseFromParent();
3839       continue;
3840     }
3841 
3842     CSEMap[In] = In;
3843   }
3844 }
3845 
3846 InstructionCost
3847 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF,
3848                                               bool &NeedToScalarize) const {
3849   Function *F = CI->getCalledFunction();
3850   Type *ScalarRetTy = CI->getType();
3851   SmallVector<Type *, 4> Tys, ScalarTys;
3852   for (auto &ArgOp : CI->arg_operands())
3853     ScalarTys.push_back(ArgOp->getType());
3854 
3855   // Estimate cost of scalarized vector call. The source operands are assumed
3856   // to be vectors, so we need to extract individual elements from there,
3857   // execute VF scalar calls, and then gather the result into the vector return
3858   // value.
3859   InstructionCost ScalarCallCost =
3860       TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput);
3861   if (VF.isScalar())
3862     return ScalarCallCost;
3863 
3864   // Compute corresponding vector type for return value and arguments.
3865   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3866   for (Type *ScalarTy : ScalarTys)
3867     Tys.push_back(ToVectorTy(ScalarTy, VF));
3868 
3869   // Compute costs of unpacking argument values for the scalar calls and
3870   // packing the return values to a vector.
3871   InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
3872 
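  // The scalarized cost thus models VF scalar calls plus the pack/unpack
  // overhead; e.g. for VF = 4: Cost = 4 * ScalarCallCost + ScalarizationCost.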
3873   InstructionCost Cost =
3874       ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
3875 
3876   // If we can't emit a vector call for this function, then the currently found
3877   // cost is the cost we need to return.
3878   NeedToScalarize = true;
3879   VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
3880   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3881 
3882   if (!TLI || CI->isNoBuiltin() || !VecFunc)
3883     return Cost;
3884 
3885   // If the corresponding vector cost is cheaper, return its cost.
3886   InstructionCost VectorCallCost =
3887       TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput);
3888   if (VectorCallCost < Cost) {
3889     NeedToScalarize = false;
3890     Cost = VectorCallCost;
3891   }
3892   return Cost;
3893 }
3894 
3895 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) {
3896   if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy()))
3897     return Elt;
3898   return VectorType::get(Elt, VF);
3899 }
3900 
3901 InstructionCost
3902 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
3903                                                    ElementCount VF) const {
3904   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3905   assert(ID && "Expected intrinsic call!");
3906   Type *RetTy = MaybeVectorizeType(CI->getType(), VF);
3907   FastMathFlags FMF;
3908   if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
3909     FMF = FPMO->getFastMathFlags();
3910 
3911   SmallVector<const Value *> Arguments(CI->arg_begin(), CI->arg_end());
3912   FunctionType *FTy = CI->getCalledFunction()->getFunctionType();
3913   SmallVector<Type *> ParamTys;
3914   std::transform(FTy->param_begin(), FTy->param_end(),
3915                  std::back_inserter(ParamTys),
3916                  [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); });
3917 
3918   IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
3919                                     dyn_cast<IntrinsicInst>(CI));
3920   return TTI.getIntrinsicInstrCost(CostAttrs,
3921                                    TargetTransformInfo::TCK_RecipThroughput);
3922 }
3923 
3924 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3925   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3926   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3927   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3928 }
3929 
3930 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3931   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3932   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3933   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3934 }
3935 
3936 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) {
3937   // For every instruction `I` in MinBWs, truncate the operands, create a
3938   // truncated version of `I` and reextend its result. InstCombine runs
3939   // later and will remove any ext/trunc pairs.
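  // For example (illustrative, minimal bitwidth 8, VF = 4):
  //   %x = add <4 x i32> %a, %b
  // becomes
  //   %a.tr = trunc <4 x i32> %a to <4 x i8>
  //   %b.tr = trunc <4 x i32> %b to <4 x i8>
  //   %x.tr = add <4 x i8> %a.tr, %b.tr
  //   %x.ext = zext <4 x i8> %x.tr to <4 x i32>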
3940   SmallPtrSet<Value *, 4> Erased;
3941   for (const auto &KV : Cost->getMinimalBitwidths()) {
3942     // If the value wasn't vectorized, we must maintain the original scalar
3943     // type. The absence of the value from State indicates that it
3944     // wasn't vectorized.
3945     VPValue *Def = State.Plan->getVPValue(KV.first);
3946     if (!State.hasAnyVectorValue(Def))
3947       continue;
3948     for (unsigned Part = 0; Part < UF; ++Part) {
3949       Value *I = State.get(Def, Part);
3950       if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
3951         continue;
3952       Type *OriginalTy = I->getType();
3953       Type *ScalarTruncatedTy =
3954           IntegerType::get(OriginalTy->getContext(), KV.second);
3955       auto *TruncatedTy = VectorType::get(
3956           ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount());
3957       if (TruncatedTy == OriginalTy)
3958         continue;
3959 
3960       IRBuilder<> B(cast<Instruction>(I));
3961       auto ShrinkOperand = [&](Value *V) -> Value * {
3962         if (auto *ZI = dyn_cast<ZExtInst>(V))
3963           if (ZI->getSrcTy() == TruncatedTy)
3964             return ZI->getOperand(0);
3965         return B.CreateZExtOrTrunc(V, TruncatedTy);
3966       };
3967 
3968       // The actual instruction modification depends on the instruction type,
3969       // unfortunately.
3970       Value *NewI = nullptr;
3971       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3972         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3973                              ShrinkOperand(BO->getOperand(1)));
3974 
3975         // Any wrapping introduced by shrinking this operation shouldn't be
3976         // considered undefined behavior. So, we can't unconditionally copy
3977         // arithmetic wrapping flags to NewI.
3978         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3979       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3980         NewI =
3981             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3982                          ShrinkOperand(CI->getOperand(1)));
3983       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3984         NewI = B.CreateSelect(SI->getCondition(),
3985                               ShrinkOperand(SI->getTrueValue()),
3986                               ShrinkOperand(SI->getFalseValue()));
3987       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3988         switch (CI->getOpcode()) {
3989         default:
3990           llvm_unreachable("Unhandled cast!");
3991         case Instruction::Trunc:
3992           NewI = ShrinkOperand(CI->getOperand(0));
3993           break;
3994         case Instruction::SExt:
3995           NewI = B.CreateSExtOrTrunc(
3996               CI->getOperand(0),
3997               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3998           break;
3999         case Instruction::ZExt:
4000           NewI = B.CreateZExtOrTrunc(
4001               CI->getOperand(0),
4002               smallestIntegerVectorType(OriginalTy, TruncatedTy));
4003           break;
4004         }
4005       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
4006         auto Elements0 = cast<FixedVectorType>(SI->getOperand(0)->getType())
4007                              ->getNumElements();
4008         auto *O0 = B.CreateZExtOrTrunc(
4009             SI->getOperand(0),
4010             FixedVectorType::get(ScalarTruncatedTy, Elements0));
4011         auto Elements1 = cast<FixedVectorType>(SI->getOperand(1)->getType())
4012                              ->getNumElements();
4013         auto *O1 = B.CreateZExtOrTrunc(
4014             SI->getOperand(1),
4015             FixedVectorType::get(ScalarTruncatedTy, Elements1));
4016 
4017         NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
4018       } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
4019         // Don't do anything with the operands, just extend the result.
4020         continue;
4021       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
4022         auto Elements = cast<FixedVectorType>(IE->getOperand(0)->getType())
4023                             ->getNumElements();
4024         auto *O0 = B.CreateZExtOrTrunc(
4025             IE->getOperand(0),
4026             FixedVectorType::get(ScalarTruncatedTy, Elements));
4027         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
4028         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
4029       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
4030         auto Elements = cast<FixedVectorType>(EE->getOperand(0)->getType())
4031                             ->getNumElements();
4032         auto *O0 = B.CreateZExtOrTrunc(
4033             EE->getOperand(0),
4034             FixedVectorType::get(ScalarTruncatedTy, Elements));
        // Note: the index of an extractelement is operand 1, not operand 2.
        NewI = B.CreateExtractElement(O0, EE->getOperand(1));
4036       } else {
4037         // If we don't know what to do, be conservative and don't do anything.
4038         continue;
4039       }
4040 
4041       // Lastly, extend the result.
4042       NewI->takeName(cast<Instruction>(I));
4043       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
4044       I->replaceAllUsesWith(Res);
4045       cast<Instruction>(I)->eraseFromParent();
4046       Erased.insert(I);
4047       State.reset(Def, Res, Part);
4048     }
4049   }
4050 
  // We'll have created a bunch of ZExts that are now dead. Clean them up.
4052   for (const auto &KV : Cost->getMinimalBitwidths()) {
4053     // If the value wasn't vectorized, we must maintain the original scalar
4054     // type. The absence of the value from State indicates that it
4055     // wasn't vectorized.
4056     VPValue *Def = State.Plan->getVPValue(KV.first);
4057     if (!State.hasAnyVectorValue(Def))
4058       continue;
4059     for (unsigned Part = 0; Part < UF; ++Part) {
4060       Value *I = State.get(Def, Part);
4061       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
4062       if (Inst && Inst->use_empty()) {
4063         Value *NewI = Inst->getOperand(0);
4064         Inst->eraseFromParent();
4065         State.reset(Def, NewI, Part);
4066       }
4067     }
4068   }
4069 }
4070 
4071 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
4072   // Insert truncates and extends for any truncated instructions as hints to
4073   // InstCombine.
4074   if (VF.isVector())
4075     truncateToMinimalBitwidths(State);
4076 
4077   // Fix widened non-induction PHIs by setting up the PHI operands.
4078   if (OrigPHIsToFix.size()) {
4079     assert(EnableVPlanNativePath &&
4080            "Unexpected non-induction PHIs for fixup in non VPlan-native path");
4081     fixNonInductionPHIs(State);
4082   }
4083 
4084   // At this point every instruction in the original loop is widened to a
4085   // vector form. Now we need to fix the recurrences in the loop. These PHI
4086   // nodes are currently empty because we did not want to introduce cycles.
4087   // This is the second stage of vectorizing recurrences.
4088   fixCrossIterationPHIs(State);
4089 
4090   // Forget the original basic block.
4091   PSE.getSE()->forgetLoop(OrigLoop);
4092 
4093   // Fix-up external users of the induction variables.
4094   for (auto &Entry : Legal->getInductionVars())
4095     fixupIVUsers(Entry.first, Entry.second,
4096                  getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
4097                  IVEndValues[Entry.first], LoopMiddleBlock);
4098 
4099   fixLCSSAPHIs(State);
4100   for (Instruction *PI : PredicatedInstructions)
4101     sinkScalarOperands(&*PI);
4102 
4103   // Remove redundant induction instructions.
4104   cse(LoopVectorBody);
4105 
4106   // Set/update profile weights for the vector and remainder loops as original
4107   // loop iterations are now distributed among them. Note that original loop
4108   // represented by LoopScalarBody becomes remainder loop after vectorization.
4109   //
  // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
  // end up with a slightly coarser result, but that should be OK since the
  // profile is not inherently precise anyway. Note also that a possible
  // bypass of the vector code caused by legality checks is ignored,
  // optimistically assigning all the weight to the vector loop.
4115   //
  // For scalable vectorization we can't know at compile time how many
  // iterations of the loop are handled in one vector iteration, so instead
  // assume a pessimistic vscale of '1'.
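  // For example, with VF = 4 and UF = 2 a loop that originally ran N
  // iterations is assumed to execute N / 8 vector iterations, with the
  // remaining (< 8) iterations' weight assigned to the scalar loop.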
4119   setProfileInfoAfterUnrolling(
4120       LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody),
4121       LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF);
4122 }
4123 
4124 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
4125   // In order to support recurrences we need to be able to vectorize Phi nodes.
4126   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4127   // stage #2: We now need to fix the recurrences by adding incoming edges to
4128   // the currently empty PHI nodes. At this point every instruction in the
4129   // original loop is widened to a vector form so we can use them to construct
4130   // the incoming edges.
4131   VPBasicBlock *Header = State.Plan->getEntry()->getEntryBasicBlock();
4132   for (VPRecipeBase &R : Header->phis()) {
4133     auto *PhiR = dyn_cast<VPWidenPHIRecipe>(&R);
4134     if (!PhiR)
4135       continue;
4136     auto *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
4137     if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(PhiR)) {
4138       fixReduction(ReductionPhi, State);
4139     } else if (Legal->isFirstOrderRecurrence(OrigPhi))
4140       fixFirstOrderRecurrence(PhiR, State);
4141   }
4142 }
4143 
4144 void InnerLoopVectorizer::fixFirstOrderRecurrence(VPWidenPHIRecipe *PhiR,
4145                                                   VPTransformState &State) {
4146   // This is the second phase of vectorizing first-order recurrences. An
4147   // overview of the transformation is described below. Suppose we have the
4148   // following loop.
4149   //
4150   //   for (int i = 0; i < n; ++i)
4151   //     b[i] = a[i] - a[i - 1];
4152   //
4153   // There is a first-order recurrence on "a". For this loop, the shorthand
4154   // scalar IR looks like:
4155   //
4156   //   scalar.ph:
4157   //     s_init = a[-1]
4158   //     br scalar.body
4159   //
4160   //   scalar.body:
4161   //     i = phi [0, scalar.ph], [i+1, scalar.body]
4162   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
4163   //     s2 = a[i]
4164   //     b[i] = s2 - s1
4165   //     br cond, scalar.body, ...
4166   //
  // In this example, s1 is a recurrence because its value depends on the
4168   // previous iteration. In the first phase of vectorization, we created a
4169   // temporary value for s1. We now complete the vectorization and produce the
4170   // shorthand vector IR shown below (for VF = 4, UF = 1).
4171   //
4172   //   vector.ph:
4173   //     v_init = vector(..., ..., ..., a[-1])
4174   //     br vector.body
4175   //
4176   //   vector.body
4177   //     i = phi [0, vector.ph], [i+4, vector.body]
4178   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
4179   //     v2 = a[i, i+1, i+2, i+3];
4180   //     v3 = vector(v1(3), v2(0, 1, 2))
4181   //     b[i, i+1, i+2, i+3] = v2 - v3
4182   //     br cond, vector.body, middle.block
4183   //
4184   //   middle.block:
4185   //     x = v2(3)
4186   //     br scalar.ph
4187   //
4188   //   scalar.ph:
4189   //     s_init = phi [x, middle.block], [a[-1], otherwise]
4190   //     br scalar.body
4191   //
4192   // After execution completes the vector loop, we extract the next value of
4193   // the recurrence (x) to use as the initial value in the scalar loop.
4194 
4195   auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue();
4196 
4197   auto *IdxTy = Builder.getInt32Ty();
4198   auto *One = ConstantInt::get(IdxTy, 1);
4199 
4200   // Create a vector from the initial value.
4201   auto *VectorInit = ScalarInit;
4202   if (VF.isVector()) {
4203     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4204     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4205     auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
4206     VectorInit = Builder.CreateInsertElement(
4207         PoisonValue::get(VectorType::get(VectorInit->getType(), VF)),
4208         VectorInit, LastIdx, "vector.recur.init");
4209   }
4210 
4211   VPValue *PreviousDef = PhiR->getBackedgeValue();
4212   // We constructed a temporary phi node in the first phase of vectorization.
4213   // This phi node will eventually be deleted.
4214   Builder.SetInsertPoint(cast<Instruction>(State.get(PhiR, 0)));
4215 
4216   // Create a phi node for the new recurrence. The current value will either be
4217   // the initial value inserted into a vector or loop-varying vector value.
4218   auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
4219   VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
4220 
4221   // Get the vectorized previous value of the last part UF - 1. It appears last
4222   // among all unrolled iterations, due to the order of their construction.
4223   Value *PreviousLastPart = State.get(PreviousDef, UF - 1);
4224 
4225   // Find and set the insertion point after the previous value if it is an
4226   // instruction.
4227   BasicBlock::iterator InsertPt;
4228   // Note that the previous value may have been constant-folded so it is not
4229   // guaranteed to be an instruction in the vector loop.
4230   // FIXME: Loop invariant values do not form recurrences. We should deal with
4231   //        them earlier.
4232   if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart))
4233     InsertPt = LoopVectorBody->getFirstInsertionPt();
4234   else {
4235     Instruction *PreviousInst = cast<Instruction>(PreviousLastPart);
4236     if (isa<PHINode>(PreviousLastPart))
4237       // If the previous value is a phi node, we should insert after all the phi
4238       // nodes in the block containing the PHI to avoid breaking basic block
4239       // verification. Note that the basic block may be different to
4240       // LoopVectorBody, in case we predicate the loop.
4241       InsertPt = PreviousInst->getParent()->getFirstInsertionPt();
4242     else
4243       InsertPt = ++PreviousInst->getIterator();
4244   }
4245   Builder.SetInsertPoint(&*InsertPt);
4246 
4247   // The vector from which to take the initial value for the current iteration
4248   // (actual or unrolled). Initially, this is the vector phi node.
4249   Value *Incoming = VecPhi;
4250 
4251   // Shuffle the current and previous vector and update the vector parts.
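  // E.g. for VF = 4 (illustrative), CreateVectorSplice(A, B, -1) produces
  //   <A[3], B[0], B[1], B[2]>
  // i.e. the last lane of the previous vector followed by the first VF - 1
  // lanes of the current one.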
4252   for (unsigned Part = 0; Part < UF; ++Part) {
4253     Value *PreviousPart = State.get(PreviousDef, Part);
4254     Value *PhiPart = State.get(PhiR, Part);
4255     auto *Shuffle = VF.isVector()
4256                         ? Builder.CreateVectorSplice(Incoming, PreviousPart, -1)
4257                         : Incoming;
4258     PhiPart->replaceAllUsesWith(Shuffle);
4259     cast<Instruction>(PhiPart)->eraseFromParent();
4260     State.reset(PhiR, Shuffle, Part);
4261     Incoming = PreviousPart;
4262   }
4263 
4264   // Fix the latch value of the new recurrence in the vector loop.
4265   VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4266 
4267   // Extract the last vector element in the middle block. This will be the
4268   // initial value for the recurrence when jumping to the scalar loop.
4269   auto *ExtractForScalar = Incoming;
4270   if (VF.isVector()) {
4271     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4272     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4273     auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
4274     ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx,
4275                                                     "vector.recur.extract");
4276   }
4277   // Extract the second last element in the middle block if the
4278   // Phi is used outside the loop. We need to extract the phi itself
4279   // and not the last element (the phi update in the current iteration). This
4280   // will be the value when jumping to the exit block from the LoopMiddleBlock,
4281   // when the scalar loop is not run at all.
4282   Value *ExtractForPhiUsedOutsideLoop = nullptr;
4283   if (VF.isVector()) {
4284     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4285     auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2));
4286     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
4287         Incoming, Idx, "vector.recur.extract.for.phi");
4288   } else if (UF > 1)
    // When the loop is unrolled without vectorizing, initialize
    // ExtractForPhiUsedOutsideLoop with the value just prior to the unrolled
    // value of `Incoming`. This is analogous to the vectorized case above:
    // extracting the second last element when VF > 1.
4293     ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);
4294 
4295   // Fix the initial value of the original recurrence in the scalar loop.
4296   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
4297   PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue());
4298   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
4299   for (auto *BB : predecessors(LoopScalarPreHeader)) {
4300     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
4301     Start->addIncoming(Incoming, BB);
4302   }
4303 
4304   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
4305   Phi->setName("scalar.recur");
4306 
4307   // Finally, fix users of the recurrence outside the loop. The users will need
4308   // either the last value of the scalar recurrence or the last value of the
4309   // vector recurrence we extracted in the middle block. Since the loop is in
4310   // LCSSA form, we just need to find all the phi nodes for the original scalar
4311   // recurrence in the exit block, and then add an edge for the middle block.
4312   // Note that LCSSA does not imply single entry when the original scalar loop
4313   // had multiple exiting edges (as we always run the last iteration in the
4314   // scalar epilogue); in that case, the exiting path through middle will be
4315   // dynamically dead and the value picked for the phi doesn't matter.
4316   for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4317     if (any_of(LCSSAPhi.incoming_values(),
4318                [Phi](Value *V) { return V == Phi; }))
4319       LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
4320 }
4321 
4322 void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR,
4323                                        VPTransformState &State) {
4324   PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
  // Get its reduction variable descriptor.
4326   assert(Legal->isReductionVariable(OrigPhi) &&
4327          "Unable to find the reduction variable");
4328   const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
4329 
4330   RecurKind RK = RdxDesc.getRecurrenceKind();
4331   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
4332   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
4333   setDebugLocFromInst(ReductionStartValue);
4334 
4335   VPValue *LoopExitInstDef = State.Plan->getVPValue(LoopExitInst);
4336   // This is the vector-clone of the value that leaves the loop.
4337   Type *VecTy = State.get(LoopExitInstDef, 0)->getType();
4338 
4339   // Wrap flags are in general invalid after vectorization, clear them.
4340   clearReductionWrapFlags(RdxDesc, State);
4341 
4342   // Fix the vector-loop phi.
4343 
4344   // Reductions do not have to start at zero. They can start with
4345   // any loop invariant values.
4346   BasicBlock *VectorLoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
4347 
4348   unsigned LastPartForNewPhi = PhiR->isOrdered() ? 1 : UF;
4349   for (unsigned Part = 0; Part < LastPartForNewPhi; ++Part) {
4350     Value *VecRdxPhi = State.get(PhiR->getVPSingleValue(), Part);
4351     Value *Val = State.get(PhiR->getBackedgeValue(), Part);
4352     if (PhiR->isOrdered())
4353       Val = State.get(PhiR->getBackedgeValue(), UF - 1);
4354 
4355     cast<PHINode>(VecRdxPhi)->addIncoming(Val, VectorLoopLatch);
4356   }
4357 
4358   // Before each round, move the insertion point right between
4359   // the PHIs and the values we are going to write.
4360   // This allows us to write both PHINodes and the extractelement
4361   // instructions.
4362   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4363 
4364   setDebugLocFromInst(LoopExitInst);
4365 
4366   Type *PhiTy = OrigPhi->getType();
4367   // If tail is folded by masking, the vector value to leave the loop should be
4368   // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
4369   // instead of the former. For an inloop reduction the reduction will already
4370   // be predicated, and does not need to be handled here.
4371   if (Cost->foldTailByMasking() && !PhiR->isInLoop()) {
4372     for (unsigned Part = 0; Part < UF; ++Part) {
4373       Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
4374       Value *Sel = nullptr;
4375       for (User *U : VecLoopExitInst->users()) {
4376         if (isa<SelectInst>(U)) {
4377           assert(!Sel && "Reduction exit feeding two selects");
4378           Sel = U;
4379         } else
          assert(isa<PHINode>(U) && "Reduction exit must feed PHIs or a select");
4381       }
4382       assert(Sel && "Reduction exit feeds no select");
4383       State.reset(LoopExitInstDef, Sel, Part);
4384 
4385       // If the target can create a predicated operator for the reduction at no
4386       // extra cost in the loop (for example a predicated vadd), it can be
4387       // cheaper for the select to remain in the loop than be sunk out of it,
4388       // and so use the select value for the phi instead of the old
4389       // LoopExitValue.
4390       if (PreferPredicatedReductionSelect ||
4391           TTI->preferPredicatedReductionSelect(
4392               RdxDesc.getOpcode(), PhiTy,
4393               TargetTransformInfo::ReductionFlags())) {
4394         auto *VecRdxPhi =
4395             cast<PHINode>(State.get(PhiR->getVPSingleValue(), Part));
4396         VecRdxPhi->setIncomingValueForBlock(
4397             LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel);
4398       }
4399     }
4400   }
4401 
4402   // If the vector reduction can be performed in a smaller type, we truncate
4403   // then extend the loop exit value to enable InstCombine to evaluate the
4404   // entire expression in the smaller type.
4405   if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) {
4406     assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!");
4407     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
4408     Builder.SetInsertPoint(
4409         LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
4410     VectorParts RdxParts(UF);
4411     for (unsigned Part = 0; Part < UF; ++Part) {
4412       RdxParts[Part] = State.get(LoopExitInstDef, Part);
4413       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4414       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
4415                                         : Builder.CreateZExt(Trunc, VecTy);
4416       for (Value::user_iterator UI = RdxParts[Part]->user_begin();
4417            UI != RdxParts[Part]->user_end();)
4418         if (*UI != Trunc) {
4419           (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd);
4420           RdxParts[Part] = Extnd;
4421         } else {
4422           ++UI;
4423         }
4424     }
4425     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4426     for (unsigned Part = 0; Part < UF; ++Part) {
4427       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4428       State.reset(LoopExitInstDef, RdxParts[Part], Part);
4429     }
4430   }
4431 
4432   // Reduce all of the unrolled parts into a single vector.
4433   Value *ReducedPartRdx = State.get(LoopExitInstDef, 0);
4434   unsigned Op = RecurrenceDescriptor::getOpcode(RK);
4435 
4436   // The middle block terminator has already been assigned a DebugLoc here (the
4437   // OrigLoop's single latch terminator). We want the whole middle block to
4438   // appear to execute on this line because: (a) it is all compiler generated,
4439   // (b) these instructions are always executed after evaluating the latch
4440   // conditional branch, and (c) other passes may add new predecessors which
4441   // terminate on this line. This is the easiest way to ensure we don't
4442   // accidentally cause an extra step back into the loop while debugging.
4443   setDebugLocFromInst(LoopMiddleBlock->getTerminator());
4444   if (PhiR->isOrdered())
4445     ReducedPartRdx = State.get(LoopExitInstDef, UF - 1);
4446   else {
4447     // Floating-point operations should have some FMF to enable the reduction.
4448     IRBuilderBase::FastMathFlagGuard FMFG(Builder);
4449     Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
4450     for (unsigned Part = 1; Part < UF; ++Part) {
4451       Value *RdxPart = State.get(LoopExitInstDef, Part);
4452       if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
4453         ReducedPartRdx = Builder.CreateBinOp(
4454             (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
4455       } else {
4456         ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
4457       }
4458     }
4459   }
4460 
4461   // Create the reduction after the loop. Note that inloop reductions create the
4462   // target reduction in the loop using a Reduction recipe.
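  // E.g. (illustrative) an integer add reduction with VF = 4 becomes:
  //   %rdx = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %bin.rdx)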
4463   if (VF.isVector() && !PhiR->isInLoop()) {
4464     ReducedPartRdx =
4465         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx);
4466     // If the reduction can be performed in a smaller type, we need to extend
4467     // the reduction to the wider type before we branch to the original loop.
4468     if (PhiTy != RdxDesc.getRecurrenceType())
4469       ReducedPartRdx = RdxDesc.isSigned()
4470                            ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
4471                            : Builder.CreateZExt(ReducedPartRdx, PhiTy);
4472   }
4473 
4474   // Create a phi node that merges control-flow from the backedge-taken check
4475   // block and the middle block.
4476   PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx",
4477                                         LoopScalarPreHeader->getTerminator());
4478   for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
4479     BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
4480   BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
4481 
4482   // Now, we need to fix the users of the reduction variable
4483   // inside and outside of the scalar remainder loop.
4484 
4485   // We know that the loop is in LCSSA form. We need to update the PHI nodes
4486   // in the exit blocks.  See comment on analogous loop in
  // fixFirstOrderRecurrence for a more complete explanation of the logic.
4488   for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4489     if (any_of(LCSSAPhi.incoming_values(),
4490                [LoopExitInst](Value *V) { return V == LoopExitInst; }))
4491       LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
4492 
4493   // Fix the scalar loop reduction variable with the incoming reduction sum
4494   // from the vector body and from the backedge value.
4495   int IncomingEdgeBlockIdx =
4496       OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4497   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4498   // Pick the other block.
4499   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4500   OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4501   OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4502 }
4503 
void InnerLoopVectorizer::clearReductionWrapFlags(
    const RecurrenceDescriptor &RdxDesc, VPTransformState &State) {
4506   RecurKind RK = RdxDesc.getRecurrenceKind();
4507   if (RK != RecurKind::Add && RK != RecurKind::Mul)
4508     return;
4509 
4510   Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
4511   assert(LoopExitInstr && "null loop exit instruction");
4512   SmallVector<Instruction *, 8> Worklist;
4513   SmallPtrSet<Instruction *, 8> Visited;
4514   Worklist.push_back(LoopExitInstr);
4515   Visited.insert(LoopExitInstr);
4516 
4517   while (!Worklist.empty()) {
4518     Instruction *Cur = Worklist.pop_back_val();
4519     if (isa<OverflowingBinaryOperator>(Cur))
4520       for (unsigned Part = 0; Part < UF; ++Part) {
4521         Value *V = State.get(State.Plan->getVPValue(Cur), Part);
4522         cast<Instruction>(V)->dropPoisonGeneratingFlags();
4523       }
4524 
4525     for (User *U : Cur->users()) {
4526       Instruction *UI = cast<Instruction>(U);
4527       if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
4528           Visited.insert(UI).second)
4529         Worklist.push_back(UI);
4530     }
4531   }
4532 }
4533 
4534 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) {
4535   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4536     if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1)
4537       // Some phis were already hand updated by the reduction and recurrence
4538       // code above, leave them alone.
4539       continue;
4540 
4541     auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
4542     // Non-instruction incoming values will have only one value.
4543 
4544     VPLane Lane = VPLane::getFirstLane();
4545     if (isa<Instruction>(IncomingValue) &&
4546         !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue),
4547                                            VF))
4548       Lane = VPLane::getLastLaneForVF(VF);
4549 
4550     // Can be a loop invariant incoming value or the last scalar value to be
4551     // extracted from the vectorized loop.
4552     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4553     Value *lastIncomingValue =
4554         OrigLoop->isLoopInvariant(IncomingValue)
4555             ? IncomingValue
4556             : State.get(State.Plan->getVPValue(IncomingValue),
4557                         VPIteration(UF - 1, Lane));
4558     LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
4559   }
4560 }
4561 
4562 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4563   // The basic block and loop containing the predicated instruction.
4564   auto *PredBB = PredInst->getParent();
4565   auto *VectorLoop = LI->getLoopFor(PredBB);
4566 
4567   // Initialize a worklist with the operands of the predicated instruction.
4568   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4569 
4570   // Holds instructions that we need to analyze again. An instruction may be
4571   // reanalyzed if we don't yet know if we can sink it or not.
4572   SmallVector<Instruction *, 8> InstsToReanalyze;
4573 
4574   // Returns true if a given use occurs in the predicated block. Phi nodes use
4575   // their operands in their corresponding predecessor blocks.
4576   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4577     auto *I = cast<Instruction>(U.getUser());
4578     BasicBlock *BB = I->getParent();
4579     if (auto *Phi = dyn_cast<PHINode>(I))
4580       BB = Phi->getIncomingBlock(
4581           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4582     return BB == PredBB;
4583   };
4584 
  // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
  // operands are then added to the worklist. The algorithm ends when a full
  // pass through the worklist fails to sink a single instruction.
4589   bool Changed;
4590   do {
4591     // Add the instructions that need to be reanalyzed to the worklist, and
4592     // reset the changed indicator.
4593     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4594     InstsToReanalyze.clear();
4595     Changed = false;
4596 
4597     while (!Worklist.empty()) {
4598       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4599 
4600       // We can't sink an instruction if it is a phi node, is not in the loop,
4601       // or may have side effects.
4602       if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) ||
4603           I->mayHaveSideEffects())
4604         continue;
4605 
4606       // If the instruction is already in PredBB, check if we can sink its
4607       // operands. In that case, VPlan's sinkScalarOperands() succeeded in
4608       // sinking the scalar instruction I, hence it appears in PredBB; but it
4609       // may have failed to sink I's operands (recursively), which we try
4610       // (again) here.
4611       if (I->getParent() == PredBB) {
4612         Worklist.insert(I->op_begin(), I->op_end());
4613         continue;
4614       }
4615 
4616       // It's legal to sink the instruction if all its uses occur in the
4617       // predicated block. Otherwise, there's nothing to do yet, and we may
4618       // need to reanalyze the instruction.
4619       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4620         InstsToReanalyze.push_back(I);
4621         continue;
4622       }
4623 
      // Move the instruction to the beginning of the predicated block, and add
      // its operands to the worklist.
4626       I->moveBefore(&*PredBB->getFirstInsertionPt());
4627       Worklist.insert(I->op_begin(), I->op_end());
4628 
4629       // The sinking may have enabled other instructions to be sunk, so we will
4630       // need to iterate.
4631       Changed = true;
4632     }
4633   } while (Changed);
4634 }
4635 
4636 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
4637   for (PHINode *OrigPhi : OrigPHIsToFix) {
4638     VPWidenPHIRecipe *VPPhi =
4639         cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi));
4640     PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
4641     // Make sure the builder has a valid insert point.
4642     Builder.SetInsertPoint(NewPhi);
4643     for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) {
4644       VPValue *Inc = VPPhi->getIncomingValue(i);
4645       VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i);
4646       NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]);
4647     }
4648   }
4649 }
4650 
4651 bool InnerLoopVectorizer::useOrderedReductions(RecurrenceDescriptor &RdxDesc) {
4652   return Cost->useOrderedReductions(RdxDesc);
4653 }
4654 
4655 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, VPValue *VPDef,
4656                                    VPUser &Operands, unsigned UF,
4657                                    ElementCount VF, bool IsPtrLoopInvariant,
4658                                    SmallBitVector &IsIndexLoopInvariant,
4659                                    VPTransformState &State) {
4660   // Construct a vector GEP by widening the operands of the scalar GEP as
4661   // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
4662   // results in a vector of pointers when at least one operand of the GEP
4663   // is vector-typed. Thus, to keep the representation compact, we only use
4664   // vector-typed operands for loop-varying values.
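  // For example (illustrative), widening a GEP whose index is loop-varying:
  //   %g = getelementptr inbounds i32, i32* %base, <4 x i64> %vec.ind
  // produces a <4 x i32*> result even though %base is a scalar pointer.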
4665 
4666   if (VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
4667     // If we are vectorizing, but the GEP has only loop-invariant operands,
4668     // the GEP we build (by only using vector-typed operands for
4669     // loop-varying values) would be a scalar pointer. Thus, to ensure we
4670     // produce a vector of pointers, we need to either arbitrarily pick an
4671     // operand to broadcast, or broadcast a clone of the original GEP.
4672     // Here, we broadcast a clone of the original.
4673     //
4674     // TODO: If at some point we decide to scalarize instructions having
4675     //       loop-invariant operands, this special case will no longer be
4676     //       required. We would add the scalarization decision to
4677     //       collectLoopScalars() and teach getVectorValue() to broadcast
4678     //       the lane-zero scalar value.
4679     auto *Clone = Builder.Insert(GEP->clone());
4680     for (unsigned Part = 0; Part < UF; ++Part) {
4681       Value *EntryPart = Builder.CreateVectorSplat(VF, Clone);
4682       State.set(VPDef, EntryPart, Part);
4683       addMetadata(EntryPart, GEP);
4684     }
4685   } else {
4686     // If the GEP has at least one loop-varying operand, we are sure to
4687     // produce a vector of pointers. But if we are only unrolling, we want
4688     // to produce a scalar GEP for each unroll part. Thus, the GEP we
4689     // produce with the code below will be scalar (if VF == 1) or vector
4690     // (otherwise). Note that for the unroll-only case, we still maintain
4691     // values in the vector mapping with initVector, as we do for other
4692     // instructions.
4693     for (unsigned Part = 0; Part < UF; ++Part) {
4694       // The pointer operand of the new GEP. If it's loop-invariant, we
4695       // won't broadcast it.
4696       auto *Ptr = IsPtrLoopInvariant
4697                       ? State.get(Operands.getOperand(0), VPIteration(0, 0))
4698                       : State.get(Operands.getOperand(0), Part);
4699 
4700       // Collect all the indices for the new GEP. If any index is
4701       // loop-invariant, we won't broadcast it.
4702       SmallVector<Value *, 4> Indices;
4703       for (unsigned I = 1, E = Operands.getNumOperands(); I < E; I++) {
4704         VPValue *Operand = Operands.getOperand(I);
4705         if (IsIndexLoopInvariant[I - 1])
4706           Indices.push_back(State.get(Operand, VPIteration(0, 0)));
4707         else
4708           Indices.push_back(State.get(Operand, Part));
4709       }
4710 
      // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
      // but it should be a vector otherwise.
4713       auto *NewGEP =
4714           GEP->isInBounds()
4715               ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr,
4716                                           Indices)
4717               : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices);
4718       assert((VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
4719              "NewGEP is not a pointer vector");
4720       State.set(VPDef, NewGEP, Part);
4721       addMetadata(NewGEP, GEP);
4722     }
4723   }
4724 }
4725 
4726 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
4727                                               VPWidenPHIRecipe *PhiR,
4728                                               VPTransformState &State) {
4729   PHINode *P = cast<PHINode>(PN);
4730   if (EnableVPlanNativePath) {
4731     // Currently we enter here in the VPlan-native path for non-induction
4732     // PHIs where all control flow is uniform. We simply widen these PHIs.
4733     // Create a vector phi with no operands - the vector phi operands will be
4734     // set at the end of vector code generation.
4735     Type *VecTy = (State.VF.isScalar())
4736                       ? PN->getType()
4737                       : VectorType::get(PN->getType(), State.VF);
4738     Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4739     State.set(PhiR, VecPhi, 0);
4740     OrigPHIsToFix.push_back(P);
4741 
4742     return;
4743   }
4744 
4745   assert(PN->getParent() == OrigLoop->getHeader() &&
4746          "Non-header phis should have been handled elsewhere");
4747 
4748   // In order to support recurrences we need to be able to vectorize Phi nodes.
4749   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4750   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4751   // this value when we vectorize all of the instructions that use the PHI.
4752   if (Legal->isFirstOrderRecurrence(P)) {
4753     Type *VecTy = State.VF.isScalar()
4754                       ? PN->getType()
4755                       : VectorType::get(PN->getType(), State.VF);
4756 
4757     for (unsigned Part = 0; Part < State.UF; ++Part) {
4758       Value *EntryPart = PHINode::Create(
4759           VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
4760       State.set(PhiR, EntryPart, Part);
4761     }
    return;
4763   }
4764 
4765   assert(!Legal->isReductionVariable(P) &&
4766          "reductions should be handled elsewhere");
4767 
4768   setDebugLocFromInst(P);
4769 
4770   // This PHINode must be an induction variable.
4771   // Make sure that we know about it.
4772   assert(Legal->getInductionVars().count(P) && "Not an induction variable");
4773 
4774   InductionDescriptor II = Legal->getInductionVars().lookup(P);
4775   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4776 
4777   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4778   // which can be found from the original scalar operations.
4779   switch (II.getKind()) {
4780   case InductionDescriptor::IK_NoInduction:
4781     llvm_unreachable("Unknown induction");
4782   case InductionDescriptor::IK_IntInduction:
4783   case InductionDescriptor::IK_FpInduction:
4784     llvm_unreachable("Integer/fp induction is handled elsewhere.");
4785   case InductionDescriptor::IK_PtrInduction: {
4786     // Handle the pointer induction variable case.
4787     assert(P->getType()->isPointerTy() && "Unexpected type.");
4788 
4789     if (Cost->isScalarAfterVectorization(P, State.VF)) {
4790       // This is the normalized GEP that starts counting at zero.
4791       Value *PtrInd =
4792           Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType());
4793       // Determine the number of scalars we need to generate for each unroll
4794       // iteration. If the instruction is uniform, we only need to generate the
4795       // first lane. Otherwise, we generate all VF values.
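      // For example (an illustrative sketch): with VF=4 and UF=2, a
      // non-uniform pointer induction generates lanes 0..3 for part 0 and
      // lanes 4..7 for part 1, while a uniform one generates only lane 0
      // of each part.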
4796       bool IsUniform = Cost->isUniformAfterVectorization(P, State.VF);
4797       unsigned Lanes = IsUniform ? 1 : State.VF.getKnownMinValue();
4798 
4799       bool NeedsVectorIndex = !IsUniform && VF.isScalable();
4800       Value *UnitStepVec = nullptr, *PtrIndSplat = nullptr;
4801       if (NeedsVectorIndex) {
4802         Type *VecIVTy = VectorType::get(PtrInd->getType(), VF);
4803         UnitStepVec = Builder.CreateStepVector(VecIVTy);
4804         PtrIndSplat = Builder.CreateVectorSplat(VF, PtrInd);
4805       }
4806 
4807       for (unsigned Part = 0; Part < UF; ++Part) {
4808         Value *PartStart = createStepForVF(
4809             Builder, ConstantInt::get(PtrInd->getType(), Part), VF);
4810 
4811         if (NeedsVectorIndex) {
4812           Value *PartStartSplat = Builder.CreateVectorSplat(VF, PartStart);
4813           Value *Indices = Builder.CreateAdd(PartStartSplat, UnitStepVec);
4814           Value *GlobalIndices = Builder.CreateAdd(PtrIndSplat, Indices);
4815           Value *SclrGep =
4816               emitTransformedIndex(Builder, GlobalIndices, PSE.getSE(), DL, II);
4817           SclrGep->setName("next.gep");
4818           State.set(PhiR, SclrGep, Part);
4819           // We've cached the whole vector, which means we can support the
4820           // extraction of any lane.
4821           continue;
4822         }
4823 
4824         for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4825           Value *Idx = Builder.CreateAdd(
4826               PartStart, ConstantInt::get(PtrInd->getType(), Lane));
4827           Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4828           Value *SclrGep =
4829               emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II);
4830           SclrGep->setName("next.gep");
4831           State.set(PhiR, SclrGep, VPIteration(Part, Lane));
4832         }
4833       }
4834       return;
4835     }
4836     assert(isa<SCEVConstant>(II.getStep()) &&
4837            "Induction step not a SCEV constant!");
4838     Type *PhiType = II.getStep()->getType();
4839 
4840     // Build a pointer phi
4841     Value *ScalarStartValue = II.getStartValue();
4842     Type *ScStValueType = ScalarStartValue->getType();
4843     PHINode *NewPointerPhi =
4844         PHINode::Create(ScStValueType, 2, "pointer.phi", Induction);
4845     NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader);
4846 
4847     // A pointer induction, performed by using a gep
4848     BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
4849     Instruction *InductionLoc = LoopLatch->getTerminator();
4850     const SCEV *ScalarStep = II.getStep();
4851     SCEVExpander Exp(*PSE.getSE(), DL, "induction");
4852     Value *ScalarStepValue =
4853         Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc);
4854     Value *RuntimeVF = getRuntimeVF(Builder, PhiType, VF);
4855     Value *NumUnrolledElems =
4856         Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF));
4857     Value *InductionGEP = GetElementPtrInst::Create(
4858         ScStValueType->getPointerElementType(), NewPointerPhi,
4859         Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind",
4860         InductionLoc);
4861     NewPointerPhi->addIncoming(InductionGEP, LoopLatch);
4862 
4863     // Create UF many actual address geps that use the pointer
4864     // phi as base and a vectorized version of the step value
4865     // (<step*0, ..., step*N>) as offset.
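    //
    // For example (an illustrative sketch, assuming a fixed VF=4): for
    // part 1, StartOffset is <4,4,4,4> + <0,1,2,3> = <4,5,6,7>, which is
    // then multiplied by the splatted step value to form the GEP offsets.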
4866     for (unsigned Part = 0; Part < State.UF; ++Part) {
4867       Type *VecPhiType = VectorType::get(PhiType, State.VF);
4868       Value *StartOffsetScalar =
4869           Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part));
4870       Value *StartOffset =
4871           Builder.CreateVectorSplat(State.VF, StartOffsetScalar);
4872       // Create a vector of consecutive numbers from zero to VF.
4873       StartOffset =
4874           Builder.CreateAdd(StartOffset, Builder.CreateStepVector(VecPhiType));
4875 
4876       Value *GEP = Builder.CreateGEP(
4877           ScStValueType->getPointerElementType(), NewPointerPhi,
4878           Builder.CreateMul(
4879               StartOffset, Builder.CreateVectorSplat(State.VF, ScalarStepValue),
4880               "vector.gep"));
4881       State.set(PhiR, GEP, Part);
4882     }
4883   }
4884   }
4885 }
4886 
4887 /// A helper function for checking whether an integer division-related
4888 /// instruction may divide by zero (in which case it must be predicated if
4889 /// executed conditionally in the scalar code).
4890 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
/// Non-zero divisors that are not compile-time constants will not be
/// converted into multiplication, so we will still end up scalarizing
/// the division, but can do so without predication.
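/// For example, 'udiv i32 %x, %y' (unknown divisor) and 'udiv i32 %x, 0'
/// may divide by zero, whereas 'udiv i32 %x, 7' cannot.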
4894 static bool mayDivideByZero(Instruction &I) {
4895   assert((I.getOpcode() == Instruction::UDiv ||
4896           I.getOpcode() == Instruction::SDiv ||
4897           I.getOpcode() == Instruction::URem ||
4898           I.getOpcode() == Instruction::SRem) &&
4899          "Unexpected instruction");
4900   Value *Divisor = I.getOperand(1);
4901   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4902   return !CInt || CInt->isZero();
4903 }
4904 
4905 void InnerLoopVectorizer::widenInstruction(Instruction &I, VPValue *Def,
4906                                            VPUser &User,
4907                                            VPTransformState &State) {
4908   switch (I.getOpcode()) {
4909   case Instruction::Call:
4910   case Instruction::Br:
4911   case Instruction::PHI:
4912   case Instruction::GetElementPtr:
4913   case Instruction::Select:
4914     llvm_unreachable("This instruction is handled by a different recipe.");
4915   case Instruction::UDiv:
4916   case Instruction::SDiv:
4917   case Instruction::SRem:
4918   case Instruction::URem:
4919   case Instruction::Add:
4920   case Instruction::FAdd:
4921   case Instruction::Sub:
4922   case Instruction::FSub:
4923   case Instruction::FNeg:
4924   case Instruction::Mul:
4925   case Instruction::FMul:
4926   case Instruction::FDiv:
4927   case Instruction::FRem:
4928   case Instruction::Shl:
4929   case Instruction::LShr:
4930   case Instruction::AShr:
4931   case Instruction::And:
4932   case Instruction::Or:
4933   case Instruction::Xor: {
4934     // Just widen unops and binops.
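    // For example (an illustrative sketch, assuming VF=4):
    //   'add nsw i32 %a, %b' becomes 'add nsw <4 x i32> %va, %vb'
    // for each unroll part, with the IR flags copied from the original.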
4935     setDebugLocFromInst(&I);
4936 
4937     for (unsigned Part = 0; Part < UF; ++Part) {
4938       SmallVector<Value *, 2> Ops;
4939       for (VPValue *VPOp : User.operands())
4940         Ops.push_back(State.get(VPOp, Part));
4941 
4942       Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
4943 
4944       if (auto *VecOp = dyn_cast<Instruction>(V))
4945         VecOp->copyIRFlags(&I);
4946 
4947       // Use this vector value for all users of the original instruction.
4948       State.set(Def, V, Part);
4949       addMetadata(V, &I);
4950     }
4951 
4952     break;
4953   }
4954   case Instruction::ICmp:
4955   case Instruction::FCmp: {
4956     // Widen compares. Generate vector compares.
4957     bool FCmp = (I.getOpcode() == Instruction::FCmp);
4958     auto *Cmp = cast<CmpInst>(&I);
4959     setDebugLocFromInst(Cmp);
4960     for (unsigned Part = 0; Part < UF; ++Part) {
4961       Value *A = State.get(User.getOperand(0), Part);
4962       Value *B = State.get(User.getOperand(1), Part);
4963       Value *C = nullptr;
4964       if (FCmp) {
4965         // Propagate fast math flags.
4966         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
4967         Builder.setFastMathFlags(Cmp->getFastMathFlags());
4968         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
4969       } else {
4970         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
4971       }
4972       State.set(Def, C, Part);
4973       addMetadata(C, &I);
4974     }
4975 
4976     break;
4977   }
4978 
4979   case Instruction::ZExt:
4980   case Instruction::SExt:
4981   case Instruction::FPToUI:
4982   case Instruction::FPToSI:
4983   case Instruction::FPExt:
4984   case Instruction::PtrToInt:
4985   case Instruction::IntToPtr:
4986   case Instruction::SIToFP:
4987   case Instruction::UIToFP:
4988   case Instruction::Trunc:
4989   case Instruction::FPTrunc:
4990   case Instruction::BitCast: {
4991     auto *CI = cast<CastInst>(&I);
4992     setDebugLocFromInst(CI);
4993 
    // Vectorize casts.
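    // For example (an illustrative sketch, assuming VF=4):
    //   'zext i8 %x to i32' becomes 'zext <4 x i8> %vx to <4 x i32>'.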
4995     Type *DestTy =
4996         (VF.isScalar()) ? CI->getType() : VectorType::get(CI->getType(), VF);
4997 
4998     for (unsigned Part = 0; Part < UF; ++Part) {
4999       Value *A = State.get(User.getOperand(0), Part);
5000       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
5001       State.set(Def, Cast, Part);
5002       addMetadata(Cast, &I);
5003     }
5004     break;
5005   }
5006   default:
5007     // This instruction is not vectorized by simple widening.
5008     LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
5009     llvm_unreachable("Unhandled instruction!");
5010   } // end of switch.
5011 }
5012 
5013 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def,
5014                                                VPUser &ArgOperands,
5015                                                VPTransformState &State) {
5016   assert(!isa<DbgInfoIntrinsic>(I) &&
5017          "DbgInfoIntrinsic should have been dropped during VPlan construction");
5018   setDebugLocFromInst(&I);
5019 
5020   Module *M = I.getParent()->getParent()->getParent();
5021   auto *CI = cast<CallInst>(&I);
5022 
5023   SmallVector<Type *, 4> Tys;
5024   for (Value *ArgOperand : CI->arg_operands())
5025     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue()));
5026 
5027   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
5028 
  // The flag indicates whether we use an intrinsic or a plain call for the
  // vectorized version of the instruction, i.e. whether calling the
  // intrinsic is more beneficial than calling a library function.
5032   bool NeedToScalarize = false;
5033   InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
5034   InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0;
5035   bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
5036   assert((UseVectorIntrinsic || !NeedToScalarize) &&
5037          "Instruction should be scalarized elsewhere.");
5038   assert((IntrinsicCost.isValid() || CallCost.isValid()) &&
5039          "Either the intrinsic cost or vector call cost must be valid");
5040 
5041   for (unsigned Part = 0; Part < UF; ++Part) {
5042     SmallVector<Type *, 2> TysForDecl = {CI->getType()};
5043     SmallVector<Value *, 4> Args;
5044     for (auto &I : enumerate(ArgOperands.operands())) {
5045       // Some intrinsics have a scalar argument - don't replace it with a
5046       // vector.
5047       Value *Arg;
5048       if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
5049         Arg = State.get(I.value(), Part);
5050       else {
5051         Arg = State.get(I.value(), VPIteration(0, 0));
5052         if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index()))
5053           TysForDecl.push_back(Arg->getType());
5054       }
5055       Args.push_back(Arg);
5056     }
5057 
5058     Function *VectorF;
5059     if (UseVectorIntrinsic) {
5060       // Use vector version of the intrinsic.
5061       if (VF.isVector())
5062         TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
5063       VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
5064       assert(VectorF && "Can't retrieve vector intrinsic.");
5065     } else {
5066       // Use vector version of the function call.
5067       const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
5068 #ifndef NDEBUG
5069       assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
5070              "Can't create vector function.");
5071 #endif
      VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
5073     }
    SmallVector<OperandBundleDef, 1> OpBundles;
    CI->getOperandBundlesAsDefs(OpBundles);
    CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);

    if (isa<FPMathOperator>(V))
      V->copyFastMathFlags(CI);

    State.set(Def, V, Part);
    addMetadata(V, &I);
5083   }
5084 }
5085 
5086 void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I, VPValue *VPDef,
5087                                                  VPUser &Operands,
5088                                                  bool InvariantCond,
5089                                                  VPTransformState &State) {
5090   setDebugLocFromInst(&I);
5091 
  // The condition can be loop invariant but still defined inside the
5093   // loop. This means that we can't just use the original 'cond' value.
5094   // We have to take the 'vectorized' value and pick the first lane.
5095   // Instcombine will make this a no-op.
5096   auto *InvarCond = InvariantCond
5097                         ? State.get(Operands.getOperand(0), VPIteration(0, 0))
5098                         : nullptr;
5099 
5100   for (unsigned Part = 0; Part < UF; ++Part) {
5101     Value *Cond =
5102         InvarCond ? InvarCond : State.get(Operands.getOperand(0), Part);
5103     Value *Op0 = State.get(Operands.getOperand(1), Part);
5104     Value *Op1 = State.get(Operands.getOperand(2), Part);
5105     Value *Sel = Builder.CreateSelect(Cond, Op0, Op1);
5106     State.set(VPDef, Sel, Part);
5107     addMetadata(Sel, &I);
5108   }
5109 }
5110 
5111 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
5112   // We should not collect Scalars more than once per VF. Right now, this
5113   // function is called from collectUniformsAndScalars(), which already does
5114   // this check. Collecting Scalars for VF=1 does not make any sense.
5115   assert(VF.isVector() && Scalars.find(VF) == Scalars.end() &&
5116          "This function should not be visited twice for the same VF");
5117 
5118   SmallSetVector<Instruction *, 8> Worklist;
5119 
5120   // These sets are used to seed the analysis with pointers used by memory
5121   // accesses that will remain scalar.
5122   SmallSetVector<Instruction *, 8> ScalarPtrs;
5123   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
5124   auto *Latch = TheLoop->getLoopLatch();
5125 
5126   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
5127   // The pointer operands of loads and stores will be scalar as long as the
5128   // memory access is not a gather or scatter operation. The value operand of a
5129   // store will remain scalar if the store is scalarized.
5130   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
5131     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
5132     assert(WideningDecision != CM_Unknown &&
5133            "Widening decision should be ready at this moment");
5134     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
5135       if (Ptr == Store->getValueOperand())
5136         return WideningDecision == CM_Scalarize;
5137     assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
5138            "Ptr is neither a value or pointer operand");
5139     return WideningDecision != CM_GatherScatter;
5140   };
5141 
  // A helper that returns true if the given value is a loop-varying bitcast
  // or getelementptr instruction contained in the loop.
5144   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
5145     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
5146             isa<GetElementPtrInst>(V)) &&
5147            !TheLoop->isLoopInvariant(V);
5148   };
5149 
5150   auto isScalarPtrInduction = [&](Instruction *MemAccess, Value *Ptr) {
5151     if (!isa<PHINode>(Ptr) ||
5152         !Legal->getInductionVars().count(cast<PHINode>(Ptr)))
5153       return false;
5154     auto &Induction = Legal->getInductionVars()[cast<PHINode>(Ptr)];
5155     if (Induction.getKind() != InductionDescriptor::IK_PtrInduction)
5156       return false;
5157     return isScalarUse(MemAccess, Ptr);
5158   };
5159 
  // A helper that evaluates a memory access's use of a pointer. If the
  // pointer is actually a pointer induction of the loop, it is inserted
  // into Worklist (together with its update instruction). If the use will
  // be a scalar use, and the pointer is only used by memory accesses, we
  // place the pointer in ScalarPtrs. Otherwise, the pointer is placed in
  // PossibleNonScalarPtrs.
5165   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
5166     if (isScalarPtrInduction(MemAccess, Ptr)) {
5167       Worklist.insert(cast<Instruction>(Ptr));
5168       Instruction *Update = cast<Instruction>(
5169           cast<PHINode>(Ptr)->getIncomingValueForBlock(Latch));
5170       Worklist.insert(Update);
5171       LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Ptr
5172                         << "\n");
5173       LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Update
5174                         << "\n");
5175       return;
5176     }
5177     // We only care about bitcast and getelementptr instructions contained in
5178     // the loop.
5179     if (!isLoopVaryingBitCastOrGEP(Ptr))
5180       return;
5181 
5182     // If the pointer has already been identified as scalar (e.g., if it was
5183     // also identified as uniform), there's nothing to do.
5184     auto *I = cast<Instruction>(Ptr);
5185     if (Worklist.count(I))
5186       return;
5187 
5188     // If the use of the pointer will be a scalar use, and all users of the
5189     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
5190     // place the pointer in PossibleNonScalarPtrs.
5191     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
5192           return isa<LoadInst>(U) || isa<StoreInst>(U);
5193         }))
5194       ScalarPtrs.insert(I);
5195     else
5196       PossibleNonScalarPtrs.insert(I);
5197   };
5198 
  // We seed the scalars analysis with two classes of instructions: (1)
  // instructions marked uniform-after-vectorization and (2) bitcast,
  // getelementptr and (pointer) phi instructions used by memory accesses
  // requiring a scalar use.
5203   //
5204   // (1) Add to the worklist all instructions that have been identified as
5205   // uniform-after-vectorization.
5206   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
5207 
5208   // (2) Add to the worklist all bitcast and getelementptr instructions used by
5209   // memory accesses requiring a scalar use. The pointer operands of loads and
5210   // stores will be scalar as long as the memory accesses is not a gather or
5211   // scatter operation. The value operand of a store will remain scalar if the
5212   // store is scalarized.
5213   for (auto *BB : TheLoop->blocks())
5214     for (auto &I : *BB) {
5215       if (auto *Load = dyn_cast<LoadInst>(&I)) {
5216         evaluatePtrUse(Load, Load->getPointerOperand());
5217       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
5218         evaluatePtrUse(Store, Store->getPointerOperand());
5219         evaluatePtrUse(Store, Store->getValueOperand());
5220       }
5221     }
5222   for (auto *I : ScalarPtrs)
5223     if (!PossibleNonScalarPtrs.count(I)) {
5224       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
5225       Worklist.insert(I);
5226     }
5227 
5228   // Insert the forced scalars.
5229   // FIXME: Currently widenPHIInstruction() often creates a dead vector
5230   // induction variable when the PHI user is scalarized.
5231   auto ForcedScalar = ForcedScalars.find(VF);
5232   if (ForcedScalar != ForcedScalars.end())
5233     for (auto *I : ForcedScalar->second)
5234       Worklist.insert(I);
5235 
5236   // Expand the worklist by looking through any bitcasts and getelementptr
5237   // instructions we've already identified as scalar. This is similar to the
5238   // expansion step in collectLoopUniforms(); however, here we're only
5239   // expanding to include additional bitcasts and getelementptr instructions.
5240   unsigned Idx = 0;
5241   while (Idx != Worklist.size()) {
5242     Instruction *Dst = Worklist[Idx++];
5243     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
5244       continue;
5245     auto *Src = cast<Instruction>(Dst->getOperand(0));
5246     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
5247           auto *J = cast<Instruction>(U);
5248           return !TheLoop->contains(J) || Worklist.count(J) ||
5249                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
5250                   isScalarUse(J, Src));
5251         })) {
5252       Worklist.insert(Src);
5253       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
5254     }
5255   }
5256 
5257   // An induction variable will remain scalar if all users of the induction
5258   // variable and induction variable update remain scalar.
5259   for (auto &Induction : Legal->getInductionVars()) {
5260     auto *Ind = Induction.first;
5261     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5262 
5263     // If tail-folding is applied, the primary induction variable will be used
5264     // to feed a vector compare.
5265     if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
5266       continue;
5267 
5268     // Determine if all users of the induction variable are scalar after
5269     // vectorization.
5270     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5271       auto *I = cast<Instruction>(U);
5272       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
5273     });
5274     if (!ScalarInd)
5275       continue;
5276 
5277     // Determine if all users of the induction variable update instruction are
5278     // scalar after vectorization.
5279     auto ScalarIndUpdate =
5280         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5281           auto *I = cast<Instruction>(U);
5282           return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
5283         });
5284     if (!ScalarIndUpdate)
5285       continue;
5286 
5287     // The induction variable and its update instruction will remain scalar.
5288     Worklist.insert(Ind);
5289     Worklist.insert(IndUpdate);
5290     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
5291     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
5292                       << "\n");
5293   }
5294 
5295   Scalars[VF].insert(Worklist.begin(), Worklist.end());
5296 }
5297 
5298 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I) const {
5299   if (!blockNeedsPredication(I->getParent()))
5300     return false;
  switch (I->getOpcode()) {
5302   default:
5303     break;
5304   case Instruction::Load:
5305   case Instruction::Store: {
5306     if (!Legal->isMaskRequired(I))
5307       return false;
5308     auto *Ptr = getLoadStorePointerOperand(I);
5309     auto *Ty = getLoadStoreType(I);
5310     const Align Alignment = getLoadStoreAlignment(I);
5311     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
5312                                 TTI.isLegalMaskedGather(Ty, Alignment))
5313                             : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
5314                                 TTI.isLegalMaskedScatter(Ty, Alignment));
5315   }
5316   case Instruction::UDiv:
5317   case Instruction::SDiv:
5318   case Instruction::SRem:
5319   case Instruction::URem:
5320     return mayDivideByZero(*I);
5321   }
5322   return false;
5323 }
5324 
5325 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
5326     Instruction *I, ElementCount VF) {
5327   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
5328   assert(getWideningDecision(I, VF) == CM_Unknown &&
5329          "Decision should not be set yet.");
5330   auto *Group = getInterleavedAccessGroup(I);
5331   assert(Group && "Must have a group.");
5332 
  // If the instruction's allocated size doesn't equal its type size, it
5334   // requires padding and will be scalarized.
5335   auto &DL = I->getModule()->getDataLayout();
5336   auto *ScalarTy = getLoadStoreType(I);
5337   if (hasIrregularType(ScalarTy, DL))
5338     return false;
5339 
5340   // Check if masking is required.
5341   // A Group may need masking for one of two reasons: it resides in a block that
5342   // needs predication, or it was decided to use masking to deal with gaps.
5343   bool PredicatedAccessRequiresMasking =
5344       Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I);
5345   bool AccessWithGapsRequiresMasking =
5346       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
5347   if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking)
5348     return true;
5349 
5350   // If masked interleaving is required, we expect that the user/target had
5351   // enabled it, because otherwise it either wouldn't have been created or
5352   // it should have been invalidated by the CostModel.
5353   assert(useMaskedInterleavedAccesses(TTI) &&
5354          "Masked interleave-groups for predicated accesses are not enabled.");
5355 
5356   auto *Ty = getLoadStoreType(I);
5357   const Align Alignment = getLoadStoreAlignment(I);
5358   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
5359                           : TTI.isLegalMaskedStore(Ty, Alignment);
5360 }
5361 
5362 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
5363     Instruction *I, ElementCount VF) {
5364   // Get and ensure we have a valid memory instruction.
5365   LoadInst *LI = dyn_cast<LoadInst>(I);
5366   StoreInst *SI = dyn_cast<StoreInst>(I);
5367   assert((LI || SI) && "Invalid memory instruction");
5368 
5369   auto *Ptr = getLoadStorePointerOperand(I);
5370 
  // First of all, in order to be widened the pointer must be consecutive.
5372   if (!Legal->isConsecutivePtr(Ptr))
5373     return false;
5374 
5375   // If the instruction is a store located in a predicated block, it will be
5376   // scalarized.
5377   if (isScalarWithPredication(I))
5378     return false;
5379 
  // If the instruction's allocated size doesn't equal its type size, it
5381   // requires padding and will be scalarized.
5382   auto &DL = I->getModule()->getDataLayout();
5383   auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
5384   if (hasIrregularType(ScalarTy, DL))
5385     return false;
5386 
5387   return true;
5388 }
5389 
5390 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
5391   // We should not collect Uniforms more than once per VF. Right now,
5392   // this function is called from collectUniformsAndScalars(), which
5393   // already does this check. Collecting Uniforms for VF=1 does not make any
5394   // sense.
5395 
5396   assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
5397          "This function should not be visited twice for the same VF");
5398 
  // Initialize the entry for this VF so that, even if no uniform values are
  // found, we will not analyze it again: Uniforms.count(VF) will return 1.
5401   Uniforms[VF].clear();
5402 
5403   // We now know that the loop is vectorizable!
5404   // Collect instructions inside the loop that will remain uniform after
5405   // vectorization.
5406 
  // Global values, params and instructions outside of the current loop are
  // out of scope.
5409   auto isOutOfScope = [&](Value *V) -> bool {
5410     Instruction *I = dyn_cast<Instruction>(V);
5411     return (!I || !TheLoop->contains(I));
5412   };
5413 
5414   SetVector<Instruction *> Worklist;
5415   BasicBlock *Latch = TheLoop->getLoopLatch();
5416 
5417   // Instructions that are scalar with predication must not be considered
5418   // uniform after vectorization, because that would create an erroneous
5419   // replicating region where only a single instance out of VF should be formed.
5420   // TODO: optimize such seldom cases if found important, see PR40816.
5421   auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
5422     if (isOutOfScope(I)) {
5423       LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
5424                         << *I << "\n");
5425       return;
5426     }
5427     if (isScalarWithPredication(I)) {
5428       LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
5429                         << *I << "\n");
5430       return;
5431     }
5432     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
5433     Worklist.insert(I);
5434   };
5435 
5436   // Start with the conditional branch. If the branch condition is an
5437   // instruction contained in the loop that is only used by the branch, it is
5438   // uniform.
5439   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
5440   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
5441     addToWorklistIfAllowed(Cmp);
5442 
5443   auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
5444     InstWidening WideningDecision = getWideningDecision(I, VF);
5445     assert(WideningDecision != CM_Unknown &&
5446            "Widening decision should be ready at this moment");
5447 
5448     // A uniform memory op is itself uniform.  We exclude uniform stores
5449     // here as they demand the last lane, not the first one.
5450     if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) {
5451       assert(WideningDecision == CM_Scalarize);
5452       return true;
5453     }
5454 
5455     return (WideningDecision == CM_Widen ||
5456             WideningDecision == CM_Widen_Reverse ||
5457             WideningDecision == CM_Interleave);
5458   };
5459 
  // Returns true if Ptr is the pointer operand of a memory access instruction
5462   // I, and I is known to not require scalarization.
5463   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
5464     return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
5465   };
5466 
5467   // Holds a list of values which are known to have at least one uniform use.
5468   // Note that there may be other uses which aren't uniform.  A "uniform use"
5469   // here is something which only demands lane 0 of the unrolled iterations;
  // it does not imply that all lanes produce the same value (i.e. this is
  // not the usual meaning of uniform).
5472   SetVector<Value *> HasUniformUse;
5473 
5474   // Scan the loop for instructions which are either a) known to have only
5475   // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
5476   for (auto *BB : TheLoop->blocks())
5477     for (auto &I : *BB) {
5478       // If there's no pointer operand, there's nothing to do.
5479       auto *Ptr = getLoadStorePointerOperand(&I);
5480       if (!Ptr)
5481         continue;
5482 
5483       // A uniform memory op is itself uniform.  We exclude uniform stores
5484       // here as they demand the last lane, not the first one.
5485       if (isa<LoadInst>(I) && Legal->isUniformMemOp(I))
5486         addToWorklistIfAllowed(&I);
5487 
5488       if (isUniformDecision(&I, VF)) {
5489         assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check");
5490         HasUniformUse.insert(Ptr);
5491       }
5492     }
5493 
5494   // Add to the worklist any operands which have *only* uniform (e.g. lane 0
5495   // demanding) users.  Since loops are assumed to be in LCSSA form, this
5496   // disallows uses outside the loop as well.
5497   for (auto *V : HasUniformUse) {
5498     if (isOutOfScope(V))
5499       continue;
5500     auto *I = cast<Instruction>(V);
5501     auto UsersAreMemAccesses =
5502       llvm::all_of(I->users(), [&](User *U) -> bool {
5503         return isVectorizedMemAccessUse(cast<Instruction>(U), V);
5504       });
5505     if (UsersAreMemAccesses)
5506       addToWorklistIfAllowed(I);
5507   }
5508 
  // Expand Worklist in topological order: whenever a new instruction
  // is added, its users should already be inside Worklist. This ensures
  // that a uniform instruction will only be used by uniform instructions.
5512   unsigned idx = 0;
5513   while (idx != Worklist.size()) {
5514     Instruction *I = Worklist[idx++];
5515 
5516     for (auto OV : I->operand_values()) {
5517       // isOutOfScope operands cannot be uniform instructions.
5518       if (isOutOfScope(OV))
5519         continue;
      // First-order recurrence phis should typically be considered
      // non-uniform.
5522       auto *OP = dyn_cast<PHINode>(OV);
5523       if (OP && Legal->isFirstOrderRecurrence(OP))
5524         continue;
5525       // If all the users of the operand are uniform, then add the
5526       // operand into the uniform worklist.
5527       auto *OI = cast<Instruction>(OV);
5528       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
5529             auto *J = cast<Instruction>(U);
5530             return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
5531           }))
5532         addToWorklistIfAllowed(OI);
5533     }
5534   }
5535 
5536   // For an instruction to be added into Worklist above, all its users inside
5537   // the loop should also be in Worklist. However, this condition cannot be
5538   // true for phi nodes that form a cyclic dependence. We must process phi
5539   // nodes separately. An induction variable will remain uniform if all users
5540   // of the induction variable and induction variable update remain uniform.
5541   // The code below handles both pointer and non-pointer induction variables.
5542   for (auto &Induction : Legal->getInductionVars()) {
5543     auto *Ind = Induction.first;
5544     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5545 
5546     // Determine if all users of the induction variable are uniform after
5547     // vectorization.
5548     auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5549       auto *I = cast<Instruction>(U);
5550       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
5551              isVectorizedMemAccessUse(I, Ind);
5552     });
5553     if (!UniformInd)
5554       continue;
5555 
5556     // Determine if all users of the induction variable update instruction are
5557     // uniform after vectorization.
5558     auto UniformIndUpdate =
5559         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5560           auto *I = cast<Instruction>(U);
5561           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
5562                  isVectorizedMemAccessUse(I, IndUpdate);
5563         });
5564     if (!UniformIndUpdate)
5565       continue;
5566 
5567     // The induction variable and its update instruction will remain uniform.
5568     addToWorklistIfAllowed(Ind);
5569     addToWorklistIfAllowed(IndUpdate);
5570   }
5571 
5572   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
5573 }
5574 
5575 bool LoopVectorizationCostModel::runtimeChecksRequired() {
5576   LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
5577 
5578   if (Legal->getRuntimePointerChecking()->Need) {
5579     reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
5580         "runtime pointer checks needed. Enable vectorization of this "
5581         "loop with '#pragma clang loop vectorize(enable)' when "
5582         "compiling with -Os/-Oz",
5583         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5584     return true;
5585   }
5586 
5587   if (!PSE.getUnionPredicate().getPredicates().empty()) {
5588     reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
5589         "runtime SCEV checks needed. Enable vectorization of this "
5590         "loop with '#pragma clang loop vectorize(enable)' when "
5591         "compiling with -Os/-Oz",
5592         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5593     return true;
5594   }
5595 
5596   // FIXME: Avoid specializing for stride==1 instead of bailing out.
5597   if (!Legal->getLAI()->getSymbolicStrides().empty()) {
5598     reportVectorizationFailure("Runtime stride check for small trip count",
5599         "runtime stride == 1 checks needed. Enable vectorization of "
5600         "this loop without such check by compiling with -Os/-Oz",
5601         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5602     return true;
5603   }
5604 
5605   return false;
5606 }
5607 
5608 ElementCount
5609 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
5610   if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) {
5611     reportVectorizationInfo(
5612         "Disabling scalable vectorization, because target does not "
5613         "support scalable vectors.",
5614         "ScalableVectorsUnsupported", ORE, TheLoop);
5615     return ElementCount::getScalable(0);
5616   }
5617 
5618   if (Hints->isScalableVectorizationDisabled()) {
5619     reportVectorizationInfo("Scalable vectorization is explicitly disabled",
5620                             "ScalableVectorizationDisabled", ORE, TheLoop);
5621     return ElementCount::getScalable(0);
5622   }
5623 
5624   auto MaxScalableVF = ElementCount::getScalable(
5625       std::numeric_limits<ElementCount::ScalarTy>::max());
5626 
5627   // Test that the loop-vectorizer can legalize all operations for this MaxVF.
5628   // FIXME: While for scalable vectors this is currently sufficient, this should
5629   // be replaced by a more detailed mechanism that filters out specific VFs,
5630   // instead of invalidating vectorization for a whole set of VFs based on the
5631   // MaxVF.
5632 
5633   // Disable scalable vectorization if the loop contains unsupported reductions.
5634   if (!canVectorizeReductions(MaxScalableVF)) {
5635     reportVectorizationInfo(
5636         "Scalable vectorization not supported for the reduction "
5637         "operations found in this loop.",
5638         "ScalableVFUnfeasible", ORE, TheLoop);
5639     return ElementCount::getScalable(0);
5640   }
5641 
5642   // Disable scalable vectorization if the loop contains any instructions
5643   // with element types not supported for scalable vectors.
5644   if (any_of(ElementTypesInLoop, [&](Type *Ty) {
5645         return !Ty->isVoidTy() &&
5646                !this->TTI.isElementTypeLegalForScalableVector(Ty);
5647       })) {
5648     reportVectorizationInfo("Scalable vectorization is not supported "
5649                             "for all element types found in this loop.",
5650                             "ScalableVFUnfeasible", ORE, TheLoop);
5651     return ElementCount::getScalable(0);
5652   }
5653 
5654   if (Legal->isSafeForAnyVectorWidth())
5655     return MaxScalableVF;
5656 
5657   // Limit MaxScalableVF by the maximum safe dependence distance.
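  // For example (an illustrative sketch): with MaxSafeElements = 32 and a
  // maximum vscale of 16, the largest safe scalable VF is vscale x 2.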
5658   Optional<unsigned> MaxVScale = TTI.getMaxVScale();
5659   MaxScalableVF = ElementCount::getScalable(
5660       MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);
5661   if (!MaxScalableVF)
5662     reportVectorizationInfo(
5663         "Max legal vector width too small, scalable vectorization "
5664         "unfeasible.",
5665         "ScalableVFUnfeasible", ORE, TheLoop);
5666 
5667   return MaxScalableVF;
5668 }
5669 
5670 FixedScalableVFPair
5671 LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount,
5672                                                  ElementCount UserVF) {
5673   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
5674   unsigned SmallestType, WidestType;
5675   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
5676 
5677   // Get the maximum safe dependence distance in bits computed by LAA.
5678   // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
5679   // the memory accesses that is most restrictive (involved in the smallest
5680   // dependence distance).
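  // For example (an illustrative sketch): a max safe width of 256 bits with
  // a widest type of i32 yields MaxSafeElements = PowerOf2Floor(256 / 32)
  // = 8.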
5681   unsigned MaxSafeElements =
5682       PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType);
5683 
5684   auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements);
5685   auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements);
5686 
5687   LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF
5688                     << ".\n");
5689   LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF
5690                     << ".\n");
5691 
5692   // First analyze the UserVF, fall back if the UserVF should be ignored.
5693   if (UserVF) {
5694     auto MaxSafeUserVF =
5695         UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;
5696 
5697     if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF))
5698       return UserVF;
5699 
5700     assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));
5701 
5702     // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it
5703     // is better to ignore the hint and let the compiler choose a suitable VF.
5704     if (!UserVF.isScalable()) {
5705       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5706                         << " is unsafe, clamping to max safe VF="
5707                         << MaxSafeFixedVF << ".\n");
5708       ORE->emit([&]() {
5709         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5710                                           TheLoop->getStartLoc(),
5711                                           TheLoop->getHeader())
5712                << "User-specified vectorization factor "
5713                << ore::NV("UserVectorizationFactor", UserVF)
5714                << " is unsafe, clamping to maximum safe vectorization factor "
5715                << ore::NV("VectorizationFactor", MaxSafeFixedVF);
5716       });
5717       return MaxSafeFixedVF;
5718     }
5719 
5720     LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5721                       << " is unsafe. Ignoring scalable UserVF.\n");
5722     ORE->emit([&]() {
5723       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5724                                         TheLoop->getStartLoc(),
5725                                         TheLoop->getHeader())
5726              << "User-specified vectorization factor "
5727              << ore::NV("UserVectorizationFactor", UserVF)
5728              << " is unsafe. Ignoring the hint to let the compiler pick a "
5729                 "suitable VF.";
5730     });
5731   }
5732 
5733   LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
5734                     << " / " << WidestType << " bits.\n");
5735 
5736   FixedScalableVFPair Result(ElementCount::getFixed(1),
5737                              ElementCount::getScalable(0));
5738   if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType,
5739                                            WidestType, MaxSafeFixedVF))
5740     Result.FixedVF = MaxVF;
5741 
5742   if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType,
5743                                            WidestType, MaxSafeScalableVF))
5744     if (MaxVF.isScalable()) {
5745       Result.ScalableVF = MaxVF;
5746       LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
5747                         << "\n");
5748     }
5749 
5750   return Result;
5751 }
5752 
5753 FixedScalableVFPair
5754 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
5755   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to do this anyway, since the check is still
    // likely to be dynamically uniform if the target can skip.
5758     reportVectorizationFailure(
5759         "Not inserting runtime ptr check for divergent target",
5760         "runtime pointer checks needed. Not enabled for divergent target",
5761         "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
5762     return FixedScalableVFPair::getNone();
5763   }
5764 
5765   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5766   LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
5767   if (TC == 1) {
5768     reportVectorizationFailure("Single iteration (non) loop",
5769         "loop trip count is one, irrelevant for vectorization",
5770         "SingleIterationLoop", ORE, TheLoop);
5771     return FixedScalableVFPair::getNone();
5772   }
5773 
5774   switch (ScalarEpilogueStatus) {
5775   case CM_ScalarEpilogueAllowed:
5776     return computeFeasibleMaxVF(TC, UserVF);
5777   case CM_ScalarEpilogueNotAllowedUsePredicate:
5778     LLVM_FALLTHROUGH;
5779   case CM_ScalarEpilogueNotNeededUsePredicate:
5780     LLVM_DEBUG(
5781         dbgs() << "LV: vector predicate hint/switch found.\n"
5782                << "LV: Not allowing scalar epilogue, creating predicated "
5783                << "vector loop.\n");
5784     break;
5785   case CM_ScalarEpilogueNotAllowedLowTripLoop:
5786     // fallthrough as a special case of OptForSize
5787   case CM_ScalarEpilogueNotAllowedOptSize:
5788     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
5789       LLVM_DEBUG(
5790           dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5791     else
5792       LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5793                         << "count.\n");
5794 
5795     // Bail if runtime checks are required, which are not good when optimising
5796     // for size.
5797     if (runtimeChecksRequired())
5798       return FixedScalableVFPair::getNone();
5799 
5800     break;
5801   }
5802 
5803   // The only loops we can vectorize without a scalar epilogue, are loops with
5804   // a bottom-test and a single exiting block. We'd have to handle the fact
5805   // that not every instruction executes on the last iteration.  This will
5806   // require a lane mask which varies through the vector loop body.  (TODO)
5807   if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5808     // If there was a tail-folding hint/switch, but we can't fold the tail by
5809     // masking, fallback to a vectorization with a scalar epilogue.
5810     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5811       LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5812                            "scalar epilogue instead.\n");
5813       ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5814       return computeFeasibleMaxVF(TC, UserVF);
5815     }
5816     return FixedScalableVFPair::getNone();
5817   }
5818 
5819   // Now try the tail folding
5820 
5821   // Invalidate interleave groups that require an epilogue if we can't mask
5822   // the interleave-group.
5823   if (!useMaskedInterleavedAccesses(TTI)) {
5824     assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5825            "No decisions should have been taken at this point");
    // Note: There is no need to invalidate any cost modeling decisions here,
    // as none were taken so far.
5828     InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5829   }
5830 
5831   FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF);
5832   // Avoid tail folding if the trip count is known to be a multiple of any VF
5833   // we chose.
5834   // FIXME: The condition below pessimises the case for fixed-width vectors,
5835   // when scalable VFs are also candidates for vectorization.
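  // For example (an illustrative sketch): with a known trip count of 64,
  // MaxFixedVF = 8 and UserIC = 2, 64 urem (8 * 2) == 0, so no tail remains
  // and tail folding is unnecessary.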
5836   if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) {
5837     ElementCount MaxFixedVF = MaxFactors.FixedVF;
5838     assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) &&
5839            "MaxFixedVF must be a power of 2");
5840     unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC
5841                                    : MaxFixedVF.getFixedValue();
5842     ScalarEvolution *SE = PSE.getSE();
5843     const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
5844     const SCEV *ExitCount = SE->getAddExpr(
5845         BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
5846     const SCEV *Rem = SE->getURemExpr(
5847         SE->applyLoopGuards(ExitCount, TheLoop),
5848         SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
5849     if (Rem->isZero()) {
5850       // Accept MaxFixedVF if we do not have a tail.
5851       LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
5852       return MaxFactors;
5853     }
5854   }
5855 
5856   // If we don't know the precise trip count, or if the trip count that we
5857   // found modulo the vectorization factor is not zero, try to fold the tail
5858   // by masking.
5859   // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
5860   if (Legal->prepareToFoldTailByMasking()) {
5861     FoldTailByMasking = true;
5862     return MaxFactors;
5863   }
5864 
5865   // If there was a tail-folding hint/switch, but we can't fold the tail by
5866   // masking, fallback to a vectorization with a scalar epilogue.
5867   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5868     LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5869                          "scalar epilogue instead.\n");
5870     ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5871     return MaxFactors;
5872   }
5873 
5874   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
5875     LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
5876     return FixedScalableVFPair::getNone();
5877   }
5878 
5879   if (TC == 0) {
5880     reportVectorizationFailure(
5881         "Unable to calculate the loop count due to complex control flow",
5882         "unable to calculate the loop count due to complex control flow",
5883         "UnknownLoopCountComplexCFG", ORE, TheLoop);
5884     return FixedScalableVFPair::getNone();
5885   }
5886 
5887   reportVectorizationFailure(
5888       "Cannot optimize for size and vectorize at the same time.",
5889       "cannot optimize for size and vectorize at the same time. "
5890       "Enable vectorization of this loop with '#pragma clang loop "
5891       "vectorize(enable)' when compiling with -Os/-Oz",
5892       "NoTailLoopWithOptForSize", ORE, TheLoop);
5893   return FixedScalableVFPair::getNone();
5894 }
5895 
5896 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
5897     unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType,
5898     const ElementCount &MaxSafeVF) {
5899   bool ComputeScalableMaxVF = MaxSafeVF.isScalable();
5900   TypeSize WidestRegister = TTI.getRegisterBitWidth(
5901       ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
5902                            : TargetTransformInfo::RGK_FixedWidthVector);
5903 
5904   // Convenience function to return the minimum of two ElementCounts.
5905   auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) {
5906     assert((LHS.isScalable() == RHS.isScalable()) &&
5907            "Scalable flags must match");
5908     return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS;
5909   };
5910 
  // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
  // Note that both WidestRegister and WidestType may not be powers of 2.
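  // For example (an illustrative sketch): a 96-bit widest register (not a
  // power of 2) with i32 elements gives PowerOf2Floor(96 / 32) =
  // PowerOf2Floor(3) = 2 lanes.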
5913   auto MaxVectorElementCount = ElementCount::get(
5914       PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType),
5915       ComputeScalableMaxVF);
5916   MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
5917   LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
5918                     << (MaxVectorElementCount * WidestType) << " bits.\n");
5919 
5920   if (!MaxVectorElementCount) {
5921     LLVM_DEBUG(dbgs() << "LV: The target has no "
5922                       << (ComputeScalableMaxVF ? "scalable" : "fixed")
5923                       << " vector registers.\n");
5924     return ElementCount::getFixed(1);
5925   }
5926 
5927   const auto TripCountEC = ElementCount::getFixed(ConstTripCount);
5928   if (ConstTripCount &&
5929       ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) &&
5930       isPowerOf2_32(ConstTripCount)) {
5931     // We need to clamp the VF to be the ConstTripCount. There is no point in
5932     // choosing a higher viable VF as done in the loop below. If
5933     // MaxVectorElementCount is scalable, we only fall back on a fixed VF when
5934     // the TC is less than or equal to the known number of lanes.
5935     LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
5936                       << ConstTripCount << "\n");
5937     return TripCountEC;
5938   }
5939 
5940   ElementCount MaxVF = MaxVectorElementCount;
5941   if (TTI.shouldMaximizeVectorBandwidth() ||
5942       (MaximizeBandwidth && isScalarEpilogueAllowed())) {
5943     auto MaxVectorElementCountMaxBW = ElementCount::get(
5944         PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType),
5945         ComputeScalableMaxVF);
5946     MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
5947 
5948     // Collect all viable vectorization factors larger than the default MaxVF
5949     // (i.e. MaxVectorElementCount).
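    // For example (an illustrative sketch): with MaxVectorElementCount = 4
    // and MaxVectorElementCountMaxBW = 16, the candidate VFs are 8 and 16.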
5950     SmallVector<ElementCount, 8> VFs;
5951     for (ElementCount VS = MaxVectorElementCount * 2;
5952          ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2)
5953       VFs.push_back(VS);
5954 
5955     // For each VF calculate its register usage.
5956     auto RUs = calculateRegisterUsage(VFs);
5957 
    // Select the largest VF which doesn't require more registers than are
    // available.
5960     for (int i = RUs.size() - 1; i >= 0; --i) {
5961       bool Selected = true;
5962       for (auto &pair : RUs[i].MaxLocalUsers) {
5963         unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5964         if (pair.second > TargetNumRegisters)
5965           Selected = false;
5966       }
5967       if (Selected) {
5968         MaxVF = VFs[i];
5969         break;
5970       }
5971     }
5972     if (ElementCount MinVF =
5973             TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) {
5974       if (ElementCount::isKnownLT(MaxVF, MinVF)) {
5975         LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
5976                           << ") with target's minimum: " << MinVF << '\n');
5977         MaxVF = MinVF;
5978       }
5979     }
5980   }
5981   return MaxVF;
5982 }
5983 
5984 bool LoopVectorizationCostModel::isMoreProfitable(
5985     const VectorizationFactor &A, const VectorizationFactor &B) const {
5986   InstructionCost::CostType CostA = *A.Cost.getValue();
5987   InstructionCost::CostType CostB = *B.Cost.getValue();
5988 
5989   unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop);
5990 
5991   if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking &&
5992       MaxTripCount) {
    // If we are folding the tail and the trip count is a known (possibly
    // small) constant, the trip count will be rounded up to an integer number
    // of iterations. The total cost will then be
    // PerIterationCost * ceil(TripCount / VF), which we compare directly.
    // When not folding the tail, the total cost will be
    // PerIterationCost * floor(TC / VF) + the scalar remainder cost, which is
    // approximated with the per-lane cost below instead of using the trip
    // count as here.
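    // Illustrative example (hypothetical costs): with MaxTripCount == 10,
    // CostA == 8 at VF 4 gives 8 * ceil(10/4) = 24, while CostB == 5 at VF 2
    // gives 5 * ceil(10/2) = 25, so A wins despite its higher per-iteration
    // cost.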
6000     int64_t RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue());
6001     int64_t RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue());
6002     return RTCostA < RTCostB;
6003   }
6004 
  // When scalable vectorization is preferred, assume for now that vscale may
  // be larger than 1, so that scalable vectorization is slightly favored over
  // fixed-width vectorization.
6008   if (Hints->isScalableVectorizationPreferred())
6009     if (A.Width.isScalable() && !B.Width.isScalable())
6010       return (CostA * B.Width.getKnownMinValue()) <=
6011              (CostB * A.Width.getKnownMinValue());
6012 
6013   // To avoid the need for FP division:
6014   //      (CostA / A.Width) < (CostB / B.Width)
6015   // <=>  (CostA * B.Width) < (CostB * A.Width)
6016   return (CostA * B.Width.getKnownMinValue()) <
6017          (CostB * A.Width.getKnownMinValue());
6018 }
6019 
6020 VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor(
6021     const ElementCountSet &VFCandidates) {
6022   InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first;
6023   LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
6024   assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
6025   assert(VFCandidates.count(ElementCount::getFixed(1)) &&
6026          "Expected Scalar VF to be a candidate");
6027 
6028   const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost);
6029   VectorizationFactor ChosenFactor = ScalarCost;
6030 
6031   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
6032   if (ForceVectorization && VFCandidates.size() > 1) {
6033     // Ignore scalar width, because the user explicitly wants vectorization.
    // Initialize cost to max so that at least VF = 2 is chosen during cost
    // evaluation.
6036     ChosenFactor.Cost = std::numeric_limits<InstructionCost::CostType>::max();
6037   }
6038 
6039   for (const auto &i : VFCandidates) {
6040     // The cost for scalar VF=1 is already calculated, so ignore it.
6041     if (i.isScalar())
6042       continue;
6043 
    // Notice that the vector loop needs to be executed fewer times, so
    // we need to divide the cost of the vector loop by the width of
    // the vector elements.
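    // For example (illustrative numbers): a vector loop cost of 24 at VF 8 is
    // a per-lane cost of 3, which is what the debug output below prints.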
6047     VectorizationCostTy C = expectedCost(i);
6048 
6049     assert(C.first.isValid() && "Unexpected invalid cost for vector loop");
6050     VectorizationFactor Candidate(i, C.first);
6051     LLVM_DEBUG(
6052         dbgs() << "LV: Vector loop of width " << i << " costs: "
6053                << (*Candidate.Cost.getValue() /
6054                    Candidate.Width.getKnownMinValue())
6055                << (i.isScalable() ? " (assuming a minimum vscale of 1)" : "")
6056                << ".\n");
6057 
6058     if (!C.second && !ForceVectorization) {
6059       LLVM_DEBUG(
6060           dbgs() << "LV: Not considering vector loop of width " << i
6061                  << " because it will not generate any vector instructions.\n");
6062       continue;
6063     }
6064 
    // If profitable, add it to the ProfitableVFs list.
6066     if (isMoreProfitable(Candidate, ScalarCost))
6067       ProfitableVFs.push_back(Candidate);
6068 
6069     if (isMoreProfitable(Candidate, ChosenFactor))
6070       ChosenFactor = Candidate;
6071   }
6072 
6073   if (!EnableCondStoresVectorization && NumPredStores) {
6074     reportVectorizationFailure("There are conditional stores.",
6075         "store that is conditionally executed prevents vectorization",
6076         "ConditionalStore", ORE, TheLoop);
6077     ChosenFactor = ScalarCost;
6078   }
6079 
  LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() &&
                 *ChosenFactor.Cost.getValue() >= *ScalarCost.Cost.getValue())
                 dbgs()
             << "LV: Vectorization does not seem to be beneficial, "
             << "but was forced by a user.\n");
6085   LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n");
6086   return ChosenFactor;
6087 }
6088 
6089 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization(
6090     const Loop &L, ElementCount VF) const {
6091   // Cross iteration phis such as reductions need special handling and are
6092   // currently unsupported.
6093   if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) {
6094         return Legal->isFirstOrderRecurrence(&Phi) ||
6095                Legal->isReductionVariable(&Phi);
6096       }))
6097     return false;
6098 
6099   // Phis with uses outside of the loop require special handling and are
6100   // currently unsupported.
6101   for (auto &Entry : Legal->getInductionVars()) {
6102     // Look for uses of the value of the induction at the last iteration.
6103     Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch());
6104     for (User *U : PostInc->users())
6105       if (!L.contains(cast<Instruction>(U)))
6106         return false;
    // Look for uses of the penultimate value of the induction.
6108     for (User *U : Entry.first->users())
6109       if (!L.contains(cast<Instruction>(U)))
6110         return false;
6111   }
6112 
6113   // Induction variables that are widened require special handling that is
6114   // currently not supported.
6115   if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
6116         return !(this->isScalarAfterVectorization(Entry.first, VF) ||
6117                  this->isProfitableToScalarize(Entry.first, VF));
6118       }))
6119     return false;
6120 
  // Epilogue vectorization code has not been audited to ensure it handles
  // non-latch exits properly. It may be fine, but it needs to be audited and
  // tested.
6124   if (L.getExitingBlock() != L.getLoopLatch())
6125     return false;
6126 
6127   return true;
6128 }
6129 
6130 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
6131     const ElementCount VF) const {
6132   // FIXME: We need a much better cost-model to take different parameters such
6133   // as register pressure, code size increase and cost of extra branches into
6134   // account. For now we apply a very crude heuristic and only consider loops
6135   // with vectorization factors larger than a certain value.
  // We also consider epilogue vectorization unprofitable for targets that
  // don't consider interleaving beneficial (e.g. MVE).
  if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
    return false;
  return VF.getFixedValue() >= EpilogueVectorizationMinVF;
6143 }
6144 
6145 VectorizationFactor
6146 LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
6147     const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
6148   VectorizationFactor Result = VectorizationFactor::Disabled();
6149   if (!EnableEpilogueVectorization) {
6150     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
6151     return Result;
6152   }
6153 
6154   if (!isScalarEpilogueAllowed()) {
6155     LLVM_DEBUG(
6156         dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
6157                   "allowed.\n";);
6158     return Result;
6159   }
6160 
6161   // FIXME: This can be fixed for scalable vectors later, because at this stage
6162   // the LoopVectorizer will only consider vectorizing a loop with scalable
6163   // vectors when the loop has a hint to enable vectorization for a given VF.
6164   if (MainLoopVF.isScalable()) {
6165     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization for scalable vectors not "
6166                          "yet supported.\n");
6167     return Result;
6168   }
6169 
6170   // Not really a cost consideration, but check for unsupported cases here to
6171   // simplify the logic.
6172   if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
6173     LLVM_DEBUG(
6174         dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
6175                   "not a supported candidate.\n";);
6176     return Result;
6177   }
6178 
  if (EpilogueVectorizationForceVF > 1) {
    LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
    if (LVP.hasPlanWithVFs(
            {MainLoopVF, ElementCount::getFixed(EpilogueVectorizationForceVF)}))
      return {ElementCount::getFixed(EpilogueVectorizationForceVF), 0};
    LLVM_DEBUG(
        dbgs()
            << "LEV: Epilogue vectorization forced factor is not viable.\n";);
    return Result;
  }
6191 
6192   if (TheLoop->getHeader()->getParent()->hasOptSize() ||
6193       TheLoop->getHeader()->getParent()->hasMinSize()) {
6194     LLVM_DEBUG(
6195         dbgs()
6196             << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
6197     return Result;
6198   }
6199 
6200   if (!isEpilogueVectorizationProfitable(MainLoopVF))
6201     return Result;
6202 
6203   for (auto &NextVF : ProfitableVFs)
6204     if (ElementCount::isKnownLT(NextVF.Width, MainLoopVF) &&
6205         (Result.Width.getFixedValue() == 1 ||
6206          isMoreProfitable(NextVF, Result)) &&
6207         LVP.hasPlanWithVFs({MainLoopVF, NextVF.Width}))
6208       Result = NextVF;
6209 
6210   if (Result != VectorizationFactor::Disabled())
6211     LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
6212                       << Result.Width.getFixedValue() << "\n";);
6213   return Result;
6214 }
6215 
6216 std::pair<unsigned, unsigned>
6217 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
6218   unsigned MinWidth = -1U;
6219   unsigned MaxWidth = 8;
6220   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
6221   for (Type *T : ElementTypesInLoop) {
6222     MinWidth = std::min<unsigned>(
6223         MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
6224     MaxWidth = std::max<unsigned>(
6225         MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
6226   }
6227   return {MinWidth, MaxWidth};
6228 }
6229 
6230 void LoopVectorizationCostModel::collectElementTypesForWidening() {
6231   ElementTypesInLoop.clear();
6232   // For each block.
6233   for (BasicBlock *BB : TheLoop->blocks()) {
6234     // For each instruction in the loop.
6235     for (Instruction &I : BB->instructionsWithoutDebug()) {
6236       Type *T = I.getType();
6237 
6238       // Skip ignored values.
6239       if (ValuesToIgnore.count(&I))
6240         continue;
6241 
6242       // Only examine Loads, Stores and PHINodes.
6243       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
6244         continue;
6245 
6246       // Examine PHI nodes that are reduction variables. Update the type to
6247       // account for the recurrence type.
6248       if (auto *PN = dyn_cast<PHINode>(&I)) {
6249         if (!Legal->isReductionVariable(PN))
6250           continue;
6251         const RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[PN];
6252         if (PreferInLoopReductions || useOrderedReductions(RdxDesc) ||
6253             TTI.preferInLoopReduction(RdxDesc.getOpcode(),
6254                                       RdxDesc.getRecurrenceType(),
6255                                       TargetTransformInfo::ReductionFlags()))
6256           continue;
6257         T = RdxDesc.getRecurrenceType();
6258       }
6259 
6260       // Examine the stored values.
6261       if (auto *ST = dyn_cast<StoreInst>(&I))
6262         T = ST->getValueOperand()->getType();
6263 
6264       // Ignore loaded pointer types and stored pointer types that are not
6265       // vectorizable.
6266       //
6267       // FIXME: The check here attempts to predict whether a load or store will
6268       //        be vectorized. We only know this for certain after a VF has
6269       //        been selected. Here, we assume that if an access can be
6270       //        vectorized, it will be. We should also look at extending this
6271       //        optimization to non-pointer types.
6272       //
6273       if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
6274           !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
6275         continue;
6276 
6277       ElementTypesInLoop.insert(T);
6278     }
6279   }
6280 }
6281 
6282 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
6283                                                            unsigned LoopCost) {
6284   // -- The interleave heuristics --
6285   // We interleave the loop in order to expose ILP and reduce the loop overhead.
6286   // There are many micro-architectural considerations that we can't predict
6287   // at this level. For example, frontend pressure (on decode or fetch) due to
6288   // code size, or the number and capabilities of the execution ports.
6289   //
6290   // We use the following heuristics to select the interleave count:
6291   // 1. If the code has reductions, then we interleave to break the cross
6292   // iteration dependency.
6293   // 2. If the loop is really small, then we interleave to reduce the loop
6294   // overhead.
6295   // 3. We don't interleave if we think that we will spill registers to memory
6296   // due to the increased register pressure.
6297 
6298   if (!isScalarEpilogueAllowed())
6299     return 1;
6300 
  // A finite maximum safe dependence distance was already used to limit the
  // VF; interleaving further could execute accesses beyond that distance
  // concurrently, so do not interleave.
6302   if (Legal->getMaxSafeDepDistBytes() != -1U)
6303     return 1;
6304 
6305   auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
6306   const bool HasReductions = !Legal->getReductionVars().empty();
  // Do not interleave loops with a relatively small known or estimated trip
  // count. But we will interleave when InterleaveSmallLoopScalarReduction is
  // enabled, and the code has scalar reductions (HasReductions && VF == 1),
  // because with the above conditions interleaving can expose ILP and break
  // cross-iteration dependences for reductions.
6312   if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
6313       !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
6314     return 1;
6315 
6316   RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these constants, so assume that we have at least one
  // instruction that uses at least one register.
  for (auto &pair : R.MaxLocalUsers) {
    pair.second = std::max(pair.second, 1U);
  }
6322 
  // We calculate the interleave count using the following formula.
  // Subtract the number of loop invariants from the number of available
  // registers. These registers are used by all of the interleaved instances.
  // Next, divide the remaining registers by the number of registers that is
  // required by the loop, in order to estimate how many parallel instances
  // fit without causing spills. All of this is rounded down if necessary to be
  // a power of two. We want a power-of-two interleave count to simplify any
  // addressing operations and alignment considerations.
  // We also want a power-of-two interleave count to ensure that the induction
  // variable of the vector loop wraps to zero when the tail is folded by
  // masking; this currently happens when optimizing for size, in which case
  // we return an interleave count of 1 above.
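  // Illustrative example (hypothetical numbers): with 32 available registers,
  // 2 loop-invariant values and a maximum local usage of 5 registers, the
  // estimate below is PowerOf2Floor((32 - 2) / 5) = PowerOf2Floor(6) = 4.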
6334   unsigned IC = UINT_MAX;
6335 
  for (auto &pair : R.MaxLocalUsers) {
    unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
    LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
                      << " registers of "
                      << TTI.getRegisterClassName(pair.first)
                      << " register class\n");
6341     if (VF.isScalar()) {
6342       if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
6343         TargetNumRegisters = ForceTargetNumScalarRegs;
6344     } else {
6345       if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
6346         TargetNumRegisters = ForceTargetNumVectorRegs;
6347     }
6348     unsigned MaxLocalUsers = pair.second;
6349     unsigned LoopInvariantRegs = 0;
6350     if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
6351       LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
6352 
    unsigned TmpIC =
        PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers);
6354     // Don't count the induction variable as interleaved.
6355     if (EnableIndVarRegisterHeur) {
6356       TmpIC =
6357           PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
6358                         std::max(1U, (MaxLocalUsers - 1)));
6359     }
6360 
6361     IC = std::min(IC, TmpIC);
6362   }
6363 
6364   // Clamp the interleave ranges to reasonable counts.
6365   unsigned MaxInterleaveCount =
6366       TTI.getMaxInterleaveFactor(VF.getKnownMinValue());
6367 
6368   // Check if the user has overridden the max.
6369   if (VF.isScalar()) {
6370     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
6371       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
6372   } else {
6373     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
6374       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
6375   }
6376 
  // If the trip count is a known or estimated compile-time constant, limit the
  // interleave count to at most the trip count divided by VF, clamping the
  // result to at least 1.
6380   //
  // For scalable vectors we can't know if interleaving is beneficial. It may
  // not be beneficial for small loops if none of the lanes in the second
  // vector iteration is enabled. However, for larger loops, there is likely to
  // be a similar benefit as for fixed-width vectors. For now, we choose the
  // InterleaveCount as if vscale were '1', although if some information about
  // the vector is known (e.g. min vector size), we can make a better decision.
6387   if (BestKnownTC) {
6388     MaxInterleaveCount =
6389         std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
6390     // Make sure MaxInterleaveCount is greater than 0.
6391     MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
6392   }
6393 
6394   assert(MaxInterleaveCount > 0 &&
6395          "Maximum interleave count must be greater than 0");
6396 
  // Clamp the calculated IC to be between 1 and the max interleave count
  // that the target and trip count allow.
6399   if (IC > MaxInterleaveCount)
6400     IC = MaxInterleaveCount;
6401   else
6402     // Make sure IC is greater than 0.
6403     IC = std::max(1u, IC);
6404 
6405   assert(IC > 0 && "Interleave count must be greater than 0.");
6406 
6407   // If we did not calculate the cost for VF (because the user selected the VF)
6408   // then we calculate the cost of VF here.
6409   if (LoopCost == 0) {
6410     assert(expectedCost(VF).first.isValid() && "Expected a valid cost");
6411     LoopCost = *expectedCost(VF).first.getValue();
6412   }
6413 
6414   assert(LoopCost && "Non-zero loop cost expected");
6415 
6416   // Interleave if we vectorized this loop and there is a reduction that could
6417   // benefit from interleaving.
6418   if (VF.isVector() && HasReductions) {
6419     LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
6420     return IC;
6421   }
6422 
6423   // Note that if we've already vectorized the loop we will have done the
6424   // runtime check and so interleaving won't require further checks.
6425   bool InterleavingRequiresRuntimePointerCheck =
6426       (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
6427 
6428   // We want to interleave small loops in order to reduce the loop overhead and
6429   // potentially expose ILP opportunities.
6430   LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
6431                     << "LV: IC is " << IC << '\n'
6432                     << "LV: VF is " << VF << '\n');
6433   const bool AggressivelyInterleaveReductions =
6434       TTI.enableAggressiveInterleaving(HasReductions);
6435   if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
    // We assume that the loop overhead cost is 1. Using the cost model's
    // estimate of the loop body cost, interleave until the loop overhead is
    // about 5% of the total cost of the loop.
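    // For example, assuming the default SmallLoopCost of 20 and a loop body
    // cost of 4, SmallIC below is min(IC, PowerOf2Floor(20 / 4)) = min(IC, 4).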
6439     unsigned SmallIC =
6440         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
6441 
6442     // Interleave until store/load ports (estimated by max interleave count) are
6443     // saturated.
6444     unsigned NumStores = Legal->getNumStores();
6445     unsigned NumLoads = Legal->getNumLoads();
6446     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
6447     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
6448 
    // If we have a scalar reduction (vector reductions are already dealt with
    // by this point), we can increase the critical path length if the loop
    // we're interleaving is inside another loop. Limit, by default, to 2, so
    // the critical path only gets increased by one reduction operation.
6453     if (HasReductions && TheLoop->getLoopDepth() > 1) {
6454       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
6455       SmallIC = std::min(SmallIC, F);
6456       StoresIC = std::min(StoresIC, F);
6457       LoadsIC = std::min(LoadsIC, F);
6458     }
6459 
6460     if (EnableLoadStoreRuntimeInterleave &&
6461         std::max(StoresIC, LoadsIC) > SmallIC) {
6462       LLVM_DEBUG(
6463           dbgs() << "LV: Interleaving to saturate store or load ports.\n");
6464       return std::max(StoresIC, LoadsIC);
6465     }
6466 
6467     // If there are scalar reductions and TTI has enabled aggressive
6468     // interleaving for reductions, we will interleave to expose ILP.
6469     if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
6470         AggressivelyInterleaveReductions) {
6471       LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
      // Interleave no less than SmallIC but not as aggressively as the normal
      // IC to satisfy the rare situation when resources are too limited.
6474       return std::max(IC / 2, SmallIC);
6475     } else {
6476       LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
6477       return SmallIC;
6478     }
6479   }
6480 
6481   // Interleave if this is a large loop (small loops are already dealt with by
6482   // this point) that could benefit from interleaving.
6483   if (AggressivelyInterleaveReductions) {
6484     LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6485     return IC;
6486   }
6487 
6488   LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
6489   return 1;
6490 }
6491 
6492 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
6493 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is a
  // very rough estimation. We scan the loop in topological order and assign a
  // number to each instruction. We use RPO to ensure that defs are met before
  // their users. We assume that each instruction that has in-loop users starts
  // an interval. We record every time that an in-loop value is used, so we
  // have a list of the first and last occurrences of each instruction. Next,
  // we transpose this data structure into a multi map that holds the list of
  // intervals that *end* at a specific location. This multi map allows us to
  // perform a linear search. We scan the instructions linearly and record each
  // time that a new interval starts, by placing it in a set. If we find this
  // value in the multi-map then we remove it from the set. The max register
  // usage is the maximum size of the set. We also search for instructions
  // that are defined outside the loop, but are used inside the loop. We need
  // this number separately from the max-interval usage number because, when
  // we unroll, loop-invariant values do not take more registers.
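  // Illustrative example (hypothetical instructions): in the sequence
  //   %a = load ...
  //   %b = add %a, 1
  //   %c = mul %a, %b
  // both %a and %b are still live when %c is defined, so at that point two
  // intervals are open and the usage estimate is 2.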
6511   LoopBlocksDFS DFS(TheLoop);
6512   DFS.perform(LI);
6513 
6514   RegisterUsage RU;
6515 
6516   // Each 'key' in the map opens a new interval. The values
6517   // of the map are the index of the 'last seen' usage of the
6518   // instruction that is the key.
6519   using IntervalMap = DenseMap<Instruction *, unsigned>;
6520 
6521   // Maps instruction to its index.
6522   SmallVector<Instruction *, 64> IdxToInstr;
6523   // Marks the end of each interval.
6524   IntervalMap EndPoint;
6525   // Saves the list of instruction indices that are used in the loop.
6526   SmallPtrSet<Instruction *, 8> Ends;
6527   // Saves the list of values that are used in the loop but are
6528   // defined outside the loop, such as arguments and constants.
6529   SmallPtrSet<Value *, 8> LoopInvariants;
6530 
6531   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
6532     for (Instruction &I : BB->instructionsWithoutDebug()) {
6533       IdxToInstr.push_back(&I);
6534 
6535       // Save the end location of each USE.
6536       for (Value *U : I.operands()) {
6537         auto *Instr = dyn_cast<Instruction>(U);
6538 
6539         // Ignore non-instruction values such as arguments, constants, etc.
6540         if (!Instr)
6541           continue;
6542 
6543         // If this instruction is outside the loop then record it and continue.
6544         if (!TheLoop->contains(Instr)) {
6545           LoopInvariants.insert(Instr);
6546           continue;
6547         }
6548 
6549         // Overwrite previous end points.
6550         EndPoint[Instr] = IdxToInstr.size();
6551         Ends.insert(Instr);
6552       }
6553     }
6554   }
6555 
6556   // Saves the list of intervals that end with the index in 'key'.
6557   using InstrList = SmallVector<Instruction *, 2>;
6558   DenseMap<unsigned, InstrList> TransposeEnds;
6559 
6560   // Transpose the EndPoints to a list of values that end at each index.
6561   for (auto &Interval : EndPoint)
6562     TransposeEnds[Interval.second].push_back(Interval.first);
6563 
6564   SmallPtrSet<Instruction *, 8> OpenIntervals;
6565   SmallVector<RegisterUsage, 8> RUs(VFs.size());
6566   SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());
6567 
6568   LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
6569 
6570   // A lambda that gets the register usage for the given type and VF.
6571   const auto &TTICapture = TTI;
6572   auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) {
6573     if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty))
6574       return 0;
6575     return *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue();
6576   };
6577 
6578   for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
6579     Instruction *I = IdxToInstr[i];
6580 
6581     // Remove all of the instructions that end at this location.
6582     InstrList &List = TransposeEnds[i];
6583     for (Instruction *ToRemove : List)
6584       OpenIntervals.erase(ToRemove);
6585 
6586     // Ignore instructions that are never used within the loop.
6587     if (!Ends.count(I))
6588       continue;
6589 
6590     // Skip ignored values.
6591     if (ValuesToIgnore.count(I))
6592       continue;
6593 
6594     // For each VF find the maximum usage of registers.
6595     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
6596       // Count the number of live intervals.
6597       SmallMapVector<unsigned, unsigned, 4> RegUsage;
6598 
6599       if (VFs[j].isScalar()) {
        for (auto Inst : OpenIntervals) {
          unsigned ClassID =
              TTI.getRegisterClassForType(false, Inst->getType());
          // SmallMapVector::operator[] value-initializes missing entries to
          // 0, so a plain increment covers both cases.
          RegUsage[ClassID] += 1;
        }
6607       } else {
6608         collectUniformsAndScalars(VFs[j]);
6609         for (auto Inst : OpenIntervals) {
6610           // Skip ignored values for VF > 1.
6611           if (VecValuesToIgnore.count(Inst))
6612             continue;
          if (isScalarAfterVectorization(Inst, VFs[j])) {
            unsigned ClassID =
                TTI.getRegisterClassForType(false, Inst->getType());
            RegUsage[ClassID] += 1;
          } else {
            unsigned ClassID =
                TTI.getRegisterClassForType(true, Inst->getType());
            RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
          }
6626         }
6627       }
6628 
      for (auto &pair : RegUsage) {
        // Missing entries are value-initialized to 0, so std::max handles
        // both the first-seen and the update case.
        unsigned &Entry = MaxUsages[j][pair.first];
        Entry = std::max(Entry, pair.second);
      }
6635     }
6636 
6637     LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
6638                       << OpenIntervals.size() << '\n');
6639 
6640     // Add the current instruction to the list of open intervals.
6641     OpenIntervals.insert(I);
6642   }
6643 
6644   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
6645     SmallMapVector<unsigned, unsigned, 4> Invariant;
6646 
6647     for (auto Inst : LoopInvariants) {
6648       unsigned Usage =
6649           VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
6650       unsigned ClassID =
6651           TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
      // Missing entries are value-initialized to 0.
      Invariant[ClassID] += Usage;
6656     }
6657 
6658     LLVM_DEBUG({
6659       dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
6660       dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
6661              << " item\n";
6662       for (const auto &pair : MaxUsages[i]) {
6663         dbgs() << "LV(REG): RegisterClass: "
6664                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6665                << " registers\n";
6666       }
6667       dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
6668              << " item\n";
6669       for (const auto &pair : Invariant) {
6670         dbgs() << "LV(REG): RegisterClass: "
6671                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6672                << " registers\n";
6673       }
6674     });
6675 
6676     RU.LoopInvariantRegs = Invariant;
6677     RU.MaxLocalUsers = MaxUsages[i];
6678     RUs[i] = RU;
6679   }
6680 
6681   return RUs;
6682 }
6683 
bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I) {
6685   // TODO: Cost model for emulated masked load/store is completely
6686   // broken. This hack guides the cost model to use an artificially
6687   // high enough value to practically disable vectorization with such
6688   // operations, except where previously deployed legality hack allowed
6689   // using very low cost values. This is to avoid regressions coming simply
6690   // from moving "masked load/store" check from legality to cost model.
6691   // Masked Load/Gather emulation was previously never allowed.
  // Emulation of a limited number of Masked Store/Scatter operations was
  // allowed.
6693   assert(isPredicatedInst(I) &&
6694          "Expecting a scalar emulated instruction");
6695   return isa<LoadInst>(I) ||
6696          (isa<StoreInst>(I) &&
6697           NumPredStores > NumberOfStoresToPredicate);
6698 }
6699 
6700 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
6701   // If we aren't vectorizing the loop, or if we've already collected the
6702   // instructions to scalarize, there's nothing to do. Collection may already
6703   // have occurred if we have a user-selected VF and are now computing the
6704   // expected cost for interleaving.
6705   if (VF.isScalar() || VF.isZero() ||
6706       InstsToScalarize.find(VF) != InstsToScalarize.end())
6707     return;
6708 
  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
6710   // not profitable to scalarize any instructions, the presence of VF in the
6711   // map will indicate that we've analyzed it already.
6712   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
6713 
6714   // Find all the instructions that are scalar with predication in the loop and
6715   // determine if it would be better to not if-convert the blocks they are in.
6716   // If so, we also record the instructions to scalarize.
6717   for (BasicBlock *BB : TheLoop->blocks()) {
6718     if (!blockNeedsPredication(BB))
6719       continue;
6720     for (Instruction &I : *BB)
6721       if (isScalarWithPredication(&I)) {
6722         ScalarCostsTy ScalarCosts;
6723         // Do not apply discount logic if hacked cost is needed
6724         // for emulated masked memrefs.
6725         if (!useEmulatedMaskMemRefHack(&I) &&
6726             computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
6727           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
6728         // Remember that BB will remain after vectorization.
6729         PredicatedBBsAfterVectorization.insert(BB);
6730       }
6731   }
6732 }
6733 
6734 int LoopVectorizationCostModel::computePredInstDiscount(
6735     Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
6736   assert(!isUniformAfterVectorization(PredInst, VF) &&
6737          "Instruction marked uniform-after-vectorization will be predicated");
6738 
6739   // Initialize the discount to zero, meaning that the scalar version and the
6740   // vector version cost the same.
6741   InstructionCost Discount = 0;
6742 
6743   // Holds instructions to analyze. The instructions we visit are mapped in
6744   // ScalarCosts. Those instructions are the ones that would be scalarized if
6745   // we find that the scalar version costs less.
6746   SmallVector<Instruction *, 8> Worklist;
6747 
6748   // Returns true if the given instruction can be scalarized.
6749   auto canBeScalarized = [&](Instruction *I) -> bool {
6750     // We only attempt to scalarize instructions forming a single-use chain
6751     // from the original predicated block that would otherwise be vectorized.
6752     // Although not strictly necessary, we give up on instructions we know will
6753     // already be scalar to avoid traversing chains that are unlikely to be
6754     // beneficial.
6755     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
6756         isScalarAfterVectorization(I, VF))
6757       return false;
6758 
6759     // If the instruction is scalar with predication, it will be analyzed
6760     // separately. We ignore it within the context of PredInst.
6761     if (isScalarWithPredication(I))
6762       return false;
6763 
6764     // If any of the instruction's operands are uniform after vectorization,
6765     // the instruction cannot be scalarized. This prevents, for example, a
6766     // masked load from being scalarized.
6767     //
6768     // We assume we will only emit a value for lane zero of an instruction
6769     // marked uniform after vectorization, rather than VF identical values.
6770     // Thus, if we scalarize an instruction that uses a uniform, we would
6771     // create uses of values corresponding to the lanes we aren't emitting code
6772     // for. This behavior can be changed by allowing getScalarValue to clone
6773     // the lane zero values for uniforms rather than asserting.
6774     for (Use &U : I->operands())
6775       if (auto *J = dyn_cast<Instruction>(U.get()))
6776         if (isUniformAfterVectorization(J, VF))
6777           return false;
6778 
6779     // Otherwise, we can scalarize the instruction.
6780     return true;
6781   };
6782 
6783   // Compute the expected cost discount from scalarizing the entire expression
6784   // feeding the predicated instruction. We currently only consider expressions
6785   // that are single-use instruction chains.
6786   Worklist.push_back(PredInst);
6787   while (!Worklist.empty()) {
6788     Instruction *I = Worklist.pop_back_val();
6789 
6790     // If we've already analyzed the instruction, there's nothing to do.
6791     if (ScalarCosts.find(I) != ScalarCosts.end())
6792       continue;
6793 
6794     // Compute the cost of the vector instruction. Note that this cost already
6795     // includes the scalarization overhead of the predicated instruction.
6796     InstructionCost VectorCost = getInstructionCost(I, VF).first;
6797 
6798     // Compute the cost of the scalarized instruction. This cost is the cost of
6799     // the instruction as if it wasn't if-converted and instead remained in the
6800     // predicated block. We will scale this cost by block probability after
6801     // computing the scalarization overhead.
6802     assert(!VF.isScalable() && "scalable vectors not yet supported.");
6803     InstructionCost ScalarCost =
6804         VF.getKnownMinValue() *
6805         getInstructionCost(I, ElementCount::getFixed(1)).first;
6806 
6807     // Compute the scalarization overhead of needed insertelement instructions
6808     // and phi nodes.
6809     if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
6810       ScalarCost += TTI.getScalarizationOverhead(
6811           cast<VectorType>(ToVectorTy(I->getType(), VF)),
6812           APInt::getAllOnesValue(VF.getKnownMinValue()), true, false);
6813       assert(!VF.isScalable() && "scalable vectors not yet supported.");
6814       ScalarCost +=
6815           VF.getKnownMinValue() *
6816           TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput);
6817     }
6818 
6819     // Compute the scalarization overhead of needed extractelement
6820     // instructions. For each of the instruction's operands, if the operand can
6821     // be scalarized, add it to the worklist; otherwise, account for the
6822     // overhead.
6823     for (Use &U : I->operands())
6824       if (auto *J = dyn_cast<Instruction>(U.get())) {
6825         assert(VectorType::isValidElementType(J->getType()) &&
6826                "Instruction has non-scalar type");
6827         if (canBeScalarized(J))
6828           Worklist.push_back(J);
6829         else if (needsExtract(J, VF)) {
6830           assert(!VF.isScalable() && "scalable vectors not yet supported.");
6831           ScalarCost += TTI.getScalarizationOverhead(
6832               cast<VectorType>(ToVectorTy(J->getType(), VF)),
6833               APInt::getAllOnesValue(VF.getKnownMinValue()), false, true);
6834         }
6835       }
6836 
6837     // Scale the total scalar cost by block probability.
6838     ScalarCost /= getReciprocalPredBlockProb();
6839 
6840     // Compute the discount. A non-negative discount means the vector version
6841     // of the instruction costs more, and scalarizing would be beneficial.
6842     Discount += VectorCost - ScalarCost;
6843     ScalarCosts[I] = ScalarCost;
6844   }
6845 
6846   return *Discount.getValue();
6847 }
6848 
6849 LoopVectorizationCostModel::VectorizationCostTy
6850 LoopVectorizationCostModel::expectedCost(ElementCount VF) {
6851   VectorizationCostTy Cost;
6852 
6853   // For each block.
6854   for (BasicBlock *BB : TheLoop->blocks()) {
6855     VectorizationCostTy BlockCost;
6856 
6857     // For each instruction in the old loop.
6858     for (Instruction &I : BB->instructionsWithoutDebug()) {
6859       // Skip ignored values.
6860       if (ValuesToIgnore.count(&I) ||
6861           (VF.isVector() && VecValuesToIgnore.count(&I)))
6862         continue;
6863 
6864       VectorizationCostTy C = getInstructionCost(&I, VF);
6865 
6866       // Check if we should override the cost.
6867       if (ForceTargetInstructionCost.getNumOccurrences() > 0)
6868         C.first = InstructionCost(ForceTargetInstructionCost);
6869 
6870       BlockCost.first += C.first;
6871       BlockCost.second |= C.second;
6872       LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
6873                         << " for VF " << VF << " For instruction: " << I
6874                         << '\n');
6875     }
6876 
    // If we are vectorizing a predicated block, it will have been
    // if-converted. This means that the block's instructions (aside from
    // stores and instructions that may divide by zero) will now be
    // unconditionally executed. For the scalar case, we may not always execute
    // the predicated block, if it is an if-else block. Thus, scale the block's
    // cost by the probability of executing it. blockNeedsPredication from
    // Legal is used so as not to include all blocks in tail-folded loops.
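    // For example, a predicated block with an estimated cost of 8 and a
    // reciprocal block probability of 2 (i.e. an assumed 50% execution
    // probability) contributes a scaled cost of 4.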
6884     if (VF.isScalar() && Legal->blockNeedsPredication(BB))
6885       BlockCost.first /= getReciprocalPredBlockProb();
6886 
6887     Cost.first += BlockCost.first;
6888     Cost.second |= BlockCost.second;
6889   }
6890 
6891   return Cost;
6892 }
6893 
/// Gets the address access SCEV after verifying that the access pattern is
/// loop invariant except for the induction variable dependence.
///
/// This SCEV can be sent to the Target in order to estimate the address
/// calculation cost.
static const SCEV *getAddressAccessSCEV(Value *Ptr,
                                        LoopVectorizationLegality *Legal,
                                        PredicatedScalarEvolution &PSE,
                                        const Loop *TheLoop) {
6905   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
6906   if (!Gep)
6907     return nullptr;
6908 
  // We are looking for a GEP with all loop-invariant indices except for one,
  // which should be an induction variable.
6911   auto SE = PSE.getSE();
6912   unsigned NumOperands = Gep->getNumOperands();
6913   for (unsigned i = 1; i < NumOperands; ++i) {
6914     Value *Opd = Gep->getOperand(i);
6915     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
6916         !Legal->isInductionVariable(Opd))
6917       return nullptr;
6918   }
6919 
  // Now we know we have a GEP ptr, %inv, %ind, %inv. Return the Ptr SCEV.
6921   return PSE.getSCEV(Ptr);
6922 }
6923 
6924 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
6925   return Legal->hasStride(I->getOperand(0)) ||
6926          Legal->hasStride(I->getOperand(1));
6927 }
6928 
6929 InstructionCost
6930 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
6931                                                         ElementCount VF) {
6932   assert(VF.isVector() &&
6933          "Scalarization cost of instruction implies vectorization.");
6934   if (VF.isScalable())
6935     return InstructionCost::getInvalid();
6936 
6937   Type *ValTy = getLoadStoreType(I);
6938   auto SE = PSE.getSE();
6939 
6940   unsigned AS = getLoadStoreAddressSpace(I);
6941   Value *Ptr = getLoadStorePointerOperand(I);
6942   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
6943 
  // Figure out whether the access is strided and get the stride value
  // if it's known at compile time.
6946   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
6947 
6948   // Get the cost of the scalar memory instruction and address computation.
6949   InstructionCost Cost =
6950       VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
6951 
6952   // Don't pass *I here, since it is scalar but will actually be part of a
6953   // vectorized loop where the user of it is a vectorized instruction.
6954   const Align Alignment = getLoadStoreAlignment(I);
6955   Cost += VF.getKnownMinValue() *
6956           TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
6957                               AS, TTI::TCK_RecipThroughput);
6958 
6959   // Get the overhead of the extractelement and insertelement instructions
6960   // we might create due to scalarization.
6961   Cost += getScalarizationOverhead(I, VF);
6962 
6963   // If we have a predicated load/store, it will need extra i1 extracts and
6964   // conditional branches, but may not be executed for each vector lane. Scale
6965   // the cost by the probability of executing the predicated block.
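  // For example (illustrative numbers): a scalarized cost of 8 divided by a
  // reciprocal block probability of 2 becomes 4, before the i1 extract and
  // branch costs below are added.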
6966   if (isPredicatedInst(I)) {
6967     Cost /= getReciprocalPredBlockProb();
6968 
6969     // Add the cost of an i1 extract and a branch
6970     auto *Vec_i1Ty =
6971         VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF);
6972     Cost += TTI.getScalarizationOverhead(
6973         Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()),
6974         /*Insert=*/false, /*Extract=*/true);
6975     Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput);
6976 
6977     if (useEmulatedMaskMemRefHack(I))
6978       // Artificially setting to a high enough value to practically disable
6979       // vectorization with such operations.
6980       Cost = 3000000;
6981   }
6982 
6983   return Cost;
6984 }
6985 
6986 InstructionCost
6987 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
6988                                                     ElementCount VF) {
6989   Type *ValTy = getLoadStoreType(I);
6990   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6991   Value *Ptr = getLoadStorePointerOperand(I);
6992   unsigned AS = getLoadStoreAddressSpace(I);
6993   int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
6994   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6995 
6996   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6997          "Stride should be 1 or -1 for consecutive memory access");
6998   const Align Alignment = getLoadStoreAlignment(I);
6999   InstructionCost Cost = 0;
7000   if (Legal->isMaskRequired(I))
7001     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
7002                                       CostKind);
7003   else
7004     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
7005                                 CostKind, I);
7006 
7007   bool Reverse = ConsecutiveStride < 0;
7008   if (Reverse)
7009     Cost +=
7010         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
7011   return Cost;
7012 }
7013 
7014 InstructionCost
7015 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
7016                                                 ElementCount VF) {
7017   assert(Legal->isUniformMemOp(*I));
7018 
7019   Type *ValTy = getLoadStoreType(I);
7020   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
7021   const Align Alignment = getLoadStoreAlignment(I);
7022   unsigned AS = getLoadStoreAddressSpace(I);
7023   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
7024   if (isa<LoadInst>(I)) {
7025     return TTI.getAddressComputationCost(ValTy) +
7026            TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
7027                                CostKind) +
7028            TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
7029   }
7030   StoreInst *SI = cast<StoreInst>(I);
7031 
7032   bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
7033   return TTI.getAddressComputationCost(ValTy) +
7034          TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
7035                              CostKind) +
7036          (isLoopInvariantStoreValue
7037               ? 0
7038               : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
7039                                        VF.getKnownMinValue() - 1));
7040 }
7041 
7042 InstructionCost
7043 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
7044                                                  ElementCount VF) {
7045   Type *ValTy = getLoadStoreType(I);
7046   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
7047   const Align Alignment = getLoadStoreAlignment(I);
7048   const Value *Ptr = getLoadStorePointerOperand(I);
7049 
7050   return TTI.getAddressComputationCost(VectorTy) +
7051          TTI.getGatherScatterOpCost(
7052              I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
7053              TargetTransformInfo::TCK_RecipThroughput, I);
7054 }
7055 
7056 InstructionCost
7057 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
7058                                                    ElementCount VF) {
7059   // TODO: Once we have support for interleaving with scalable vectors
7060   // we can calculate the cost properly here.
7061   if (VF.isScalable())
7062     return InstructionCost::getInvalid();
7063 
7064   Type *ValTy = getLoadStoreType(I);
7065   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
7066   unsigned AS = getLoadStoreAddressSpace(I);
7067 
7068   auto Group = getInterleavedAccessGroup(I);
  assert(Group && "Failed to get an interleaved access group.");
7070 
7071   unsigned InterleaveFactor = Group->getFactor();
7072   auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
7073 
7074   // Holds the indices of existing members in an interleaved load group.
7075   // An interleaved store group doesn't need this as it doesn't allow gaps.
7076   SmallVector<unsigned, 4> Indices;
7077   if (isa<LoadInst>(I)) {
7078     for (unsigned i = 0; i < InterleaveFactor; i++)
7079       if (Group->getMember(i))
7080         Indices.push_back(i);
7081   }
7082 
7083   // Calculate the cost of the whole interleaved group.
7084   bool UseMaskForGaps =
7085       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
7086   InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
7087       I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
7088       AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);
7089 
7090   if (Group->isReverse()) {
7091     // TODO: Add support for reversed masked interleaved access.
7092     assert(!Legal->isMaskRequired(I) &&
7093            "Reverse masked interleaved access not supported.");
7094     Cost +=
7095         Group->getNumMembers() *
7096         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
7097   }
7098   return Cost;
7099 }
7100 
7101 InstructionCost LoopVectorizationCostModel::getReductionPatternCost(
7102     Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
  // Early exit if there are no in-loop reductions.
7104   if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
7105     return InstructionCost::getInvalid();
7106   auto *VectorTy = cast<VectorType>(Ty);
7107 
  // We are looking for one of the following patterns, choosing the minimal
  // acceptable cost:
  //  reduce(mul(ext(A), ext(B))) or
  //  reduce(mul(A, B)) or
  //  reduce(ext(A)) or
  //  reduce(A).
  // The basic idea is that we walk down the tree to do that, finding the root
  // reduction instruction in InLoopReductionImmediateChains. From there we
  // find the pattern of mul/ext and test the cost of the entire pattern vs
  // the cost of the components. If the reduction cost is lower then we return
  // it for the reduction instruction and 0 for the other instructions in the
  // pattern. If it is not, we return an invalid cost specifying that the
  // original cost method should be used.
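  // Illustrative IR (hypothetical values) matching reduce(mul(ext, ext)):
  //   %ea = sext i8 %a to i32
  //   %eb = sext i8 %b to i32
  //   %m  = mul i32 %ea, %eb
  //   %r  = add i32 %m, %phi
  // may be costed below as a single extended multiply-accumulate reduction.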
7120   Instruction *RetI = I;
7121   if ((RetI->getOpcode() == Instruction::SExt ||
7122        RetI->getOpcode() == Instruction::ZExt)) {
7123     if (!RetI->hasOneUser())
7124       return InstructionCost::getInvalid();
7125     RetI = RetI->user_back();
7126   }
7127   if (RetI->getOpcode() == Instruction::Mul &&
7128       RetI->user_back()->getOpcode() == Instruction::Add) {
7129     if (!RetI->hasOneUser())
7130       return InstructionCost::getInvalid();
7131     RetI = RetI->user_back();
7132   }
7133 
  // Test if the found instruction is a reduction, and if not return an
  // invalid cost telling the caller to use the original cost modelling.
7136   if (!InLoopReductionImmediateChains.count(RetI))
7137     return InstructionCost::getInvalid();
7138 
7139   // Find the reduction this chain is a part of and calculate the basic cost of
7140   // the reduction on its own.
7141   Instruction *LastChain = InLoopReductionImmediateChains[RetI];
7142   Instruction *ReductionPhi = LastChain;
7143   while (!isa<PHINode>(ReductionPhi))
7144     ReductionPhi = InLoopReductionImmediateChains[ReductionPhi];
7145 
7146   const RecurrenceDescriptor &RdxDesc =
7147       Legal->getReductionVars()[cast<PHINode>(ReductionPhi)];
7148   InstructionCost BaseCost = TTI.getArithmeticReductionCost(
7149       RdxDesc.getOpcode(), VectorTy, false, CostKind);
7150 
7151   // Get the operand that was not the reduction chain and match it to one of the
7152   // patterns, returning the better cost if it is found.
7153   Instruction *RedOp = RetI->getOperand(1) == LastChain
7154                            ? dyn_cast<Instruction>(RetI->getOperand(0))
7155                            : dyn_cast<Instruction>(RetI->getOperand(1));
7156 
7157   VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
7158 
7159   if (RedOp && (isa<SExtInst>(RedOp) || isa<ZExtInst>(RedOp)) &&
7160       !TheLoop->isLoopInvariant(RedOp)) {
7161     bool IsUnsigned = isa<ZExtInst>(RedOp);
7162     auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
7163     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
7164         /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
7165         CostKind);
7166 
7167     InstructionCost ExtCost =
7168         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
7169                              TTI::CastContextHint::None, CostKind, RedOp);
7170     if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
7171       return I == RetI ? *RedCost.getValue() : 0;
7172   } else if (RedOp && RedOp->getOpcode() == Instruction::Mul) {
7173     Instruction *Mul = RedOp;
7174     Instruction *Op0 = dyn_cast<Instruction>(Mul->getOperand(0));
7175     Instruction *Op1 = dyn_cast<Instruction>(Mul->getOperand(1));
7176     if (Op0 && Op1 && (isa<SExtInst>(Op0) || isa<ZExtInst>(Op0)) &&
7177         Op0->getOpcode() == Op1->getOpcode() &&
7178         Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
7179         !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
7180       bool IsUnsigned = isa<ZExtInst>(Op0);
7181       auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
7182       // reduce(mul(ext, ext))
7183       InstructionCost ExtCost =
7184           TTI.getCastInstrCost(Op0->getOpcode(), VectorTy, ExtType,
7185                                TTI::CastContextHint::None, CostKind, Op0);
7186       InstructionCost MulCost =
7187           TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind);
7188 
7189       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
7190           /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
7191           CostKind);
7192 
7193       if (RedCost.isValid() && RedCost < ExtCost * 2 + MulCost + BaseCost)
7194         return I == RetI ? *RedCost.getValue() : 0;
7195     } else {
7196       InstructionCost MulCost =
7197           TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind);
7198 
7199       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
7200           /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy,
7201           CostKind);
7202 
7203       if (RedCost.isValid() && RedCost < MulCost + BaseCost)
7204         return I == RetI ? *RedCost.getValue() : 0;
7205     }
7206   }
7207 
7208   return I == RetI ? BaseCost : InstructionCost::getInvalid();
7209 }
7210 
7211 InstructionCost
7212 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
7213                                                      ElementCount VF) {
  // Calculate the scalar cost only; the vectorization cost should already
  // have been computed by this point.
7216   if (VF.isScalar()) {
7217     Type *ValTy = getLoadStoreType(I);
7218     const Align Alignment = getLoadStoreAlignment(I);
7219     unsigned AS = getLoadStoreAddressSpace(I);
7220 
7221     return TTI.getAddressComputationCost(ValTy) +
7222            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
7223                                TTI::TCK_RecipThroughput, I);
7224   }
7225   return getWideningCost(I, VF);
7226 }
7227 
7228 LoopVectorizationCostModel::VectorizationCostTy
7229 LoopVectorizationCostModel::getInstructionCost(Instruction *I,
7230                                                ElementCount VF) {
7231   // If we know that this instruction will remain uniform, check the cost of
7232   // the scalar version.
7233   if (isUniformAfterVectorization(I, VF))
7234     VF = ElementCount::getFixed(1);
7235 
7236   if (VF.isVector() && isProfitableToScalarize(I, VF))
7237     return VectorizationCostTy(InstsToScalarize[VF][I], false);
7238 
7239   // Forced scalars do not have any scalarization overhead.
7240   auto ForcedScalar = ForcedScalars.find(VF);
7241   if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
7242     auto InstSet = ForcedScalar->second;
7243     if (InstSet.count(I))
7244       return VectorizationCostTy(
7245           (getInstructionCost(I, ElementCount::getFixed(1)).first *
7246            VF.getKnownMinValue()),
7247           false);
7248   }
7249 
7250   Type *VectorTy;
7251   InstructionCost C = getInstructionCost(I, VF, VectorTy);
7252 
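  // The type counts as scalarized when legalization splits it into at least
  // as many parts as there are lanes, i.e. (roughly) each lane ends up in
  // its own register.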
7253   bool TypeNotScalarized =
7254       VF.isVector() && VectorTy->isVectorTy() &&
7255       TTI.getNumberOfParts(VectorTy) < VF.getKnownMinValue();
7256   return VectorizationCostTy(C, TypeNotScalarized);
7257 }
7258 
7259 InstructionCost
7260 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
7261                                                      ElementCount VF) const {
7262 
7263   if (VF.isScalable())
7264     return InstructionCost::getInvalid();
7265 
7266   if (VF.isScalar())
7267     return 0;
7268 
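  // The overhead models the insertelement instructions needed to assemble
  // the vector result and the extractelement instructions needed to feed the
  // scalarized instances; e.g. (roughly) a scalarized instruction at VF=4
  // pays for 4 result inserts plus the operand extracts computed below.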
7269   InstructionCost Cost = 0;
7270   Type *RetTy = ToVectorTy(I->getType(), VF);
7271   if (!RetTy->isVoidTy() &&
7272       (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
7273     Cost += TTI.getScalarizationOverhead(
7274         cast<VectorType>(RetTy), APInt::getAllOnesValue(VF.getKnownMinValue()),
7275         true, false);
7276 
7277   // Some targets keep addresses scalar.
7278   if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
7279     return Cost;
7280 
7281   // Some targets support efficient element stores.
7282   if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
7283     return Cost;
7284 
7285   // Collect operands to consider.
7286   CallInst *CI = dyn_cast<CallInst>(I);
7287   Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands();
7288 
7289   // Skip operands that do not require extraction/scalarization and do not incur
7290   // any overhead.
7291   SmallVector<Type *> Tys;
7292   for (auto *V : filterExtractingOperands(Ops, VF))
7293     Tys.push_back(MaybeVectorizeType(V->getType(), VF));
7294   return Cost + TTI.getOperandsScalarizationOverhead(
7295                     filterExtractingOperands(Ops, VF), Tys);
7296 }
7297 
7298 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
7299   if (VF.isScalar())
7300     return;
7301   NumPredStores = 0;
7302   for (BasicBlock *BB : TheLoop->blocks()) {
7303     // For each instruction in the old loop.
7304     for (Instruction &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
7306       if (!Ptr)
7307         continue;
7308 
7309       // TODO: We should generate better code and update the cost model for
7310       // predicated uniform stores. Today they are treated as any other
7311       // predicated store (see added test cases in
7312       // invariant-store-vectorization.ll).
7313       if (isa<StoreInst>(&I) && isScalarWithPredication(&I))
7314         NumPredStores++;
7315 
7316       if (Legal->isUniformMemOp(I)) {
7317         // TODO: Avoid replicating loads and stores instead of
7318         // relying on instcombine to remove them.
7319         // Load: Scalar load + broadcast
7320         // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
7321         InstructionCost Cost;
7322         if (isa<StoreInst>(&I) && VF.isScalable() &&
7323             isLegalGatherOrScatter(&I)) {
7324           Cost = getGatherScatterCost(&I, VF);
7325           setWideningDecision(&I, VF, CM_GatherScatter, Cost);
7326         } else {
7327           assert((isa<LoadInst>(&I) || !VF.isScalable()) &&
7328                  "Cannot yet scalarize uniform stores");
7329           Cost = getUniformMemOpCost(&I, VF);
7330           setWideningDecision(&I, VF, CM_Scalarize, Cost);
7331         }
7332         continue;
7333       }
7334 
7335       // We assume that widening is the best solution when possible.
7336       if (memoryInstructionCanBeWidened(&I, VF)) {
7337         InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
7338         int ConsecutiveStride =
7339                Legal->isConsecutivePtr(getLoadStorePointerOperand(&I));
7340         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
7341                "Expected consecutive stride.");
7342         InstWidening Decision =
7343             ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
7344         setWideningDecision(&I, VF, Decision, Cost);
7345         continue;
7346       }
7347 
7348       // Choose between Interleaving, Gather/Scatter or Scalarization.
7349       InstructionCost InterleaveCost = InstructionCost::getInvalid();
7350       unsigned NumAccesses = 1;
7351       if (isAccessInterleaved(&I)) {
7352         auto Group = getInterleavedAccessGroup(&I);
        assert(Group && "Failed to get an interleaved access group.");
7354 
7355         // Make one decision for the whole group.
7356         if (getWideningDecision(&I, VF) != CM_Unknown)
7357           continue;
7358 
7359         NumAccesses = Group->getNumMembers();
7360         if (interleavedAccessCanBeWidened(&I, VF))
7361           InterleaveCost = getInterleaveGroupCost(&I, VF);
7362       }
7363 
7364       InstructionCost GatherScatterCost =
7365           isLegalGatherOrScatter(&I)
7366               ? getGatherScatterCost(&I, VF) * NumAccesses
7367               : InstructionCost::getInvalid();
7368 
7369       InstructionCost ScalarizationCost =
7370           getMemInstScalarizationCost(&I, VF) * NumAccesses;
7371 
      // Choose the better solution for the current VF, record this decision,
      // and use it during vectorization.
7374       InstructionCost Cost;
7375       InstWidening Decision;
7376       if (InterleaveCost <= GatherScatterCost &&
7377           InterleaveCost < ScalarizationCost) {
7378         Decision = CM_Interleave;
7379         Cost = InterleaveCost;
7380       } else if (GatherScatterCost < ScalarizationCost) {
7381         Decision = CM_GatherScatter;
7382         Cost = GatherScatterCost;
7383       } else {
        assert(!VF.isScalable() &&
               "We cannot yet scalarize for scalable vectors");
7386         Decision = CM_Scalarize;
7387         Cost = ScalarizationCost;
7388       }
      // If the instruction belongs to an interleave group, the whole group
      // receives the same decision. The cost is computed for the group as a
      // whole, but it will actually be assigned to a single member.
7392       if (auto Group = getInterleavedAccessGroup(&I))
7393         setWideningDecision(Group, VF, Decision, Cost);
7394       else
7395         setWideningDecision(&I, VF, Decision, Cost);
7396     }
7397   }
7398 
  // Make sure that any load of an address and any other address computation
  // remains scalar unless there is gather/scatter support. This avoids
7401   // inevitable extracts into address registers, and also has the benefit of
7402   // activating LSR more, since that pass can't optimize vectorized
7403   // addresses.
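  // For example (an illustrative snippet), in
  //   %p = load i32*, i32** %q
  //   %v = load i32, i32* %p
  // widening %p would force an extract of every lane before it could be used
  // as an address, so we prefer to keep %p and the instructions feeding it
  // scalar.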
7404   if (TTI.prefersVectorizedAddressing())
7405     return;
7406 
7407   // Start with all scalar pointer uses.
7408   SmallPtrSet<Instruction *, 8> AddrDefs;
7409   for (BasicBlock *BB : TheLoop->blocks())
7410     for (Instruction &I : *BB) {
7411       Instruction *PtrDef =
7412         dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
7413       if (PtrDef && TheLoop->contains(PtrDef) &&
7414           getWideningDecision(&I, VF) != CM_GatherScatter)
7415         AddrDefs.insert(PtrDef);
7416     }
7417 
7418   // Add all instructions used to generate the addresses.
7419   SmallVector<Instruction *, 4> Worklist;
7420   append_range(Worklist, AddrDefs);
7421   while (!Worklist.empty()) {
7422     Instruction *I = Worklist.pop_back_val();
7423     for (auto &Op : I->operands())
7424       if (auto *InstOp = dyn_cast<Instruction>(Op))
7425         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
7426             AddrDefs.insert(InstOp).second)
7427           Worklist.push_back(InstOp);
7428   }
7429 
7430   for (auto *I : AddrDefs) {
7431     if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by
      // the cost functions, but since this involves finding out whether the
      // loaded register is involved in an address computation, it is instead
      // changed here when we know this is the case.
7436       InstWidening Decision = getWideningDecision(I, VF);
7437       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
7438         // Scalarize a widened load of address.
7439         setWideningDecision(
7440             I, VF, CM_Scalarize,
7441             (VF.getKnownMinValue() *
7442              getMemoryInstructionCost(I, ElementCount::getFixed(1))));
7443       else if (auto Group = getInterleavedAccessGroup(I)) {
7444         // Scalarize an interleave group of address loads.
7445         for (unsigned I = 0; I < Group->getFactor(); ++I) {
7446           if (Instruction *Member = Group->getMember(I))
7447             setWideningDecision(
7448                 Member, VF, CM_Scalarize,
7449                 (VF.getKnownMinValue() *
7450                  getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
7451         }
7452       }
7453     } else
      // Make sure I gets scalarized and receives a cost estimate without
      // scalarization overhead.
7456       ForcedScalars[VF].insert(I);
7457   }
7458 }
7459 
7460 InstructionCost
7461 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
7462                                                Type *&VectorTy) {
7463   Type *RetTy = I->getType();
7464   if (canTruncateToMinimalBitwidth(I, VF))
7465     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
7466   auto SE = PSE.getSE();
7467   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
7468 
7469   auto hasSingleCopyAfterVectorization = [this](Instruction *I,
7470                                                 ElementCount VF) -> bool {
7471     if (VF.isScalar())
7472       return true;
7473 
7474     auto Scalarized = InstsToScalarize.find(VF);
7475     assert(Scalarized != InstsToScalarize.end() &&
7476            "VF not yet analyzed for scalarization profitability");
7477     return !Scalarized->second.count(I) &&
7478            llvm::all_of(I->users(), [&](User *U) {
7479              auto *UI = cast<Instruction>(U);
7480              return !Scalarized->second.count(UI);
7481            });
7482   };
7483   (void) hasSingleCopyAfterVectorization;
7484 
7485   if (isScalarAfterVectorization(I, VF)) {
7486     // With the exception of GEPs and PHIs, after scalarization there should
7487     // only be one copy of the instruction generated in the loop. This is
7488     // because the VF is either 1, or any instructions that need scalarizing
    // have already been dealt with by the time we get here. As a result,
    // we don't have to multiply the instruction cost by VF.
7491     assert(I->getOpcode() == Instruction::GetElementPtr ||
7492            I->getOpcode() == Instruction::PHI ||
7493            (I->getOpcode() == Instruction::BitCast &&
7494             I->getType()->isPointerTy()) ||
7495            hasSingleCopyAfterVectorization(I, VF));
7496     VectorTy = RetTy;
7497   } else
7498     VectorTy = ToVectorTy(RetTy, VF);
7499 
7500   // TODO: We need to estimate the cost of intrinsic calls.
7501   switch (I->getOpcode()) {
7502   case Instruction::GetElementPtr:
7503     // We mark this instruction as zero-cost because the cost of GEPs in
7504     // vectorized code depends on whether the corresponding memory instruction
7505     // is scalarized or not. Therefore, we handle GEPs with the memory
7506     // instruction cost.
7507     return 0;
7508   case Instruction::Br: {
    // In cases of scalarized and predicated instructions, there will be VF
    // predicated blocks in the vectorized loop. Each branch around these
    // blocks also requires an extract of its vector compare i1 element.
7512     bool ScalarPredicatedBB = false;
7513     BranchInst *BI = cast<BranchInst>(I);
7514     if (VF.isVector() && BI->isConditional() &&
7515         (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
7516          PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
7517       ScalarPredicatedBB = true;
7518 
7519     if (ScalarPredicatedBB) {
7520       // Return cost for branches around scalarized and predicated blocks.
7521       assert(!VF.isScalable() && "scalable vectors not yet supported.");
7522       auto *Vec_i1Ty =
7523           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
7524       return (TTI.getScalarizationOverhead(
7525                   Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()),
7526                   false, true) +
7527               (TTI.getCFInstrCost(Instruction::Br, CostKind) *
7528                VF.getKnownMinValue()));
7529     } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
7530       // The back-edge branch will remain, as will all scalar branches.
7531       return TTI.getCFInstrCost(Instruction::Br, CostKind);
7532     else
7533       // This branch will be eliminated by if-conversion.
7534       return 0;
7535     // Note: We currently assume zero cost for an unconditional branch inside
7536     // a predicated block since it will become a fall-through, although we
7537     // may decide in the future to call TTI for all branches.
7538   }
7539   case Instruction::PHI: {
7540     auto *Phi = cast<PHINode>(I);
7541 
7542     // First-order recurrences are replaced by vector shuffles inside the loop.
7543     // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
7544     if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi))
7545       return TTI.getShuffleCost(
7546           TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy),
7547           None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1));
7548 
7549     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
7550     // converted into select instructions. We require N - 1 selects per phi
7551     // node, where N is the number of incoming values.
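    // For example, a phi merging values from three predecessors is lowered
    // to two select instructions chained on the corresponding edge masks.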
7552     if (VF.isVector() && Phi->getParent() != TheLoop->getHeader())
7553       return (Phi->getNumIncomingValues() - 1) *
7554              TTI.getCmpSelInstrCost(
7555                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
7556                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
7557                  CmpInst::BAD_ICMP_PREDICATE, CostKind);
7558 
7559     return TTI.getCFInstrCost(Instruction::PHI, CostKind);
7560   }
7561   case Instruction::UDiv:
7562   case Instruction::SDiv:
7563   case Instruction::URem:
7564   case Instruction::SRem:
7565     // If we have a predicated instruction, it may not be executed for each
7566     // vector lane. Get the scalarization cost and scale this amount by the
7567     // probability of executing the predicated block. If the instruction is not
7568     // predicated, we fall through to the next case.
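    // E.g., with the default reciprocal block probability of 2 (i.e.
    // assuming a predicated block executes on about half of the iterations),
    // the summed scalarization cost below is halved.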
7569     if (VF.isVector() && isScalarWithPredication(I)) {
7570       InstructionCost Cost = 0;
7571 
7572       // These instructions have a non-void type, so account for the phi nodes
7573       // that we will create. This cost is likely to be zero. The phi node
7574       // cost, if any, should be scaled by the block probability because it
7575       // models a copy at the end of each predicated block.
7576       Cost += VF.getKnownMinValue() *
7577               TTI.getCFInstrCost(Instruction::PHI, CostKind);
7578 
7579       // The cost of the non-predicated instruction.
7580       Cost += VF.getKnownMinValue() *
7581               TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);
7582 
7583       // The cost of insertelement and extractelement instructions needed for
7584       // scalarization.
7585       Cost += getScalarizationOverhead(I, VF);
7586 
7587       // Scale the cost by the probability of executing the predicated blocks.
7588       // This assumes the predicated block for each vector lane is equally
7589       // likely.
7590       return Cost / getReciprocalPredBlockProb();
7591     }
7592     LLVM_FALLTHROUGH;
7593   case Instruction::Add:
7594   case Instruction::FAdd:
7595   case Instruction::Sub:
7596   case Instruction::FSub:
7597   case Instruction::Mul:
7598   case Instruction::FMul:
7599   case Instruction::FDiv:
7600   case Instruction::FRem:
7601   case Instruction::Shl:
7602   case Instruction::LShr:
7603   case Instruction::AShr:
7604   case Instruction::And:
7605   case Instruction::Or:
7606   case Instruction::Xor: {
    // Since we will replace the stride by 1, the multiplication should go
    // away.
7608     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
7609       return 0;
7610 
7611     // Detect reduction patterns
7612     InstructionCost RedCost;
7613     if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7614             .isValid())
7615       return RedCost;
7616 
    // Certain instructions can be cheaper to vectorize if they have a
    // constant second vector operand. One example of this is shifts on x86.
7619     Value *Op2 = I->getOperand(1);
7620     TargetTransformInfo::OperandValueProperties Op2VP;
7621     TargetTransformInfo::OperandValueKind Op2VK =
7622         TTI.getOperandInfo(Op2, Op2VP);
7623     if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
7624       Op2VK = TargetTransformInfo::OK_UniformValue;
7625 
7626     SmallVector<const Value *, 4> Operands(I->operand_values());
7627     return TTI.getArithmeticInstrCost(
7628         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7629         Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
7630   }
7631   case Instruction::FNeg: {
7632     return TTI.getArithmeticInstrCost(
7633         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7634         TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None,
7635         TargetTransformInfo::OP_None, I->getOperand(0), I);
7636   }
7637   case Instruction::Select: {
7638     SelectInst *SI = cast<SelectInst>(I);
7639     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
7640     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
7641 
7642     const Value *Op0, *Op1;
7643     using namespace llvm::PatternMatch;
7644     if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
7645                         match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
7646       // select x, y, false --> x & y
7647       // select x, true, y --> x | y
7648       TTI::OperandValueProperties Op1VP = TTI::OP_None;
7649       TTI::OperandValueProperties Op2VP = TTI::OP_None;
7650       TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP);
7651       TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP);
7652       assert(Op0->getType()->getScalarSizeInBits() == 1 &&
7653               Op1->getType()->getScalarSizeInBits() == 1);
7654 
7655       SmallVector<const Value *, 2> Operands{Op0, Op1};
7656       return TTI.getArithmeticInstrCost(
7657           match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And, VectorTy,
7658           CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I);
7659     }
7660 
7661     Type *CondTy = SI->getCondition()->getType();
7662     if (!ScalarCond)
7663       CondTy = VectorType::get(CondTy, VF);
7664     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy,
7665                                   CmpInst::BAD_ICMP_PREDICATE, CostKind, I);
7666   }
7667   case Instruction::ICmp:
7668   case Instruction::FCmp: {
7669     Type *ValTy = I->getOperand(0)->getType();
7670     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
7671     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
7672       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
7673     VectorTy = ToVectorTy(ValTy, VF);
7674     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr,
7675                                   CmpInst::BAD_ICMP_PREDICATE, CostKind, I);
7676   }
7677   case Instruction::Store:
7678   case Instruction::Load: {
7679     ElementCount Width = VF;
7680     if (Width.isVector()) {
7681       InstWidening Decision = getWideningDecision(I, Width);
7682       assert(Decision != CM_Unknown &&
7683              "CM decision should be taken at this point");
7684       if (Decision == CM_Scalarize)
7685         Width = ElementCount::getFixed(1);
7686     }
7687     VectorTy = ToVectorTy(getLoadStoreType(I), Width);
7688     return getMemoryInstructionCost(I, VF);
7689   }
7690   case Instruction::BitCast:
7691     if (I->getType()->isPointerTy())
7692       return 0;
7693     LLVM_FALLTHROUGH;
7694   case Instruction::ZExt:
7695   case Instruction::SExt:
7696   case Instruction::FPToUI:
7697   case Instruction::FPToSI:
7698   case Instruction::FPExt:
7699   case Instruction::PtrToInt:
7700   case Instruction::IntToPtr:
7701   case Instruction::SIToFP:
7702   case Instruction::UIToFP:
7703   case Instruction::Trunc:
7704   case Instruction::FPTrunc: {
7705     // Computes the CastContextHint from a Load/Store instruction.
7706     auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
7707       assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
7708              "Expected a load or a store!");
7709 
7710       if (VF.isScalar() || !TheLoop->contains(I))
7711         return TTI::CastContextHint::Normal;
7712 
7713       switch (getWideningDecision(I, VF)) {
7714       case LoopVectorizationCostModel::CM_GatherScatter:
7715         return TTI::CastContextHint::GatherScatter;
7716       case LoopVectorizationCostModel::CM_Interleave:
7717         return TTI::CastContextHint::Interleave;
7718       case LoopVectorizationCostModel::CM_Scalarize:
7719       case LoopVectorizationCostModel::CM_Widen:
7720         return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked
7721                                         : TTI::CastContextHint::Normal;
7722       case LoopVectorizationCostModel::CM_Widen_Reverse:
7723         return TTI::CastContextHint::Reversed;
7724       case LoopVectorizationCostModel::CM_Unknown:
7725         llvm_unreachable("Instr did not go through cost modelling?");
7726       }
7727 
7728       llvm_unreachable("Unhandled case!");
7729     };
7730 
7731     unsigned Opcode = I->getOpcode();
7732     TTI::CastContextHint CCH = TTI::CastContextHint::None;
7733     // For Trunc, the context is the only user, which must be a StoreInst.
7734     if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
7735       if (I->hasOneUse())
7736         if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
7737           CCH = ComputeCCH(Store);
7738     }
7739     // For Z/Sext, the context is the operand, which must be a LoadInst.
7740     else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
7741              Opcode == Instruction::FPExt) {
7742       if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
7743         CCH = ComputeCCH(Load);
7744     }
7745 
    // We optimize the truncation of induction variables having constant
    // integer steps. The cost of these truncations is the same as that of
    // the scalar operation.
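    // For example, "%t = trunc i64 %iv to i32", where %iv is an induction
    // with a constant step, can simply become an i32 induction, so it is
    // costed as a single scalar truncate.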
7749     if (isOptimizableIVTruncate(I, VF)) {
7750       auto *Trunc = cast<TruncInst>(I);
7751       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
7752                                   Trunc->getSrcTy(), CCH, CostKind, Trunc);
7753     }
7754 
7755     // Detect reduction patterns
7756     InstructionCost RedCost;
7757     if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7758             .isValid())
7759       return RedCost;
7760 
7761     Type *SrcScalarTy = I->getOperand(0)->getType();
7762     Type *SrcVecTy =
7763         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
7764     if (canTruncateToMinimalBitwidth(I, VF)) {
      // This cast is going to be shrunk. This may remove the cast or it might
      // turn it into a slightly different cast. For example, if MinBW == 16,
7767       // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
7768       //
7769       // Calculate the modified src and dest types.
7770       Type *MinVecTy = VectorTy;
7771       if (Opcode == Instruction::Trunc) {
7772         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
7773         VectorTy =
7774             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7775       } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
7776         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
7777         VectorTy =
7778             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7779       }
7780     }
7781 
7782     return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
7783   }
7784   case Instruction::Call: {
7785     bool NeedToScalarize;
7786     CallInst *CI = cast<CallInst>(I);
7787     InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
7788     if (getVectorIntrinsicIDForCall(CI, TLI)) {
7789       InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF);
7790       return std::min(CallCost, IntrinsicCost);
7791     }
7792     return CallCost;
7793   }
7794   case Instruction::ExtractValue:
7795     return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput);
7796   default:
7797     // This opcode is unknown. Assume that it is the same as 'mul'.
7798     return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
7799   } // end of switch.
7800 }
7801 
7802 char LoopVectorize::ID = 0;
7803 
7804 static const char lv_name[] = "Loop Vectorization";
7805 
7806 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
7807 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
7808 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
7809 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
7810 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
7811 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
7812 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
7813 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
7814 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
7815 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
7816 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
7817 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
7818 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
7819 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
7820 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
7821 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
7822 
7823 namespace llvm {
7824 
7825 Pass *createLoopVectorizePass() { return new LoopVectorize(); }
7826 
7827 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
7828                               bool VectorizeOnlyWhenForced) {
7829   return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
7830 }
7831 
7832 } // end namespace llvm
7833 
7834 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
7835   // Check if the pointer operand of a load or store instruction is
7836   // consecutive.
7837   if (auto *Ptr = getLoadStorePointerOperand(Inst))
7838     return Legal->isConsecutivePtr(Ptr);
7839   return false;
7840 }
7841 
7842 void LoopVectorizationCostModel::collectValuesToIgnore() {
7843   // Ignore ephemeral values.
7844   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
7845 
7846   // Ignore type-promoting instructions we identified during reduction
7847   // detection.
7848   for (auto &Reduction : Legal->getReductionVars()) {
7849     RecurrenceDescriptor &RedDes = Reduction.second;
7850     const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
7851     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7852   }
7853   // Ignore type-casting instructions we identified during induction
7854   // detection.
7855   for (auto &Induction : Legal->getInductionVars()) {
7856     InductionDescriptor &IndDes = Induction.second;
7857     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
7858     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7859   }
7860 }
7861 
7862 void LoopVectorizationCostModel::collectInLoopReductions() {
7863   for (auto &Reduction : Legal->getReductionVars()) {
7864     PHINode *Phi = Reduction.first;
7865     RecurrenceDescriptor &RdxDesc = Reduction.second;
7866 
7867     // We don't collect reductions that are type promoted (yet).
7868     if (RdxDesc.getRecurrenceType() != Phi->getType())
7869       continue;
7870 
7871     // If the target would prefer this reduction to happen "in-loop", then we
7872     // want to record it as such.
7873     unsigned Opcode = RdxDesc.getOpcode();
7874     if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) &&
7875         !TTI.preferInLoopReduction(Opcode, Phi->getType(),
7876                                    TargetTransformInfo::ReductionFlags()))
7877       continue;
7878 
7879     // Check that we can correctly put the reductions into the loop, by
7880     // finding the chain of operations that leads from the phi to the loop
7881     // exit value.
7882     SmallVector<Instruction *, 4> ReductionOperations =
7883         RdxDesc.getReductionOpChain(Phi, TheLoop);
7884     bool InLoop = !ReductionOperations.empty();
7885     if (InLoop) {
7886       InLoopReductionChains[Phi] = ReductionOperations;
7887       // Add the elements to InLoopReductionImmediateChains for cost modelling.
7888       Instruction *LastChain = Phi;
7889       for (auto *I : ReductionOperations) {
7890         InLoopReductionImmediateChains[I] = LastChain;
7891         LastChain = I;
7892       }
7893     }
7894     LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
7895                       << " reduction for phi: " << *Phi << "\n");
7896   }
7897 }
7898 
7899 // TODO: we could return a pair of values that specify the max VF and
7900 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
// `buildVPlans(VF, VF)`. We cannot do it because VPlan at the moment
7902 // doesn't have a cost model that can choose which plan to execute if
7903 // more than one is generated.
7904 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
7905                                  LoopVectorizationCostModel &CM) {
7906   unsigned WidestType;
7907   std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
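  // E.g., 256-bit wide vector registers and a widest loop type of 32 bits
  // give a VF of 256 / 32 = 8.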
7908   return WidestVectorRegBits / WidestType;
7909 }
7910 
7911 VectorizationFactor
7912 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
7913   assert(!UserVF.isScalable() && "scalable vectors not yet supported");
7914   ElementCount VF = UserVF;
7915   // Outer loop handling: They may require CFG and instruction level
7916   // transformations before even evaluating whether vectorization is profitable.
7917   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
7918   // the vectorization pipeline.
7919   if (!OrigLoop->isInnermost()) {
7920     // If the user doesn't provide a vectorization factor, determine a
7921     // reasonable one.
7922     if (UserVF.isZero()) {
7923       VF = ElementCount::getFixed(determineVPlanVF(
7924           TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
7925               .getFixedSize(),
7926           CM));
7927       LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
7928 
7929       // Make sure we have a VF > 1 for stress testing.
7930       if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
7931         LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
7932                           << "overriding computed VF.\n");
7933         VF = ElementCount::getFixed(4);
7934       }
7935     }
7936     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7937     assert(isPowerOf2_32(VF.getKnownMinValue()) &&
7938            "VF needs to be a power of two");
7939     LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
7940                       << "VF " << VF << " to build VPlans.\n");
7941     buildVPlans(VF, VF);
7942 
7943     // For VPlan build stress testing, we bail out after VPlan construction.
7944     if (VPlanBuildStressTest)
7945       return VectorizationFactor::Disabled();
7946 
7947     return {VF, 0 /*Cost*/};
7948   }
7949 
7950   LLVM_DEBUG(
7951       dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
7952                 "VPlan-native path.\n");
7953   return VectorizationFactor::Disabled();
7954 }
7955 
7956 Optional<VectorizationFactor>
7957 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
7958   assert(OrigLoop->isInnermost() && "Inner loop expected.");
7959   FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
  if (!MaxFactors) // Cases that should not be vectorized nor interleaved.
7961     return None;
7962 
  // Invalidate interleave groups if all loop blocks will be predicated.
7964   if (CM.blockNeedsPredication(OrigLoop->getHeader()) &&
7965       !useMaskedInterleavedAccesses(*TTI)) {
7966     LLVM_DEBUG(
7967         dbgs()
7968         << "LV: Invalidate all interleaved groups due to fold-tail by masking "
7969            "which requires masked-interleaved support.\n");
7970     if (CM.InterleaveInfo.invalidateGroups())
7971       // Invalidating interleave groups also requires invalidating all decisions
7972       // based on them, which includes widening decisions and uniform and scalar
7973       // values.
7974       CM.invalidateCostModelingDecisions();
7975   }
7976 
7977   ElementCount MaxUserVF =
7978       UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF;
7979   bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF);
7980   if (!UserVF.isZero() && UserVFIsLegal) {
7981     LLVM_DEBUG(dbgs() << "LV: Using " << (UserVFIsLegal ? "user" : "max")
7982                       << " VF " << UserVF << ".\n");
7983     assert(isPowerOf2_32(UserVF.getKnownMinValue()) &&
7984            "VF needs to be a power of two");
7985     // Collect the instructions (and their associated costs) that will be more
7986     // profitable to scalarize.
7987     CM.selectUserVectorizationFactor(UserVF);
7988     CM.collectInLoopReductions();
7989     buildVPlansWithVPRecipes(UserVF, UserVF);
7990     LLVM_DEBUG(printPlans(dbgs()));
7991     return {{UserVF, 0}};
7992   }
7993 
7994   // Populate the set of Vectorization Factor Candidates.
7995   ElementCountSet VFCandidates;
7996   for (auto VF = ElementCount::getFixed(1);
7997        ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2)
7998     VFCandidates.insert(VF);
7999   for (auto VF = ElementCount::getScalable(1);
8000        ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2)
8001     VFCandidates.insert(VF);
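  // E.g., a fixed maximum VF of 8 yields the candidates {1, 2, 4, 8}, and a
  // scalable maximum of "vscale x 4" yields {vscale x 1, vscale x 2,
  // vscale x 4}.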
8002 
8003   for (const auto &VF : VFCandidates) {
8004     // Collect Uniform and Scalar instructions after vectorization with VF.
8005     CM.collectUniformsAndScalars(VF);
8006 
8007     // Collect the instructions (and their associated costs) that will be more
8008     // profitable to scalarize.
8009     if (VF.isVector())
8010       CM.collectInstsToScalarize(VF);
8011   }
8012 
8013   CM.collectInLoopReductions();
8014   buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF);
8015   buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF);
8016 
8017   LLVM_DEBUG(printPlans(dbgs()));
8018   if (!MaxFactors.hasVector())
8019     return VectorizationFactor::Disabled();
8020 
8021   // Select the optimal vectorization factor.
8022   auto SelectedVF = CM.selectVectorizationFactor(VFCandidates);
8023 
8024   // Check if it is profitable to vectorize with runtime checks.
8025   unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks();
8026   if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) {
8027     bool PragmaThresholdReached =
8028         NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
8029     bool ThresholdReached =
8030         NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
8031     if ((ThresholdReached && !Hints.allowReordering()) ||
8032         PragmaThresholdReached) {
8033       ORE->emit([&]() {
8034         return OptimizationRemarkAnalysisAliasing(
8035                    DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(),
8036                    OrigLoop->getHeader())
8037                << "loop not vectorized: cannot prove it is safe to reorder "
8038                   "memory operations";
8039       });
8040       LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
8041       Hints.emitRemarkWithHints();
8042       return VectorizationFactor::Disabled();
8043     }
8044   }
8045   return SelectedVF;
8046 }
8047 
8048 void LoopVectorizationPlanner::setBestPlan(ElementCount VF, unsigned UF) {
8049   LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF
8050                     << '\n');
8051   BestVF = VF;
8052   BestUF = UF;
8053 
8054   erase_if(VPlans, [VF](const VPlanPtr &Plan) {
8055     return !Plan->hasVF(VF);
8056   });
  assert(VPlans.size() == 1 && "Best VF does not have a single VPlan.");
8058 }
8059 
8060 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV,
8061                                            DominatorTree *DT) {
8062   // Perform the actual loop transformation.
8063 
8064   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
8065   assert(BestVF.hasValue() && "Vectorization Factor is missing");
8066   assert(VPlans.size() == 1 && "Not a single VPlan to execute.");
8067 
8068   VPTransformState State{
8069       *BestVF, BestUF, LI, DT, ILV.Builder, &ILV, VPlans.front().get()};
8070   State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
8071   State.TripCount = ILV.getOrCreateTripCount(nullptr);
8072   State.CanonicalIV = ILV.Induction;
8073 
8074   ILV.printDebugTracesAtStart();
8075 
8076   //===------------------------------------------------===//
8077   //
  // Notice: any optimization or new instruction that goes
  // into the code below should also be implemented in
  // the cost-model.
8081   //
8082   //===------------------------------------------------===//
8083 
8084   // 2. Copy and widen instructions from the old loop into the new loop.
8085   VPlans.front()->execute(&State);
8086 
8087   // 3. Fix the vectorized code: take care of header phi's, live-outs,
8088   //    predication, updating analyses.
8089   ILV.fixVectorizedLoop(State);
8090 
8091   ILV.printDebugTracesAtEnd();
8092 }
8093 
8094 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
8095 void LoopVectorizationPlanner::printPlans(raw_ostream &O) {
8096   for (const auto &Plan : VPlans)
8097     if (PrintVPlansInDotFormat)
8098       Plan->printDOT(O);
8099     else
8100       Plan->print(O);
8101 }
8102 #endif
8103 
8104 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
8105     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
8106 
  // We create new control-flow for the vectorized loop, so the original exit
  // condition will be dead after vectorization if it's only used by the
  // terminator.
8110   SmallVector<BasicBlock*> ExitingBlocks;
8111   OrigLoop->getExitingBlocks(ExitingBlocks);
8112   for (auto *BB : ExitingBlocks) {
8113     auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
8114     if (!Cmp || !Cmp->hasOneUse())
8115       continue;
8116 
8117     // TODO: we should introduce a getUniqueExitingBlocks on Loop
8118     if (!DeadInstructions.insert(Cmp).second)
8119       continue;
8120 
    // The operand of the icmp is often a dead trunc, used by IndUpdate.
8122     // TODO: can recurse through operands in general
8123     for (Value *Op : Cmp->operands()) {
8124       if (isa<TruncInst>(Op) && Op->hasOneUse())
        DeadInstructions.insert(cast<Instruction>(Op));
8126     }
8127   }
8128 
8129   // We create new "steps" for induction variable updates to which the original
8130   // induction variables map. An original update instruction will be dead if
8131   // all its users except the induction variable are dead.
8132   auto *Latch = OrigLoop->getLoopLatch();
8133   for (auto &Induction : Legal->getInductionVars()) {
8134     PHINode *Ind = Induction.first;
8135     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
8136 
    // If the tail is to be folded by masking, the primary induction variable,
    // if it exists, isn't dead: it will be used for masking. Don't kill it.
8139     if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction())
8140       continue;
8141 
8142     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
8143           return U == Ind || DeadInstructions.count(cast<Instruction>(U));
8144         }))
8145       DeadInstructions.insert(IndUpdate);
8146 
    // We also record as "Dead" the type-casting instructions we identified
    // during induction analysis. We don't need any handling for them in the
8149     // vectorized loop because we have proven that, under a proper runtime
8150     // test guarding the vectorized loop, the value of the phi, and the casted
8151     // value of the phi, are the same. The last instruction in this casting chain
8152     // will get its scalar/vector/widened def from the scalar/vector/widened def
8153     // of the respective phi node. Any other casts in the induction def-use chain
8154     // have no other uses outside the phi update chain, and will be ignored.
8155     InductionDescriptor &IndDes = Induction.second;
8156     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
8157     DeadInstructions.insert(Casts.begin(), Casts.end());
8158   }
8159 }
8160 
8161 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }
8162 
8163 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
8164 
8165 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
8166                                         Instruction::BinaryOps BinOp) {
8167   // When unrolling and the VF is 1, we only need to add a simple scalar.
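  // E.g., with UF=4 the unrolled parts receive Val + 0*Step, Val + 1*Step,
  // Val + 2*Step and Val + 3*Step, respectively.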
8168   Type *Ty = Val->getType();
8169   assert(!Ty->isVectorTy() && "Val must be a scalar");
8170 
8171   if (Ty->isFloatingPointTy()) {
8172     Constant *C = ConstantFP::get(Ty, (double)StartIdx);
8173 
8174     // Floating-point operations inherit FMF via the builder's flags.
8175     Value *MulOp = Builder.CreateFMul(C, Step);
8176     return Builder.CreateBinOp(BinOp, Val, MulOp);
8177   }
8178   Constant *C = ConstantInt::get(Ty, StartIdx);
8179   return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
8180 }
8181 
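// Attach "llvm.loop.unroll.runtime.disable" to the loop's metadata so that
// the remainder loop left after vectorization is not runtime-unrolled again.
// The resulting loop ID looks roughly like:
//   !0 = distinct !{!0, ..., !1}
//   !1 = !{!"llvm.loop.unroll.runtime.disable"}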
8182 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
8183   SmallVector<Metadata *, 4> MDs;
8184   // Reserve first location for self reference to the LoopID metadata node.
8185   MDs.push_back(nullptr);
8186   bool IsUnrollMetadata = false;
8187   MDNode *LoopID = L->getLoopID();
8188   if (LoopID) {
8189     // First find existing loop unrolling disable metadata.
8190     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
8191       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
8192       if (MD) {
8193         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
8194         IsUnrollMetadata =
8195             S && S->getString().startswith("llvm.loop.unroll.disable");
8196       }
8197       MDs.push_back(LoopID->getOperand(i));
8198     }
8199   }
8200 
8201   if (!IsUnrollMetadata) {
8202     // Add runtime unroll disable metadata.
8203     LLVMContext &Context = L->getHeader()->getContext();
8204     SmallVector<Metadata *, 1> DisableOperands;
8205     DisableOperands.push_back(
8206         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
8207     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
8208     MDs.push_back(DisableNode);
8209     MDNode *NewLoopID = MDNode::get(Context, MDs);
8210     // Set operand 0 to refer to the loop id itself.
8211     NewLoopID->replaceOperandWith(0, NewLoopID);
8212     L->setLoopID(NewLoopID);
8213   }
8214 }
8215 
8216 //===--------------------------------------------------------------------===//
8217 // EpilogueVectorizerMainLoop
8218 //===--------------------------------------------------------------------===//
8219 
8220 /// This function is partially responsible for generating the control flow
8221 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
8222 BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
8223   MDNode *OrigLoopID = OrigLoop->getLoopID();
8224   Loop *Lp = createVectorLoopSkeleton("");
8225 
8226   // Generate the code to check the minimum iteration count of the vector
8227   // epilogue (see below).
8228   EPI.EpilogueIterationCountCheck =
8229       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true);
8230   EPI.EpilogueIterationCountCheck->setName("iter.check");
8231 
8232   // Generate the code to check any assumptions that we've made for SCEV
8233   // expressions.
8234   EPI.SCEVSafetyCheck = emitSCEVChecks(Lp, LoopScalarPreHeader);
8235 
8236   // Generate the code that checks at runtime if arrays overlap. We put the
8237   // checks into a separate block to make the more common case of few elements
8238   // faster.
8239   EPI.MemSafetyCheck = emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
8240 
8241   // Generate the iteration count check for the main loop, *after* the check
8242   // for the epilogue loop, so that the path-length is shorter for the case
  // that goes directly through the vector epilogue. The longer path length
  // for the main loop is compensated for by the gain from vectorizing the
  // larger trip count. Note: the branch will get updated later on when we
  // vectorize the epilogue.
8247   EPI.MainLoopIterationCountCheck =
8248       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false);
8249 
8250   // Generate the induction variable.
8251   OldInduction = Legal->getPrimaryInduction();
8252   Type *IdxTy = Legal->getWidestInductionType();
8253   Value *StartIdx = ConstantInt::get(IdxTy, 0);
8254   Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
8255   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
8256   EPI.VectorTripCount = CountRoundDown;
8257   Induction =
8258       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
8259                               getDebugLocFromInstOrOperands(OldInduction));
8260 
  // Skip creating induction resume values here; they will be created in the
  // second pass. If we created them here, they wouldn't be used anyway,
8263   // because the vplan in the second pass still contains the inductions from the
8264   // original loop.
8265 
8266   return completeLoopSkeleton(Lp, OrigLoopID);
8267 }
8268 
8269 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
8270   LLVM_DEBUG({
8271     dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
8272            << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue()
8273            << ", Main Loop UF:" << EPI.MainLoopUF
8274            << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue()
8275            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8276   });
8277 }
8278 
8279 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
8280   DEBUG_WITH_TYPE(VerboseDebug, {
8281     dbgs() << "intermediate fn:\n" << *Induction->getFunction() << "\n";
8282   });
8283 }
8284 
8285 BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(
8286     Loop *L, BasicBlock *Bypass, bool ForEpilogue) {
8287   assert(L && "Expected valid Loop.");
8288   assert(Bypass && "Expected valid bypass basic block.");
8289   unsigned VFactor =
8290       ForEpilogue ? EPI.EpilogueVF.getKnownMinValue() : VF.getKnownMinValue();
8291   unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF;
8292   Value *Count = getOrCreateTripCount(L);
  // Reuse the existing vector loop preheader for the TC checks.
  // Note that a new preheader block is generated for the vector loop.
8295   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
8296   IRBuilder<> Builder(TCCheckBlock->getTerminator());
8297 
8298   // Generate code to check if the loop's trip count is less than VF * UF of the
8299   // main vector loop.
8300   auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF) ?
8301       ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
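  // If a scalar epilogue is required, at least one iteration must remain for
  // it after the vector loop, hence the non-strict (ULE) comparison.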
8302 
8303   Value *CheckMinIters = Builder.CreateICmp(
8304       P, Count, ConstantInt::get(Count->getType(), VFactor * UFactor),
8305       "min.iters.check");
8306 
8307   if (!ForEpilogue)
8308     TCCheckBlock->setName("vector.main.loop.iter.check");
8309 
8310   // Create new preheader for vector loop.
8311   LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
8312                                    DT, LI, nullptr, "vector.ph");
8313 
8314   if (ForEpilogue) {
8315     assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
8316                                  DT->getNode(Bypass)->getIDom()) &&
8317            "TC check is expected to dominate Bypass");
8318 
8319     // Update dominator for Bypass & LoopExit.
8320     DT->changeImmediateDominator(Bypass, TCCheckBlock);
8321     DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
8322 
8323     LoopBypassBlocks.push_back(TCCheckBlock);
8324 
8325     // Save the trip count so we don't have to regenerate it in the
8326     // vec.epilog.iter.check. This is safe to do because the trip count
8327     // generated here dominates the vector epilog iter check.
8328     EPI.TripCount = Count;
8329   }
8330 
8331   ReplaceInstWithInst(
8332       TCCheckBlock->getTerminator(),
8333       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8334 
8335   return TCCheckBlock;
8336 }
8337 
8338 //===--------------------------------------------------------------------===//
8339 // EpilogueVectorizerEpilogueLoop
8340 //===--------------------------------------------------------------------===//
8341 
8342 /// This function is partially responsible for generating the control flow
8343 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
8344 BasicBlock *
8345 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
8346   MDNode *OrigLoopID = OrigLoop->getLoopID();
8347   Loop *Lp = createVectorLoopSkeleton("vec.epilog.");
8348 
  // Now, compare the remaining count, and if there aren't enough iterations
  // to execute the vectorized epilogue, skip to the scalar part.
8351   BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader;
8352   VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check");
8353   LoopVectorPreHeader =
8354       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
8355                  LI, nullptr, "vec.epilog.ph");
8356   emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader,
8357                                           VecEpilogueIterationCountCheck);
8358 
8359   // Adjust the control flow taking the state info from the main loop
8360   // vectorization into account.
8361   assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck &&
8362          "expected this to be saved from the previous pass.");
8363   EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith(
8364       VecEpilogueIterationCountCheck, LoopVectorPreHeader);
8365 
8366   DT->changeImmediateDominator(LoopVectorPreHeader,
8367                                EPI.MainLoopIterationCountCheck);
8368 
8369   EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
8370       VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8371 
8372   if (EPI.SCEVSafetyCheck)
8373     EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith(
8374         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8375   if (EPI.MemSafetyCheck)
8376     EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith(
8377         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8378 
8379   DT->changeImmediateDominator(
8380       VecEpilogueIterationCountCheck,
8381       VecEpilogueIterationCountCheck->getSinglePredecessor());
8382 
8383   DT->changeImmediateDominator(LoopScalarPreHeader,
8384                                EPI.EpilogueIterationCountCheck);
8385   DT->changeImmediateDominator(LoopExitBlock, EPI.EpilogueIterationCountCheck);
8386 
8387   // Keep track of bypass blocks, as they feed start values to the induction
8388   // phis in the scalar loop preheader.
8389   if (EPI.SCEVSafetyCheck)
8390     LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck);
8391   if (EPI.MemSafetyCheck)
8392     LoopBypassBlocks.push_back(EPI.MemSafetyCheck);
8393   LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck);
8394 
  // Generate a resume induction for the vector epilogue and put it in the
  // vector epilogue preheader.
8397   Type *IdxTy = Legal->getWidestInductionType();
8398   PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
8399                                          LoopVectorPreHeader->getFirstNonPHI());
8400   EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
8401   EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
8402                            EPI.MainLoopIterationCountCheck);
8403 
8404   // Generate the induction variable.
8405   OldInduction = Legal->getPrimaryInduction();
8406   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
8407   Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
8408   Value *StartIdx = EPResumeVal;
8409   Induction =
8410       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
8411                               getDebugLocFromInstOrOperands(OldInduction));
8412 
8413   // Generate induction resume values. These variables save the new starting
8414   // indexes for the scalar loop. They are used to test if there are any tail
8415   // iterations left once the vector loop has completed.
  // Note that when the vectorized epilogue is skipped due to the iteration
  // count check, the resume value for the induction variable comes from the
  // trip count of the main vector loop, hence passing the AdditionalBypass
  // argument.
8420   createInductionResumeValues(Lp, CountRoundDown,
8421                               {VecEpilogueIterationCountCheck,
8422                                EPI.VectorTripCount} /* AdditionalBypass */);
8423 
8424   AddRuntimeUnrollDisableMetaData(Lp);
8425   return completeLoopSkeleton(Lp, OrigLoopID);
8426 }
8427 
8428 BasicBlock *
8429 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
8430     Loop *L, BasicBlock *Bypass, BasicBlock *Insert) {
8431 
  assert(EPI.TripCount &&
         "Expected trip count to have been saved in the first pass.");
8434   assert(
8435       (!isa<Instruction>(EPI.TripCount) ||
8436        DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
8437       "saved trip count does not dominate insertion point.");
8438   Value *TC = EPI.TripCount;
8439   IRBuilder<> Builder(Insert->getTerminator());
8440   Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");
8441 
  // Generate code to check if the remaining iteration count is less than
  // VF * UF of the vector epilogue loop.
8444   auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ?
8445       ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
8446 
8447   Value *CheckMinIters = Builder.CreateICmp(
8448       P, Count,
8449       ConstantInt::get(Count->getType(),
8450                        EPI.EpilogueVF.getKnownMinValue() * EPI.EpilogueUF),
8451       "min.epilog.iters.check");
8452 
8453   ReplaceInstWithInst(
8454       Insert->getTerminator(),
8455       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8456 
8457   LoopBypassBlocks.push_back(Insert);
8458   return Insert;
8459 }
8460 
8461 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
8462   LLVM_DEBUG({
8463     dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
8464            << "Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue()
8465            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8466   });
8467 }
8468 
8469 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
8470   DEBUG_WITH_TYPE(VerboseDebug, {
8471     dbgs() << "final fn:\n" << *Induction->getFunction() << "\n";
8472   });
8473 }
8474 
8475 bool LoopVectorizationPlanner::getDecisionAndClampRange(
8476     const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
8477   assert(!Range.isEmpty() && "Trying to test an empty VF range.");
8478   bool PredicateAtRangeStart = Predicate(Range.Start);
8479 
8480   for (ElementCount TmpVF = Range.Start * 2;
8481        ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
8482     if (Predicate(TmpVF) != PredicateAtRangeStart) {
8483       Range.End = TmpVF;
8484       break;
8485     }
8486 
8487   return PredicateAtRangeStart;
8488 }
8489 
8490 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
8491 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
8492 /// of VF's starting at a given VF and extending it as much as possible. Each
8493 /// vectorization decision can potentially shorten this sub-range during
8494 /// buildVPlan().
8495 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
8496                                            ElementCount MaxVF) {
8497   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8498   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8499     VFRange SubRange = {VF, MaxVFPlusOne};
8500     VPlans.push_back(buildVPlan(SubRange));
8501     VF = SubRange.End;
8502   }
8503 }
8504 
8505 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
8506                                          VPlanPtr &Plan) {
8507   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
8508 
8509   // Look for cached value.
8510   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
8511   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
8512   if (ECEntryIt != EdgeMaskCache.end())
8513     return ECEntryIt->second;
8514 
8515   VPValue *SrcMask = createBlockInMask(Src, Plan);
8516 
8517   // The terminator has to be a branch inst!
8518   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
8519   assert(BI && "Unexpected terminator found");
8520 
8521   if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
8522     return EdgeMaskCache[Edge] = SrcMask;
8523 
8524   // If source is an exiting block, we know the exit edge is dynamically dead
8525   // in the vector loop, and thus we don't need to restrict the mask.  Avoid
8526   // adding uses of an otherwise potentially dead instruction.
8527   if (OrigLoop->isLoopExiting(Src))
8528     return EdgeMaskCache[Edge] = SrcMask;
8529 
8530   VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition());
8531   assert(EdgeMask && "No Edge Mask found for condition");
8532 
8533   if (BI->getSuccessor(0) != Dst)
8534     EdgeMask = Builder.createNot(EdgeMask);
8535 
8536   if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND.
8537     // The condition is 'SrcMask && EdgeMask', which is equivalent to
8538     // 'select i1 SrcMask, i1 EdgeMask, i1 false'.
8539     // The select version does not introduce new UB if SrcMask is false and
8540     // EdgeMask is poison. Using 'and' here introduces undefined behavior.
8541     VPValue *False = Plan->getOrAddVPValue(
8542         ConstantInt::getFalse(BI->getCondition()->getType()));
8543     EdgeMask = Builder.createSelect(SrcMask, EdgeMask, False);
8544   }
8545 
8546   return EdgeMaskCache[Edge] = EdgeMask;
8547 }
8548 
8549 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
8550   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
8551 
8552   // Look for cached value.
8553   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
8554   if (BCEntryIt != BlockMaskCache.end())
8555     return BCEntryIt->second;
8556 
8557   // All-one mask is modelled as no-mask following the convention for masked
8558   // load/store/gather/scatter. Initialize BlockMask to no-mask.
8559   VPValue *BlockMask = nullptr;
8560 
8561   if (OrigLoop->getHeader() == BB) {
8562     if (!CM.blockNeedsPredication(BB))
8563       return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
8564 
8565     // Create the block in mask as the first non-phi instruction in the block.
8566     VPBuilder::InsertPointGuard Guard(Builder);
8567     auto NewInsertionPoint = Builder.getInsertBlock()->getFirstNonPhi();
8568     Builder.setInsertPoint(Builder.getInsertBlock(), NewInsertionPoint);
8569 
8570     // Introduce the early-exit compare IV <= BTC to form header block mask.
8571     // This is used instead of IV < TC because TC may wrap, unlike BTC.
8572     // Start by constructing the desired canonical IV.
8573     VPValue *IV = nullptr;
8574     if (Legal->getPrimaryInduction())
8575       IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction());
8576     else {
8577       auto IVRecipe = new VPWidenCanonicalIVRecipe();
8578       Builder.getInsertBlock()->insert(IVRecipe, NewInsertionPoint);
8579       IV = IVRecipe->getVPSingleValue();
8580     }
8581     VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
8582     bool TailFolded = !CM.isScalarEpilogueAllowed();
8583 
8584     if (TailFolded && CM.TTI.emitGetActiveLaneMask()) {
8585       // While ActiveLaneMask is a binary op that consumes the loop tripcount
8586       // as a second argument, we only pass the IV here and extract the
      // tripcount from the transform state, where codegen of the VP
      // instructions happens.
8589       BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV});
8590     } else {
8591       BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
8592     }
8593     return BlockMaskCache[BB] = BlockMask;
8594   }
8595 
  // This is the block mask of a non-header block. We OR together the masks
  // of all incoming edges.
8597   for (auto *Predecessor : predecessors(BB)) {
8598     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
8599     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
8600       return BlockMaskCache[BB] = EdgeMask;
8601 
8602     if (!BlockMask) { // BlockMask has its initialized nullptr value.
8603       BlockMask = EdgeMask;
8604       continue;
8605     }
8606 
8607     BlockMask = Builder.createOr(BlockMask, EdgeMask);
8608   }
8609 
8610   return BlockMaskCache[BB] = BlockMask;
8611 }
8612 
8613 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I,
8614                                                 ArrayRef<VPValue *> Operands,
8615                                                 VFRange &Range,
8616                                                 VPlanPtr &Plan) {
8617   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
8618          "Must be called with either a load or store");
8619 
8620   auto willWiden = [&](ElementCount VF) -> bool {
8621     if (VF.isScalar())
8622       return false;
8623     LoopVectorizationCostModel::InstWidening Decision =
8624         CM.getWideningDecision(I, VF);
8625     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
8626            "CM decision should be taken at this point.");
8627     if (Decision == LoopVectorizationCostModel::CM_Interleave)
8628       return true;
8629     if (CM.isScalarAfterVectorization(I, VF) ||
8630         CM.isProfitableToScalarize(I, VF))
8631       return false;
8632     return Decision != LoopVectorizationCostModel::CM_Scalarize;
8633   };
8634 
8635   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8636     return nullptr;
8637 
8638   VPValue *Mask = nullptr;
8639   if (Legal->isMaskRequired(I))
8640     Mask = createBlockInMask(I->getParent(), Plan);
8641 
8642   if (LoadInst *Load = dyn_cast<LoadInst>(I))
8643     return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask);
8644 
8645   StoreInst *Store = cast<StoreInst>(I);
8646   return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0],
8647                                             Mask);
8648 }
8649 
8650 VPWidenIntOrFpInductionRecipe *
8651 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi,
8652                                            ArrayRef<VPValue *> Operands) const {
8653   // Check if this is an integer or fp induction. If so, build the recipe that
8654   // produces its scalar and vector values.
8655   InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
8656   if (II.getKind() == InductionDescriptor::IK_IntInduction ||
8657       II.getKind() == InductionDescriptor::IK_FpInduction) {
8658     assert(II.getStartValue() ==
8659            Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8660     const SmallVectorImpl<Instruction *> &Casts = II.getCastInsts();
8661     return new VPWidenIntOrFpInductionRecipe(
8662         Phi, Operands[0], Casts.empty() ? nullptr : Casts.front());
8663   }
8664 
8665   return nullptr;
8666 }
8667 
8668 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate(
8669     TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range,
8670     VPlan &Plan) const {
8671   // Optimize the special case where the source is a constant integer
8672   // induction variable. Notice that we can only optimize the 'trunc' case
8673   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
8674   // (c) other casts depend on pointer size.
8675 
8676   // Determine whether \p K is a truncation based on an induction variable that
8677   // can be optimized.
8678   auto isOptimizableIVTruncate =
8679       [&](Instruction *K) -> std::function<bool(ElementCount)> {
8680     return [=](ElementCount VF) -> bool {
8681       return CM.isOptimizableIVTruncate(K, VF);
8682     };
8683   };
8684 
8685   if (LoopVectorizationPlanner::getDecisionAndClampRange(
8686           isOptimizableIVTruncate(I), Range)) {
8687 
8688     InductionDescriptor II =
8689         Legal->getInductionVars().lookup(cast<PHINode>(I->getOperand(0)));
8690     VPValue *Start = Plan.getOrAddVPValue(II.getStartValue());
8691     return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)),
8692                                              Start, nullptr, I);
8693   }
8694   return nullptr;
8695 }
8696 
8697 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi,
8698                                                 ArrayRef<VPValue *> Operands,
8699                                                 VPlanPtr &Plan) {
8700   // If all incoming values are equal, the incoming VPValue can be used directly
8701   // instead of creating a new VPBlendRecipe.
8702   VPValue *FirstIncoming = Operands[0];
8703   if (all_of(Operands, [FirstIncoming](const VPValue *Inc) {
8704         return FirstIncoming == Inc;
8705       })) {
8706     return Operands[0];
8707   }
8708 
8709   // We know that all PHIs in non-header blocks are converted into selects, so
8710   // we don't have to worry about the insertion order and we can just use the
8711   // builder. At this point we generate the predication tree. There may be
8712   // duplications since this is a simple recursive scan, but future
8713   // optimizations will clean it up.
8714   SmallVector<VPValue *, 2> OperandsWithMask;
8715   unsigned NumIncoming = Phi->getNumIncomingValues();
8716 
8717   for (unsigned In = 0; In < NumIncoming; In++) {
8718     VPValue *EdgeMask =
8719       createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
8720     assert((EdgeMask || NumIncoming == 1) &&
8721            "Multiple predecessors with one having a full mask");
8722     OperandsWithMask.push_back(Operands[In]);
8723     if (EdgeMask)
8724       OperandsWithMask.push_back(EdgeMask);
8725   }
8726   return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask));
8727 }
8728 
8729 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
8730                                                    ArrayRef<VPValue *> Operands,
8731                                                    VFRange &Range) const {
8732 
8733   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8734       [this, CI](ElementCount VF) { return CM.isScalarWithPredication(CI); },
8735       Range);
8736 
8737   if (IsPredicated)
8738     return nullptr;
8739 
8740   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8741   if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
8742              ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
8743              ID == Intrinsic::pseudoprobe ||
8744              ID == Intrinsic::experimental_noalias_scope_decl))
8745     return nullptr;
8746 
8747   auto willWiden = [&](ElementCount VF) -> bool {
8748     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    // The following case may be scalarized depending on the VF.
    // The flag indicates whether to use an intrinsic or a plain call for the
    // vectorized version of the instruction, i.e. whether an intrinsic call
    // is more beneficial than a library call.
8753     bool NeedToScalarize = false;
8754     InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
8755     InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0;
8756     bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
8757     assert((IntrinsicCost.isValid() || CallCost.isValid()) &&
8758            "Either the intrinsic cost or vector call cost must be valid");
8759     return UseVectorIntrinsic || !NeedToScalarize;
8760   };
8761 
8762   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8763     return nullptr;
8764 
8765   ArrayRef<VPValue *> Ops = Operands.take_front(CI->getNumArgOperands());
8766   return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end()));
8767 }
8768 
8769 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
8770   assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
8771          !isa<StoreInst>(I) && "Instruction should have been handled earlier");
  // The instruction should be widened, unless it is scalar after
  // vectorization, scalarization is profitable, or it is predicated.
8774   auto WillScalarize = [this, I](ElementCount VF) -> bool {
8775     return CM.isScalarAfterVectorization(I, VF) ||
8776            CM.isProfitableToScalarize(I, VF) || CM.isScalarWithPredication(I);
8777   };
8778   return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
8779                                                              Range);
8780 }
8781 
8782 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
8783                                            ArrayRef<VPValue *> Operands) const {
8784   auto IsVectorizableOpcode = [](unsigned Opcode) {
8785     switch (Opcode) {
8786     case Instruction::Add:
8787     case Instruction::And:
8788     case Instruction::AShr:
8789     case Instruction::BitCast:
8790     case Instruction::FAdd:
8791     case Instruction::FCmp:
8792     case Instruction::FDiv:
8793     case Instruction::FMul:
8794     case Instruction::FNeg:
8795     case Instruction::FPExt:
8796     case Instruction::FPToSI:
8797     case Instruction::FPToUI:
8798     case Instruction::FPTrunc:
8799     case Instruction::FRem:
8800     case Instruction::FSub:
8801     case Instruction::ICmp:
8802     case Instruction::IntToPtr:
8803     case Instruction::LShr:
8804     case Instruction::Mul:
8805     case Instruction::Or:
8806     case Instruction::PtrToInt:
8807     case Instruction::SDiv:
8808     case Instruction::Select:
8809     case Instruction::SExt:
8810     case Instruction::Shl:
8811     case Instruction::SIToFP:
8812     case Instruction::SRem:
8813     case Instruction::Sub:
8814     case Instruction::Trunc:
8815     case Instruction::UDiv:
8816     case Instruction::UIToFP:
8817     case Instruction::URem:
8818     case Instruction::Xor:
8819     case Instruction::ZExt:
8820       return true;
8821     }
8822     return false;
8823   };
8824 
8825   if (!IsVectorizableOpcode(I->getOpcode()))
8826     return nullptr;
8827 
8828   // Success: widen this instruction.
8829   return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end()));
8830 }
8831 
8832 void VPRecipeBuilder::fixHeaderPhis() {
8833   BasicBlock *OrigLatch = OrigLoop->getLoopLatch();
8834   for (VPWidenPHIRecipe *R : PhisToFix) {
8835     auto *PN = cast<PHINode>(R->getUnderlyingValue());
8836     VPRecipeBase *IncR =
8837         getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch)));
8838     R->addOperand(IncR->getVPSingleValue());
8839   }
8840 }
8841 
8842 VPBasicBlock *VPRecipeBuilder::handleReplication(
8843     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
8844     VPlanPtr &Plan) {
8845   bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
8846       [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
8847       Range);
8848 
8849   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8850       [&](ElementCount VF) { return CM.isPredicatedInst(I); }, Range);
8851 
8852   auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
8853                                        IsUniform, IsPredicated);
8854   setRecipe(I, Recipe);
8855   Plan->addVPValue(I, Recipe);
8856 
8857   // Find if I uses a predicated instruction. If so, it will use its scalar
8858   // value. Avoid hoisting the insert-element which packs the scalar value into
8859   // a vector value, as that happens iff all users use the vector value.
8860   for (VPValue *Op : Recipe->operands()) {
8861     auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef());
8862     if (!PredR)
8863       continue;
8864     auto *RepR =
8865         cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef());
8866     assert(RepR->isPredicated() &&
8867            "expected Replicate recipe to be predicated");
8868     RepR->setAlsoPack(false);
8869   }
8870 
8871   // Finalize the recipe for Instr, first if it is not predicated.
8872   if (!IsPredicated) {
8873     LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
8874     VPBB->appendRecipe(Recipe);
8875     return VPBB;
8876   }
8877   LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
8878   assert(VPBB->getSuccessors().empty() &&
8879          "VPBB has successors when handling predicated replication.");
8880   // Record predicated instructions for above packing optimizations.
8881   VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
8882   VPBlockUtils::insertBlockAfter(Region, VPBB);
8883   auto *RegSucc = new VPBasicBlock();
8884   VPBlockUtils::insertBlockAfter(RegSucc, Region);
8885   return RegSucc;
8886 }
8887 
8888 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
8889                                                       VPRecipeBase *PredRecipe,
8890                                                       VPlanPtr &Plan) {
8891   // Instructions marked for predication are replicated and placed under an
8892   // if-then construct to prevent side-effects.
8893 
8894   // Generate recipes to compute the block mask for this region.
8895   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
8896 
8897   // Build the triangular if-then region.
8898   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
8899   assert(Instr->getParent() && "Predicated instruction not in any basic block");
8900   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
8901   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
8902   auto *PHIRecipe = Instr->getType()->isVoidTy()
8903                         ? nullptr
8904                         : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr));
8905   if (PHIRecipe) {
8906     Plan->removeVPValueFor(Instr);
8907     Plan->addVPValue(Instr, PHIRecipe);
8908   }
8909   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
8910   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
8911   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
8912 
8913   // Note: first set Entry as region entry and then connect successors starting
8914   // from it in order, to propagate the "parent" of each VPBasicBlock.
8915   VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
8916   VPBlockUtils::connectBlocks(Pred, Exit);
8917 
8918   return Region;
8919 }
8920 
8921 VPRecipeOrVPValueTy
8922 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
8923                                         ArrayRef<VPValue *> Operands,
8924                                         VFRange &Range, VPlanPtr &Plan) {
8925   // First, check for specific widening recipes that deal with calls, memory
8926   // operations, inductions and Phi nodes.
8927   if (auto *CI = dyn_cast<CallInst>(Instr))
8928     return toVPRecipeResult(tryToWidenCall(CI, Operands, Range));
8929 
8930   if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8931     return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan));
8932 
8933   VPRecipeBase *Recipe;
8934   if (auto Phi = dyn_cast<PHINode>(Instr)) {
8935     if (Phi->getParent() != OrigLoop->getHeader())
8936       return tryToBlend(Phi, Operands, Plan);
8937     if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands)))
8938       return toVPRecipeResult(Recipe);
8939 
8940     VPWidenPHIRecipe *PhiRecipe = nullptr;
8941     if (Legal->isReductionVariable(Phi) || Legal->isFirstOrderRecurrence(Phi)) {
8942       VPValue *StartV = Operands[0];
8943       if (Legal->isReductionVariable(Phi)) {
8944         RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
8945         assert(RdxDesc.getRecurrenceStartValue() ==
8946                Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8947         PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV,
8948                                              CM.isInLoopReduction(Phi),
8949                                              CM.useOrderedReductions(RdxDesc));
8950       } else {
8951         PhiRecipe = new VPWidenPHIRecipe(Phi, *StartV);
8952       }
8953 
8954       // Record the incoming value from the backedge, so we can add the incoming
8955       // value from the backedge after all recipes have been created.
8956       recordRecipeOf(cast<Instruction>(
8957           Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch())));
8958       PhisToFix.push_back(PhiRecipe);
8959     } else {
8960       // TODO: record start and backedge value for remaining pointer induction
8961       // phis.
8962       assert(Phi->getType()->isPointerTy() &&
8963              "only pointer phis should be handled here");
8964       PhiRecipe = new VPWidenPHIRecipe(Phi);
8965     }
8966 
8967     return toVPRecipeResult(PhiRecipe);
8968   }
8969 
8970   if (isa<TruncInst>(Instr) &&
8971       (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands,
8972                                                Range, *Plan)))
8973     return toVPRecipeResult(Recipe);
8974 
8975   if (!shouldWiden(Instr, Range))
8976     return nullptr;
8977 
8978   if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
8979     return toVPRecipeResult(new VPWidenGEPRecipe(
8980         GEP, make_range(Operands.begin(), Operands.end()), OrigLoop));
8981 
8982   if (auto *SI = dyn_cast<SelectInst>(Instr)) {
8983     bool InvariantCond =
8984         PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
8985     return toVPRecipeResult(new VPWidenSelectRecipe(
8986         *SI, make_range(Operands.begin(), Operands.end()), InvariantCond));
8987   }
8988 
8989   return toVPRecipeResult(tryToWiden(Instr, Operands));
8990 }
8991 
8992 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8993                                                         ElementCount MaxVF) {
8994   assert(OrigLoop->isInnermost() && "Inner loop expected.");
8995 
8996   // Collect instructions from the original loop that will become trivially dead
8997   // in the vectorized loop. We don't need to vectorize these instructions. For
8998   // example, original induction update instructions can become dead because we
8999   // separately emit induction "steps" when generating code for the new loop.
9000   // Similarly, we create a new latch condition when setting up the structure
9001   // of the new loop, so the old one can become dead.
9002   SmallPtrSet<Instruction *, 4> DeadInstructions;
9003   collectTriviallyDeadInstructions(DeadInstructions);
9004 
9005   // Add assume instructions we need to drop to DeadInstructions, to prevent
9006   // them from being added to the VPlan.
  // TODO: We only need to drop assumes in blocks that get flattened. If the
  // control flow is preserved, we should keep them.
9009   auto &ConditionalAssumes = Legal->getConditionalAssumes();
9010   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
9011 
9012   MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
9013   // Dead instructions do not need sinking. Remove them from SinkAfter.
9014   for (Instruction *I : DeadInstructions)
9015     SinkAfter.erase(I);
9016 
9017   // Cannot sink instructions after dead instructions (there won't be any
9018   // recipes for them). Instead, find the first non-dead previous instruction.
9019   for (auto &P : Legal->getSinkAfter()) {
9020     Instruction *SinkTarget = P.second;
9021     Instruction *FirstInst = &*SinkTarget->getParent()->begin();
9022     (void)FirstInst;
9023     while (DeadInstructions.contains(SinkTarget)) {
9024       assert(
9025           SinkTarget != FirstInst &&
9026           "Must find a live instruction (at least the one feeding the "
9027           "first-order recurrence PHI) before reaching beginning of the block");
9028       SinkTarget = SinkTarget->getPrevNode();
9029       assert(SinkTarget != P.first &&
9030              "sink source equals target, no sinking required");
9031     }
9032     P.second = SinkTarget;
9033   }
9034 
9035   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
9036   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
9037     VFRange SubRange = {VF, MaxVFPlusOne};
9038     VPlans.push_back(
9039         buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
9040     VF = SubRange.End;
9041   }
9042 }
9043 
9044 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
9045     VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
9046     const MapVector<Instruction *, Instruction *> &SinkAfter) {
9047 
9048   SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
9049 
9050   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
9051 
9052   // ---------------------------------------------------------------------------
9053   // Pre-construction: record ingredients whose recipes we'll need to further
9054   // process after constructing the initial VPlan.
9055   // ---------------------------------------------------------------------------
9056 
9057   // Mark instructions we'll need to sink later and their targets as
9058   // ingredients whose recipe we'll need to record.
9059   for (auto &Entry : SinkAfter) {
9060     RecipeBuilder.recordRecipeOf(Entry.first);
9061     RecipeBuilder.recordRecipeOf(Entry.second);
9062   }
9063   for (auto &Reduction : CM.getInLoopReductionChains()) {
9064     PHINode *Phi = Reduction.first;
9065     RecurKind Kind = Legal->getReductionVars()[Phi].getRecurrenceKind();
9066     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9067 
9068     RecipeBuilder.recordRecipeOf(Phi);
9069     for (auto &R : ReductionOperations) {
9070       RecipeBuilder.recordRecipeOf(R);
      // For min/max reductions, where we have a pair of icmp/select, we also
      // need to record the ICmp recipe, so it can be removed later.
9073       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
9074         RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
9075     }
9076   }
9077 
9078   // For each interleave group which is relevant for this (possibly trimmed)
9079   // Range, add it to the set of groups to be later applied to the VPlan and add
9080   // placeholders for its members' Recipes which we'll be replacing with a
9081   // single VPInterleaveRecipe.
9082   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
9083     auto applyIG = [IG, this](ElementCount VF) -> bool {
9084       return (VF.isVector() && // Query is illegal for VF == 1
9085               CM.getWideningDecision(IG->getInsertPos(), VF) ==
9086                   LoopVectorizationCostModel::CM_Interleave);
9087     };
9088     if (!getDecisionAndClampRange(applyIG, Range))
9089       continue;
9090     InterleaveGroups.insert(IG);
9091     for (unsigned i = 0; i < IG->getFactor(); i++)
9092       if (Instruction *Member = IG->getMember(i))
9093         RecipeBuilder.recordRecipeOf(Member);
9094   };
9095 
9096   // ---------------------------------------------------------------------------
9097   // Build initial VPlan: Scan the body of the loop in a topological order to
9098   // visit each basic block after having visited its predecessor basic blocks.
9099   // ---------------------------------------------------------------------------
9100 
9101   // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
9102   auto Plan = std::make_unique<VPlan>();
9103   VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
9104   Plan->setEntry(VPBB);
9105 
9106   // Scan the body of the loop in a topological order to visit each basic block
9107   // after having visited its predecessor basic blocks.
9108   LoopBlocksDFS DFS(OrigLoop);
9109   DFS.perform(LI);
9110 
9111   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
9112     // Relevant instructions from basic block BB will be grouped into VPRecipe
9113     // ingredients and fill a new VPBasicBlock.
9114     unsigned VPBBsForBB = 0;
9115     auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
9116     VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
9117     VPBB = FirstVPBBForBB;
9118     Builder.setInsertPoint(VPBB);
9119 
9120     // Introduce each ingredient into VPlan.
    // TODO: Model and preserve debug intrinsics in VPlan.
9122     for (Instruction &I : BB->instructionsWithoutDebug()) {
9123       Instruction *Instr = &I;
9124 
9125       // First filter out irrelevant instructions, to ensure no recipes are
9126       // built for them.
9127       if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
9128         continue;
9129 
9130       SmallVector<VPValue *, 4> Operands;
9131       auto *Phi = dyn_cast<PHINode>(Instr);
9132       if (Phi && Phi->getParent() == OrigLoop->getHeader()) {
9133         Operands.push_back(Plan->getOrAddVPValue(
9134             Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())));
9135       } else {
9136         auto OpRange = Plan->mapToVPValues(Instr->operands());
9137         Operands = {OpRange.begin(), OpRange.end()};
9138       }
9139       if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe(
9140               Instr, Operands, Range, Plan)) {
9141         // If Instr can be simplified to an existing VPValue, use it.
9142         if (RecipeOrValue.is<VPValue *>()) {
9143           auto *VPV = RecipeOrValue.get<VPValue *>();
9144           Plan->addVPValue(Instr, VPV);
9145           // If the re-used value is a recipe, register the recipe for the
9146           // instruction, in case the recipe for Instr needs to be recorded.
9147           if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef()))
9148             RecipeBuilder.setRecipe(Instr, R);
9149           continue;
9150         }
9151         // Otherwise, add the new recipe.
9152         VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>();
9153         for (auto *Def : Recipe->definedValues()) {
9154           auto *UV = Def->getUnderlyingValue();
9155           Plan->addVPValue(UV, Def);
9156         }
9157 
9158         RecipeBuilder.setRecipe(Instr, Recipe);
9159         VPBB->appendRecipe(Recipe);
9160         continue;
9161       }
9162 
      // Otherwise, if all widening options failed, the instruction is to be
9164       // replicated. This may create a successor for VPBB.
9165       VPBasicBlock *NextVPBB =
9166           RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan);
9167       if (NextVPBB != VPBB) {
9168         VPBB = NextVPBB;
9169         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
9170                                     : "");
9171       }
9172     }
9173   }
9174 
9175   RecipeBuilder.fixHeaderPhis();
9176 
9177   // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks
  // may also be empty, such as the last one (VPBB), reflecting original
9179   // basic-blocks with no recipes.
9180   VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
9181   assert(PreEntry->empty() && "Expecting empty pre-entry block.");
9182   VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
9183   VPBlockUtils::disconnectBlocks(PreEntry, Entry);
9184   delete PreEntry;
9185 
9186   // ---------------------------------------------------------------------------
9187   // Transform initial VPlan: Apply previously taken decisions, in order, to
9188   // bring the VPlan to its final state.
9189   // ---------------------------------------------------------------------------
9190 
9191   // Apply Sink-After legal constraints.
9192   for (auto &Entry : SinkAfter) {
9193     VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
9194     VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
9195 
9196     auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * {
9197       auto *Region =
9198           dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent());
9199       if (Region && Region->isReplicator()) {
9200         assert(Region->getNumSuccessors() == 1 &&
9201                Region->getNumPredecessors() == 1 && "Expected SESE region!");
9202         assert(R->getParent()->size() == 1 &&
9203                "A recipe in an original replicator region must be the only "
9204                "recipe in its block");
9205         return Region;
9206       }
9207       return nullptr;
9208     };
9209     auto *TargetRegion = GetReplicateRegion(Target);
9210     auto *SinkRegion = GetReplicateRegion(Sink);
9211     if (!SinkRegion) {
9212       // If the sink source is not a replicate region, sink the recipe directly.
9213       if (TargetRegion) {
9214         // The target is in a replication region, make sure to move Sink to
9215         // the block after it, not into the replication region itself.
9216         VPBasicBlock *NextBlock =
9217             cast<VPBasicBlock>(TargetRegion->getSuccessors().front());
9218         Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi());
9219       } else
9220         Sink->moveAfter(Target);
9221       continue;
9222     }
9223 
9224     // The sink source is in a replicate region. Unhook the region from the CFG.
9225     auto *SinkPred = SinkRegion->getSinglePredecessor();
9226     auto *SinkSucc = SinkRegion->getSingleSuccessor();
9227     VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion);
9228     VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc);
9229     VPBlockUtils::connectBlocks(SinkPred, SinkSucc);
9230 
9231     if (TargetRegion) {
9232       // The target recipe is also in a replicate region, move the sink region
9233       // after the target region.
9234       auto *TargetSucc = TargetRegion->getSingleSuccessor();
9235       VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc);
9236       VPBlockUtils::connectBlocks(TargetRegion, SinkRegion);
9237       VPBlockUtils::connectBlocks(SinkRegion, TargetSucc);
9238     } else {
9239       // The sink source is in a replicate region, we need to move the whole
9240       // replicate region, which should only contain a single recipe in the main
9241       // block.
9242       auto *SplitBlock =
9243           Target->getParent()->splitAt(std::next(Target->getIterator()));
9244 
9245       auto *SplitPred = SplitBlock->getSinglePredecessor();
9246 
9247       VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock);
9248       VPBlockUtils::connectBlocks(SplitPred, SinkRegion);
9249       VPBlockUtils::connectBlocks(SinkRegion, SplitBlock);
9250       if (VPBB == SplitPred)
9251         VPBB = SplitBlock;
9252     }
9253   }
9254 
9255   // Interleave memory: for each Interleave Group we marked earlier as relevant
9256   // for this VPlan, replace the Recipes widening its memory instructions with a
9257   // single VPInterleaveRecipe at its insertion point.
9258   for (auto IG : InterleaveGroups) {
9259     auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
9260         RecipeBuilder.getRecipe(IG->getInsertPos()));
9261     SmallVector<VPValue *, 4> StoredValues;
9262     for (unsigned i = 0; i < IG->getFactor(); ++i)
9263       if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i)))
9264         StoredValues.push_back(Plan->getOrAddVPValue(SI->getOperand(0)));
9265 
9266     auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues,
9267                                         Recipe->getMask());
9268     VPIG->insertBefore(Recipe);
9269     unsigned J = 0;
9270     for (unsigned i = 0; i < IG->getFactor(); ++i)
9271       if (Instruction *Member = IG->getMember(i)) {
9272         if (!Member->getType()->isVoidTy()) {
9273           VPValue *OriginalV = Plan->getVPValue(Member);
9274           Plan->removeVPValueFor(Member);
9275           Plan->addVPValue(Member, VPIG->getVPValue(J));
9276           OriginalV->replaceAllUsesWith(VPIG->getVPValue(J));
9277           J++;
9278         }
9279         RecipeBuilder.getRecipe(Member)->eraseFromParent();
9280       }
9281   }
9282 
9283   // Adjust the recipes for any inloop reductions.
9284   adjustRecipesForInLoopReductions(Plan, RecipeBuilder, Range.Start);
9285 
9286   // Finally, if tail is folded by masking, introduce selects between the phi
9287   // and the live-out instruction of each reduction, at the end of the latch.
9288   if (CM.foldTailByMasking() && !Legal->getReductionVars().empty()) {
9289     Builder.setInsertPoint(VPBB);
9290     auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
9291     for (auto &Reduction : Legal->getReductionVars()) {
9292       if (CM.isInLoopReduction(Reduction.first))
9293         continue;
9294       VPValue *Phi = Plan->getOrAddVPValue(Reduction.first);
9295       VPValue *Red = Plan->getOrAddVPValue(Reduction.second.getLoopExitInstr());
9296       Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi});
9297     }
9298   }
9299 
9300   VPlanTransforms::sinkScalarOperands(*Plan);
9301   VPlanTransforms::mergeReplicateRegions(*Plan);
9302 
9303   std::string PlanName;
9304   raw_string_ostream RSO(PlanName);
9305   ElementCount VF = Range.Start;
9306   Plan->addVF(VF);
9307   RSO << "Initial VPlan for VF={" << VF;
9308   for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) {
9309     Plan->addVF(VF);
9310     RSO << "," << VF;
9311   }
9312   RSO << "},UF>=1";
9313   RSO.flush();
9314   Plan->setName(PlanName);
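  // For example (illustrative): a plan covering VF 4 and 8 is named
  // "Initial VPlan for VF={4,8},UF>=1".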
9315 
9316   return Plan;
9317 }
9318 
9319 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
  // Outer loop handling: outer loops may require CFG and instruction level
9321   // transformations before even evaluating whether vectorization is profitable.
9322   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
9323   // the vectorization pipeline.
9324   assert(!OrigLoop->isInnermost());
9325   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
9326 
9327   // Create new empty VPlan
9328   auto Plan = std::make_unique<VPlan>();
9329 
9330   // Build hierarchical CFG
9331   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
9332   HCFGBuilder.buildHierarchicalCFG();
9333 
9334   for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
9335        VF *= 2)
9336     Plan->addVF(VF);
9337 
9338   if (EnableVPlanPredication) {
9339     VPlanPredicator VPP(*Plan);
9340     VPP.predicate();
9341 
9342     // Avoid running transformation to recipes until masked code generation in
9343     // VPlan-native path is in place.
9344     return Plan;
9345   }
9346 
9347   SmallPtrSet<Instruction *, 1> DeadInstructions;
9348   VPlanTransforms::VPInstructionsToVPRecipes(OrigLoop, Plan,
9349                                              Legal->getInductionVars(),
9350                                              DeadInstructions, *PSE.getSE());
9351   return Plan;
9352 }
9353 
9354 // Adjust the recipes for any inloop reductions. The chain of instructions
// leading from the loop exit instr to the phi needs to be converted to
9356 // reductions, with one operand being vector and the other being the scalar
9357 // reduction chain.
9358 void LoopVectorizationPlanner::adjustRecipesForInLoopReductions(
9359     VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder, ElementCount MinVF) {
9360   for (auto &Reduction : CM.getInLoopReductionChains()) {
9361     PHINode *Phi = Reduction.first;
9362     RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
9363     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9364 
9365     if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc))
9366       continue;
9367 
    // ReductionOperations are ordered top-down from the phi's use to the
    // LoopExitValue. We keep track of the previous item (the Chain) to tell
9370     // which of the two operands will remain scalar and which will be reduced.
9371     // For minmax the chain will be the select instructions.
9372     Instruction *Chain = Phi;
9373     for (Instruction *R : ReductionOperations) {
9374       VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
9375       RecurKind Kind = RdxDesc.getRecurrenceKind();
9376 
9377       VPValue *ChainOp = Plan->getVPValue(Chain);
9378       unsigned FirstOpId;
9379       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9380         assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
9381                "Expected to replace a VPWidenSelectSC");
9382         FirstOpId = 1;
9383       } else {
9384         assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe)) &&
9385                "Expected to replace a VPWidenSC");
9386         FirstOpId = 0;
9387       }
9388       unsigned VecOpId =
9389           R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
9390       VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));
9391 
9392       auto *CondOp = CM.foldTailByMasking()
9393                          ? RecipeBuilder.createBlockInMask(R->getParent(), Plan)
9394                          : nullptr;
9395       VPReductionRecipe *RedRecipe = new VPReductionRecipe(
9396           &RdxDesc, R, ChainOp, VecOp, CondOp, TTI);
9397       WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
9398       Plan->removeVPValueFor(R);
9399       Plan->addVPValue(R, RedRecipe);
9400       WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator());
9402       WidenRecipe->eraseFromParent();
9403 
9404       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9405         VPRecipeBase *CompareRecipe =
9406             RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
9407         assert(isa<VPWidenRecipe>(CompareRecipe) &&
9408                "Expected to replace a VPWidenSC");
9409         assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 &&
9410                "Expected no remaining users");
9411         CompareRecipe->eraseFromParent();
9412       }
9413       Chain = R;
9414     }
9415   }
9416 }
9417 
9418 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
9419 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
9420                                VPSlotTracker &SlotTracker) const {
9421   O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
9422   IG->getInsertPos()->printAsOperand(O, false);
9423   O << ", ";
9424   getAddr()->printAsOperand(O, SlotTracker);
9425   VPValue *Mask = getMask();
9426   if (Mask) {
9427     O << ", ";
9428     Mask->printAsOperand(O, SlotTracker);
9429   }
9430   for (unsigned i = 0; i < IG->getFactor(); ++i)
9431     if (Instruction *I = IG->getMember(i))
9432       O << "\n" << Indent << "  " << VPlanIngredient(I) << " " << i;
9433 }
9434 #endif
9435 
9436 void VPWidenCallRecipe::execute(VPTransformState &State) {
9437   State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this,
9438                                   *this, State);
9439 }
9440 
9441 void VPWidenSelectRecipe::execute(VPTransformState &State) {
9442   State.ILV->widenSelectInstruction(*cast<SelectInst>(getUnderlyingInstr()),
9443                                     this, *this, InvariantCond, State);
9444 }
9445 
9446 void VPWidenRecipe::execute(VPTransformState &State) {
9447   State.ILV->widenInstruction(*getUnderlyingInstr(), this, *this, State);
9448 }
9449 
9450 void VPWidenGEPRecipe::execute(VPTransformState &State) {
9451   State.ILV->widenGEP(cast<GetElementPtrInst>(getUnderlyingInstr()), this,
9452                       *this, State.UF, State.VF, IsPtrLoopInvariant,
9453                       IsIndexLoopInvariant, State);
9454 }
9455 
9456 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
9457   assert(!State.Instance && "Int or FP induction being replicated.");
9458   State.ILV->widenIntOrFpInduction(IV, getStartValue()->getLiveInIRValue(),
9459                                    getTruncInst(), getVPValue(0),
9460                                    getCastValue(), State);
9461 }
9462 
9463 void VPWidenPHIRecipe::execute(VPTransformState &State) {
9464   State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), this,
9465                                  State);
9466 }
9467 
9468 void VPBlendRecipe::execute(VPTransformState &State) {
9469   State.ILV->setDebugLocFromInst(Phi, &State.Builder);
9470   // We know that all PHIs in non-header blocks are converted into
9471   // selects, so we don't have to worry about the insertion order and we
9472   // can just use the builder.
9473   // At this point we generate the predication tree. There may be
9474   // duplications since this is a simple recursive scan, but future
9475   // optimizations will clean it up.
9476 
9477   unsigned NumIncoming = getNumIncomingValues();
9478 
9479   // Generate a sequence of selects of the form:
9480   // SELECT(Mask3, In3,
9481   //        SELECT(Mask2, In2,
9482   //               SELECT(Mask1, In1,
9483   //                      In0)))
  // Note that Mask0 is never used: lanes for which no path reaches this phi
  // are essentially undef and are taken from In0.
9486   InnerLoopVectorizer::VectorParts Entry(State.UF);
9487   for (unsigned In = 0; In < NumIncoming; ++In) {
9488     for (unsigned Part = 0; Part < State.UF; ++Part) {
9489       // We might have single edge PHIs (blocks) - use an identity
9490       // 'select' for the first PHI operand.
9491       Value *In0 = State.get(getIncomingValue(In), Part);
9492       if (In == 0)
9493         Entry[Part] = In0; // Initialize with the first incoming value.
9494       else {
9495         // Select between the current value and the previous incoming edge
9496         // based on the incoming mask.
9497         Value *Cond = State.get(getMask(In), Part);
9498         Entry[Part] =
9499             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
9500       }
9501     }
9502   }
9503   for (unsigned Part = 0; Part < State.UF; ++Part)
9504     State.set(this, Entry[Part], Part);
9505 }
9506 
9507 void VPInterleaveRecipe::execute(VPTransformState &State) {
9508   assert(!State.Instance && "Interleave group being replicated.");
9509   State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(),
9510                                       getStoredValues(), getMask());
9511 }
9512 
9513 void VPReductionRecipe::execute(VPTransformState &State) {
9514   assert(!State.Instance && "Reduction being replicated.");
9515   Value *PrevInChain = State.get(getChainOp(), 0);
9516   for (unsigned Part = 0; Part < State.UF; ++Part) {
9517     RecurKind Kind = RdxDesc->getRecurrenceKind();
9518     bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc);
9519     Value *NewVecOp = State.get(getVecOp(), Part);
9520     if (VPValue *Cond = getCondOp()) {
9521       Value *NewCond = State.get(Cond, Part);
9522       VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
9523       Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
9524           Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags());
9525       Constant *IdenVec =
9526           ConstantVector::getSplat(VecTy->getElementCount(), Iden);
9527       Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
9528       NewVecOp = Select;
9529     }
9530     Value *NewRed;
9531     Value *NextInChain;
9532     if (IsOrdered) {
9533       if (State.VF.isVector())
9534         NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp,
9535                                         PrevInChain);
9536       else
9537         NewRed = State.Builder.CreateBinOp(
9538             (Instruction::BinaryOps)getUnderlyingInstr()->getOpcode(),
9539             PrevInChain, NewVecOp);
9540       PrevInChain = NewRed;
9541     } else {
9542       PrevInChain = State.get(getChainOp(), Part);
9543       NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
9544     }
9545     if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9546       NextInChain =
9547           createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
9548                          NewRed, PrevInChain);
9549     } else if (IsOrdered)
9550       NextInChain = NewRed;
9551     else {
9552       NextInChain = State.Builder.CreateBinOp(
9553           (Instruction::BinaryOps)getUnderlyingInstr()->getOpcode(), NewRed,
9554           PrevInChain);
9555     }
9556     State.set(this, NextInChain, Part);
9557   }
9558 }
9559 
9560 void VPReplicateRecipe::execute(VPTransformState &State) {
9561   if (State.Instance) { // Generate a single instance.
9562     assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
9563     State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this,
9564                                     *State.Instance, IsPredicated, State);
9565     // Insert scalar instance packing it into a vector.
9566     if (AlsoPack && State.VF.isVector()) {
9567       // If we're constructing lane 0, initialize to start from poison.
9568       if (State.Instance->Lane.isFirstLane()) {
9569         assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
9570         Value *Poison = PoisonValue::get(
9571             VectorType::get(getUnderlyingValue()->getType(), State.VF));
9572         State.set(this, Poison, State.Instance->Part);
9573       }
9574       State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
9575     }
9576     return;
9577   }
9578 
  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane for
  // each of the UF parts.
9582   unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
9583   assert((!State.VF.isScalable() || IsUniform) &&
9584          "Can't scalarize a scalable vector");
9585   for (unsigned Part = 0; Part < State.UF; ++Part)
9586     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
9587       State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this,
9588                                       VPIteration(Part, Lane), IsPredicated,
9589                                       State);
9590 }
9591 
9592 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
9593   assert(State.Instance && "Branch on Mask works only on single instance.");
9594 
9595   unsigned Part = State.Instance->Part;
9596   unsigned Lane = State.Instance->Lane.getKnownLane();
9597 
9598   Value *ConditionBit = nullptr;
9599   VPValue *BlockInMask = getMask();
9600   if (BlockInMask) {
9601     ConditionBit = State.get(BlockInMask, Part);
9602     if (ConditionBit->getType()->isVectorTy())
9603       ConditionBit = State.Builder.CreateExtractElement(
9604           ConditionBit, State.Builder.getInt32(Lane));
9605   } else // Block in mask is all-one.
9606     ConditionBit = State.Builder.getTrue();
9607 
9608   // Replace the temporary unreachable terminator with a new conditional branch,
9609   // whose two destinations will be set later when they are created.
9610   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
9611   assert(isa<UnreachableInst>(CurrentTerminator) &&
9612          "Expected to replace unreachable terminator with conditional branch.");
9613   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
9614   CondBr->setSuccessor(0, nullptr);
9615   ReplaceInstWithInst(CurrentTerminator, CondBr);
9616 }
9617 
9618 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
9619   assert(State.Instance && "Predicated instruction PHI works per instance.");
9620   Instruction *ScalarPredInst =
9621       cast<Instruction>(State.get(getOperand(0), *State.Instance));
9622   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
9623   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
9624   assert(PredicatingBB && "Predicated block has no single predecessor.");
9625   assert(isa<VPReplicateRecipe>(getOperand(0)) &&
9626          "operand must be VPReplicateRecipe");
9627 
9628   // By current pack/unpack logic we need to generate only a single phi node: if
9629   // a vector value for the predicated instruction exists at this point it means
9630   // the instruction has vector users only, and a phi for the vector value is
9631   // needed. In this case the recipe of the predicated instruction is marked to
9632   // also do that packing, thereby "hoisting" the insert-element sequence.
9633   // Otherwise, a phi node for the scalar value is needed.
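  // For example (hypothetical IR), if the lane was packed in the predicated
  // block via
  //   %v.new = insertelement <4 x i32> %v, i32 %s, i32 <Lane>
  // then the merge phi generated here is
  //   %vphi = phi <4 x i32> [ %v, %predicating.bb ], [ %v.new, %predicated.bb ]
  // In the scalar case, the phi instead merges poison with the scalar result.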
9634   unsigned Part = State.Instance->Part;
9635   if (State.hasVectorValue(getOperand(0), Part)) {
9636     Value *VectorValue = State.get(getOperand(0), Part);
9637     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
9638     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
9639     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
9640     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
9641     if (State.hasVectorValue(this, Part))
9642       State.reset(this, VPhi, Part);
9643     else
9644       State.set(this, VPhi, Part);
9645     // NOTE: Currently we need to update the value of the operand, so the next
9646     // predicated iteration inserts its generated value in the correct vector.
9647     State.reset(getOperand(0), VPhi, Part);
9648   } else {
9649     Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType();
9650     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
9651     Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
9652                      PredicatingBB);
9653     Phi->addIncoming(ScalarPredInst, PredicatedBB);
9654     if (State.hasScalarValue(this, *State.Instance))
9655       State.reset(this, Phi, *State.Instance);
9656     else
9657       State.set(this, Phi, *State.Instance);
9658     // NOTE: Currently we need to update the value of the operand, so the next
9659     // predicated iteration inserts its generated value in the correct vector.
9660     State.reset(getOperand(0), Phi, *State.Instance);
9661   }
9662 }
9663 
9664 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
9665   VPValue *StoredValue = isStore() ? getStoredValue() : nullptr;
9666   State.ILV->vectorizeMemoryInstruction(
9667       &Ingredient, State, StoredValue ? nullptr : getVPSingleValue(), getAddr(),
9668       StoredValue, getMask());
9669 }
9670 
// Determine how to lower the scalar epilogue, which depends on 1) optimizing
// for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
// predication, and 4) a TTI hook that analyzes whether the loop is suitable
// for predication.
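// For example, a function marked optsize is handled by case 1) below and never
// gets a scalar epilogue, whereas a loop annotated with
// '#pragma clang loop vectorize_predicate(enable)' is handled by case 3) and
// is vectorized with a predicated (tail-folded) body instead of an epilogue.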
9675 static ScalarEpilogueLowering getScalarEpilogueLowering(
9676     Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
9677     BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
9678     AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
9679     LoopVectorizationLegality &LVL) {
9680   // 1) OptSize takes precedence over all other options, i.e. if this is set,
9681   // don't look at hints or options, and don't request a scalar epilogue.
9682   // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
9683   // LoopAccessInfo (due to code dependency and not being able to reliably get
9684   // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
9685   // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
  // versioning when the vectorization is forced, unlike hasOptSize. So revert
  // to the old way and vectorize with versioning when forced. See D81345.)
9688   if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
9689                                                       PGSOQueryType::IRPass) &&
9690                           Hints.getForce() != LoopVectorizeHints::FK_Enabled))
9691     return CM_ScalarEpilogueNotAllowedOptSize;
9692 
  // 2) If set, obey the command-line directives.
9694   if (PreferPredicateOverEpilogue.getNumOccurrences()) {
9695     switch (PreferPredicateOverEpilogue) {
9696     case PreferPredicateTy::ScalarEpilogue:
9697       return CM_ScalarEpilogueAllowed;
9698     case PreferPredicateTy::PredicateElseScalarEpilogue:
9699       return CM_ScalarEpilogueNotNeededUsePredicate;
9700     case PreferPredicateTy::PredicateOrDontVectorize:
9701       return CM_ScalarEpilogueNotAllowedUsePredicate;
9702     };
9703   }
9704 
  // 3) If set, obey the loop hints.
9706   switch (Hints.getPredicate()) {
9707   case LoopVectorizeHints::FK_Enabled:
9708     return CM_ScalarEpilogueNotNeededUsePredicate;
9709   case LoopVectorizeHints::FK_Disabled:
9710     return CM_ScalarEpilogueAllowed;
9711   };
9712 
  // 4) If the TTI hook indicates this is profitable, request predication.
9714   if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
9715                                        LVL.getLAI()))
9716     return CM_ScalarEpilogueNotNeededUsePredicate;
9717 
9718   return CM_ScalarEpilogueAllowed;
9719 }
9720 
9721 Value *VPTransformState::get(VPValue *Def, unsigned Part) {
  // If values have been set for this Def, return the one relevant for \p Part.
9723   if (hasVectorValue(Def, Part))
9724     return Data.PerPartOutput[Def][Part];
9725 
9726   if (!hasScalarValue(Def, {Part, 0})) {
9727     Value *IRV = Def->getLiveInIRValue();
9728     Value *B = ILV->getBroadcastInstrs(IRV);
9729     set(Def, B, Part);
9730     return B;
9731   }
9732 
9733   Value *ScalarValue = get(Def, {Part, 0});
9734   // If we aren't vectorizing, we can just copy the scalar map values over
9735   // to the vector map.
9736   if (VF.isScalar()) {
9737     set(Def, ScalarValue, Part);
9738     return ScalarValue;
9739   }
9740 
9741   auto *RepR = dyn_cast<VPReplicateRecipe>(Def);
9742   bool IsUniform = RepR && RepR->isUniform();
9743 
9744   unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
9745   // Check if there is a scalar value for the selected lane.
9746   if (!hasScalarValue(Def, {Part, LastLane})) {
9747     // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform.
    assert(isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) &&
           "unexpected recipe found to be uniform");
9750     IsUniform = true;
9751     LastLane = 0;
9752   }
9753 
9754   auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
9755   // Set the insert point after the last scalarized instruction or after the
9756   // last PHI, if LastInst is a PHI. This ensures the insertelement sequence
9757   // will directly follow the scalar definitions.
9758   auto OldIP = Builder.saveIP();
9759   auto NewIP =
9760       isa<PHINode>(LastInst)
9761           ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI())
9762           : std::next(BasicBlock::iterator(LastInst));
9763   Builder.SetInsertPoint(&*NewIP);
9764 
9765   // However, if we are vectorizing, we need to construct the vector values.
9766   // If the value is known to be uniform after vectorization, we can just
9767   // broadcast the scalar value corresponding to lane zero for each unroll
9768   // iteration. Otherwise, we construct the vector values using
9769   // insertelement instructions. Since the resulting vectors are stored in
9770   // State, we will only generate the insertelements once.
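  // For example (hypothetical IR), with VF=4 the non-uniform case packs the
  // per-lane scalars %s0..%s3 once per part:
  //   %p0 = insertelement <4 x i32> poison, i32 %s0, i32 0
  //   %p1 = insertelement <4 x i32> %p0, i32 %s1, i32 1
  //   %p2 = insertelement <4 x i32> %p1, i32 %s2, i32 2
  //   %p3 = insertelement <4 x i32> %p2, i32 %s3, i32 3
  // whereas the uniform case emits a single broadcast of lane 0.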
9771   Value *VectorValue = nullptr;
9772   if (IsUniform) {
9773     VectorValue = ILV->getBroadcastInstrs(ScalarValue);
9774     set(Def, VectorValue, Part);
9775   } else {
    // Initialize packing with insertelements to start from poison.
    assert(!VF.isScalable() && "VF is assumed to be non-scalable.");
    Value *Poison = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
    set(Def, Poison, Part);
9780     for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
9781       ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
9782     VectorValue = get(Def, Part);
9783   }
9784   Builder.restoreIP(OldIP);
9785   return VectorValue;
9786 }
9787 
// Process the loop in the VPlan-native vectorization path. This path builds
// VPlan upfront in the vectorization pipeline, which allows applying
// VPlan-to-VPlan transformations from the very beginning without modifying the
// input LLVM IR.
9792 static bool processLoopInVPlanNativePath(
9793     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
9794     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
9795     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
9796     OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
9797     ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints,
9798     LoopVectorizationRequirements &Requirements) {
9799 
9800   if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
9801     LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
9802     return false;
9803   }
9804   assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
9805   Function *F = L->getHeader()->getParent();
9806   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
9807 
9808   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
9809       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
9810 
9811   LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
9812                                 &Hints, IAI);
9813   // Use the planner for outer loop vectorization.
9814   // TODO: CM is not used at this point inside the planner. Turn CM into an
9815   // optional argument if we don't need it in the future.
9816   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints,
9817                                Requirements, ORE);
9818 
9819   // Get user vectorization factor.
9820   ElementCount UserVF = Hints.getWidth();
9821 
9822   CM.collectElementTypesForWidening();
9823 
9824   // Plan how to best vectorize, return the best VF and its cost.
9825   const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
9826 
9827   // If we are stress testing VPlan builds, do not attempt to generate vector
9828   // code. Masked vector code generation support will follow soon.
9829   // Also, do not attempt to vectorize if no vector code will be produced.
9830   if (VPlanBuildStressTest || EnableVPlanPredication ||
9831       VectorizationFactor::Disabled() == VF)
9832     return false;
9833 
9834   LVP.setBestPlan(VF.Width, 1);
9835 
9836   {
9837     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
9838                              F->getParent()->getDataLayout());
9839     InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
9840                            &CM, BFI, PSI, Checks);
9841     LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
9842                       << L->getHeader()->getParent()->getName() << "\"\n");
9843     LVP.executePlan(LB, DT);
9844   }
9845 
9846   // Mark the loop as already vectorized to avoid vectorizing again.
9847   Hints.setAlreadyVectorized();
9848   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
9849   return true;
9850 }
9851 
// Emit a remark if there are stores to floats that required a floating point
// extension. If the vectorized loop was generated with mixed floating point
// precision, there will be a performance penalty from the conversion overhead
// and the change in the effective vector width.
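// For example (hypothetical C source), a float store fed by arithmetic that
// was promoted to double triggers the remark:
//   float F[N]; double D;
//   F[I] = F[I] * D; // fpext to double, fmul double, fptrunc, store float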
9856 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
9857   SmallVector<Instruction *, 4> Worklist;
9858   for (BasicBlock *BB : L->getBlocks()) {
9859     for (Instruction &Inst : *BB) {
9860       if (auto *S = dyn_cast<StoreInst>(&Inst)) {
9861         if (S->getValueOperand()->getType()->isFloatTy())
9862           Worklist.push_back(S);
9863       }
9864     }
9865   }
9866 
  // Traverse the floating point stores upwards, searching for floating point
  // conversions.
9869   SmallPtrSet<const Instruction *, 4> Visited;
9870   SmallPtrSet<const Instruction *, 4> EmittedRemark;
9871   while (!Worklist.empty()) {
9872     auto *I = Worklist.pop_back_val();
9873     if (!L->contains(I))
9874       continue;
9875     if (!Visited.insert(I).second)
9876       continue;
9877 
9878     // Emit a remark if the floating point store required a floating
9879     // point conversion.
9880     // TODO: More work could be done to identify the root cause such as a
9881     // constant or a function return type and point the user to it.
9882     if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
9883       ORE->emit([&]() {
9884         return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
9885                                           I->getDebugLoc(), L->getHeader())
9886                << "floating point conversion changes vector width. "
9887                << "Mixed floating point precision requires an up/down "
9888                << "cast that will negatively impact performance.";
9889       });
9890 
9891     for (Use &Op : I->operands())
9892       if (auto *OpI = dyn_cast<Instruction>(Op))
9893         Worklist.push_back(OpI);
9894   }
9895 }
9896 
9897 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
9898     : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
9899                                !EnableLoopInterleaving),
9900       VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
9901                               !EnableLoopVectorization) {}
9902 
9903 bool LoopVectorizePass::processLoop(Loop *L) {
9904   assert((EnableVPlanNativePath || L->isInnermost()) &&
9905          "VPlan-native path is not enabled. Only process inner loops.");
9906 
9907 #ifndef NDEBUG
9908   const std::string DebugLocStr = getDebugLocString(L);
9909 #endif /* NDEBUG */
9910 
9911   LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
9912                     << L->getHeader()->getParent()->getName() << "\" from "
9913                     << DebugLocStr << "\n");
9914 
9915   LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE);
9916 
9917   LLVM_DEBUG(
9918       dbgs() << "LV: Loop hints:"
9919              << " force="
9920              << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
9921                      ? "disabled"
9922                      : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
9923                             ? "enabled"
9924                             : "?"))
9925              << " width=" << Hints.getWidth()
9926              << " interleave=" << Hints.getInterleave() << "\n");
9927 
9928   // Function containing loop
9929   Function *F = L->getHeader()->getParent();
9930 
9931   // Looking at the diagnostic output is the only way to determine if a loop
9932   // was vectorized (other than looking at the IR or machine code), so it
9933   // is important to generate an optimization remark for each loop. Most of
9934   // these messages are generated as OptimizationRemarkAnalysis. Remarks
  // generated as OptimizationRemark and OptimizationRemarkMissed are less
  // verbose, reporting vectorized loops and unvectorized loops that may
  // benefit from vectorization, respectively.
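  // For example, a loop vectorized and interleaved below yields an
  // OptimizationRemark such as "vectorized loop (vectorization width: 4,
  // interleaved count: 2)", while a loop rejected by the cost model yields an
  // OptimizationRemarkMissed such as "VectorizationNotBeneficial".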
9938 
9939   if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
9940     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
9941     return false;
9942   }
9943 
9944   PredicatedScalarEvolution PSE(*SE, *L);
9945 
9946   // Check if it is legal to vectorize the loop.
9947   LoopVectorizationRequirements Requirements;
9948   LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
9949                                 &Requirements, &Hints, DB, AC, BFI, PSI);
9950   if (!LVL.canVectorize(EnableVPlanNativePath)) {
9951     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
9952     Hints.emitRemarkWithHints();
9953     return false;
9954   }
9955 
9956   // Check the function attributes and profiles to find out if this function
9957   // should be optimized for size.
9958   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
9959       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);
9960 
9961   // Entrance to the VPlan-native vectorization path. Outer loops are processed
9962   // here. They may require CFG and instruction level transformations before
9963   // even evaluating whether vectorization is profitable. Since we cannot modify
9964   // the incoming IR, we need to build VPlan upfront in the vectorization
9965   // pipeline.
9966   if (!L->isInnermost())
9967     return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
9968                                         ORE, BFI, PSI, Hints, Requirements);
9969 
9970   assert(L->isInnermost() && "Inner loop expected.");
9971 
9972   // Check the loop for a trip count threshold: vectorize loops with a tiny trip
9973   // count by optimizing for size, to minimize overheads.
9974   auto ExpectedTC = getSmallBestKnownTC(*SE, L);
9975   if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
9976     LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
9977                       << "This loop is worth vectorizing only if no scalar "
9978                       << "iteration overheads are incurred.");
9979     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
9980       LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
9981     else {
9982       LLVM_DEBUG(dbgs() << "\n");
9983       SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
9984     }
9985   }
9986 
9987   // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem right -- what if the loop is
9989   // an integer loop and the vector instructions selected are purely integer
9990   // vector instructions?
9991   if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
9992     reportVectorizationFailure(
9993         "Can't vectorize when the NoImplicitFloat attribute is used",
9994         "loop not vectorized due to NoImplicitFloat attribute",
9995         "NoImplicitFloat", ORE, L);
9996     Hints.emitRemarkWithHints();
9997     return false;
9998   }
9999 
10000   // Check if the target supports potentially unsafe FP vectorization.
10001   // FIXME: Add a check for the type of safety issue (denormal, signaling)
10002   // for the target we're vectorizing for, to make sure none of the
10003   // additional fp-math flags can help.
10004   if (Hints.isPotentiallyUnsafe() &&
10005       TTI->isFPVectorizationPotentiallyUnsafe()) {
10006     reportVectorizationFailure(
10007         "Potentially unsafe FP op prevents vectorization",
10008         "loop not vectorized due to unsafe FP support.",
10009         "UnsafeFP", ORE, L);
10010     Hints.emitRemarkWithHints();
10011     return false;
10012   }
10013 
10014   if (!LVL.canVectorizeFPMath(EnableStrictReductions)) {
10015     ORE->emit([&]() {
10016       auto *ExactFPMathInst = Requirements.getExactFPInst();
10017       return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
10018                                                  ExactFPMathInst->getDebugLoc(),
10019                                                  ExactFPMathInst->getParent())
10020              << "loop not vectorized: cannot prove it is safe to reorder "
10021                 "floating-point operations";
10022     });
10023     LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
10024                          "reorder floating-point operations\n");
10025     Hints.emitRemarkWithHints();
10026     return false;
10027   }
10028 
10029   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
10030   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
10031 
10032   // If an override option has been passed in for interleaved accesses, use it.
10033   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
10034     UseInterleaved = EnableInterleavedMemAccesses;
10035 
10036   // Analyze interleaved memory accesses.
10037   if (UseInterleaved) {
10038     IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
10039   }
10040 
10041   // Use the cost model.
10042   LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
10043                                 F, &Hints, IAI);
10044   CM.collectValuesToIgnore();
10045   CM.collectElementTypesForWidening();
10046 
10047   // Use the planner for vectorization.
10048   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints,
10049                                Requirements, ORE);
10050 
10051   // Get user vectorization factor and interleave count.
10052   ElementCount UserVF = Hints.getWidth();
10053   unsigned UserIC = Hints.getInterleave();
10054 
10055   // Plan how to best vectorize, return the best VF and its cost.
10056   Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
10057 
10058   VectorizationFactor VF = VectorizationFactor::Disabled();
10059   unsigned IC = 1;
10060 
10061   if (MaybeVF) {
10062     VF = *MaybeVF;
10063     // Select the interleave count.
10064     IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue());
10065   }
10066 
10067   // Identify the diagnostic messages that should be produced.
10068   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
10069   bool VectorizeLoop = true, InterleaveLoop = true;
10070   if (VF.Width.isScalar()) {
10071     LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
10072     VecDiagMsg = std::make_pair(
10073         "VectorizationNotBeneficial",
10074         "the cost-model indicates that vectorization is not beneficial");
10075     VectorizeLoop = false;
10076   }
10077 
10078   if (!MaybeVF && UserIC > 1) {
10079     // Tell the user interleaving was avoided up-front, despite being explicitly
10080     // requested.
10081     LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
10082                          "interleaving should be avoided up front\n");
10083     IntDiagMsg = std::make_pair(
10084         "InterleavingAvoided",
10085         "Ignoring UserIC, because interleaving was avoided up front");
10086     InterleaveLoop = false;
10087   } else if (IC == 1 && UserIC <= 1) {
10088     // Tell the user interleaving is not beneficial.
10089     LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
10090     IntDiagMsg = std::make_pair(
10091         "InterleavingNotBeneficial",
10092         "the cost-model indicates that interleaving is not beneficial");
10093     InterleaveLoop = false;
10094     if (UserIC == 1) {
10095       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
10096       IntDiagMsg.second +=
10097           " and is explicitly disabled or interleave count is set to 1";
10098     }
10099   } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
10101     LLVM_DEBUG(
10102         dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
10103     IntDiagMsg = std::make_pair(
10104         "InterleavingBeneficialButDisabled",
10105         "the cost-model indicates that interleaving is beneficial "
10106         "but is explicitly disabled or interleave count is set to 1");
10107     InterleaveLoop = false;
10108   }
10109 
10110   // Override IC if user provided an interleave count.
10111   IC = UserIC > 0 ? UserIC : IC;
10112 
10113   // Emit diagnostic messages, if any.
10114   const char *VAPassName = Hints.vectorizeAnalysisPassName();
10115   if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
10117     ORE->emit([&]() {
10118       return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
10119                                       L->getStartLoc(), L->getHeader())
10120              << VecDiagMsg.second;
10121     });
10122     ORE->emit([&]() {
10123       return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
10124                                       L->getStartLoc(), L->getHeader())
10125              << IntDiagMsg.second;
10126     });
10127     return false;
10128   } else if (!VectorizeLoop && InterleaveLoop) {
10129     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10130     ORE->emit([&]() {
10131       return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
10132                                         L->getStartLoc(), L->getHeader())
10133              << VecDiagMsg.second;
10134     });
10135   } else if (VectorizeLoop && !InterleaveLoop) {
10136     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10137                       << ") in " << DebugLocStr << '\n');
10138     ORE->emit([&]() {
10139       return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
10140                                         L->getStartLoc(), L->getHeader())
10141              << IntDiagMsg.second;
10142     });
10143   } else if (VectorizeLoop && InterleaveLoop) {
10144     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10145                       << ") in " << DebugLocStr << '\n');
10146     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10147   }
10148 
10149   bool DisableRuntimeUnroll = false;
10150   MDNode *OrigLoopID = L->getLoopID();
10151   {
10152     // Optimistically generate runtime checks. Drop them if they turn out to not
10153     // be profitable. Limit the scope of Checks, so the cleanup happens
    // immediately after vector code generation is done.
10155     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10156                              F->getParent()->getDataLayout());
10157     if (!VF.Width.isScalar() || IC > 1)
10158       Checks.Create(L, *LVL.getLAI(), PSE.getUnionPredicate());
10159     LVP.setBestPlan(VF.Width, IC);
10160 
10161     using namespace ore;
10162     if (!VectorizeLoop) {
10163       assert(IC > 1 && "interleave count should not be 1 or 0");
10164       // If we decided that it is not legal to vectorize the loop, then
10165       // interleave it.
10166       InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
10167                                  &CM, BFI, PSI, Checks);
10168       LVP.executePlan(Unroller, DT);
10169 
10170       ORE->emit([&]() {
10171         return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
10172                                   L->getHeader())
10173                << "interleaved loop (interleaved count: "
10174                << NV("InterleaveCount", IC) << ")";
10175       });
10176     } else {
10177       // If we decided that it is *legal* to vectorize the loop, then do it.
10178 
10179       // Consider vectorizing the epilogue too if it's profitable.
10180       VectorizationFactor EpilogueVF =
10181           CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
10182       if (EpilogueVF.Width.isVector()) {
10183 
10184         // The first pass vectorizes the main loop and creates a scalar epilogue
10185         // to be vectorized by executing the plan (potentially with a different
10186         // factor) again shortly afterwards.
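        // For example, if the main loop was selected with VF.Width=8 and IC=2
        // and the cost model picks EpilogueVF.Width=4, then EPI records
        // {MainLoopVF=8, MainLoopUF=2, EpilogueVF=4, EpilogueUF=1} and the
        // epilogue is vectorized by a second executePlan run below.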
10187         EpilogueLoopVectorizationInfo EPI(VF.Width.getKnownMinValue(), IC,
10188                                           EpilogueVF.Width.getKnownMinValue(),
10189                                           1);
10190         EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE,
10191                                            EPI, &LVL, &CM, BFI, PSI, Checks);
10192 
10193         LVP.setBestPlan(EPI.MainLoopVF, EPI.MainLoopUF);
10194         LVP.executePlan(MainILV, DT);
10195         ++LoopsVectorized;
10196 
10197         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10198         formLCSSARecursively(*L, *DT, LI, SE);
10199 
10200         // Second pass vectorizes the epilogue and adjusts the control flow
10201         // edges from the first pass.
10202         LVP.setBestPlan(EPI.EpilogueVF, EPI.EpilogueUF);
10203         EPI.MainLoopVF = EPI.EpilogueVF;
10204         EPI.MainLoopUF = EPI.EpilogueUF;
10205         EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
10206                                                  ORE, EPI, &LVL, &CM, BFI, PSI,
10207                                                  Checks);
10208         LVP.executePlan(EpilogILV, DT);
10209         ++LoopsEpilogueVectorized;
10210 
10211         if (!MainILV.areSafetyChecksAdded())
10212           DisableRuntimeUnroll = true;
10213       } else {
10214         InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
10215                                &LVL, &CM, BFI, PSI, Checks);
10216         LVP.executePlan(LB, DT);
10217         ++LoopsVectorized;
10218 
10219         // Add metadata to disable runtime unrolling a scalar loop when there
10220         // are no runtime checks about strides and memory. A scalar loop that is
10221         // rarely used is not worth unrolling.
10222         if (!LB.areSafetyChecksAdded())
10223           DisableRuntimeUnroll = true;
10224       }
10225       // Report the vectorization decision.
10226       ORE->emit([&]() {
10227         return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
10228                                   L->getHeader())
10229                << "vectorized loop (vectorization width: "
10230                << NV("VectorizationFactor", VF.Width)
10231                << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
10232       });
10233     }
10234 
10235     if (ORE->allowExtraAnalysis(LV_NAME))
10236       checkMixedPrecision(L, ORE);
10237   }
10238 
10239   Optional<MDNode *> RemainderLoopID =
10240       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
10241                                       LLVMLoopVectorizeFollowupEpilogue});
10242   if (RemainderLoopID.hasValue()) {
10243     L->setLoopID(RemainderLoopID.getValue());
10244   } else {
10245     if (DisableRuntimeUnroll)
10246       AddRuntimeUnrollDisableMetaData(L);
10247 
10248     // Mark the loop as already vectorized to avoid vectorizing again.
10249     Hints.setAlreadyVectorized();
10250   }
10251 
10252   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10253   return true;
10254 }
10255 
10256 LoopVectorizeResult LoopVectorizePass::runImpl(
10257     Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
10258     DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
10259     DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
10260     std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
10261     OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
10262   SE = &SE_;
10263   LI = &LI_;
10264   TTI = &TTI_;
10265   DT = &DT_;
10266   BFI = &BFI_;
10267   TLI = TLI_;
10268   AA = &AA_;
10269   AC = &AC_;
10270   GetLAA = &GetLAA_;
10271   DB = &DB_;
10272   ORE = &ORE_;
10273   PSI = PSI_;
10274 
10275   // Don't attempt if
10276   // 1. the target claims to have no vector registers, and
10277   // 2. interleaving won't help ILP.
10278   //
10279   // The second condition is necessary because, even if the target has no
10280   // vector registers, loop vectorization may still enable scalar
10281   // interleaving.
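  // For example, a hypothetical target reporting zero vector registers but
  // getMaxInterleaveFactor(1) == 2 is still processed, because interleaving
  // its scalar loops may improve ILP; we bail out only when both conditions
  // hold.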
10282   if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
10283       TTI->getMaxInterleaveFactor(1) < 2)
10284     return LoopVectorizeResult(false, false);
10285 
10286   bool Changed = false, CFGChanged = false;
10287 
10288   // The vectorizer requires loops to be in simplified form.
10289   // Since simplification may add new inner loops, it has to run before the
10290   // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
10292   // vectorized.
10293   for (auto &L : *LI)
10294     Changed |= CFGChanged |=
10295         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10296 
10297   // Build up a worklist of inner-loops to vectorize. This is necessary as
10298   // the act of vectorizing or partially unrolling a loop creates new loops
10299   // and can invalidate iterators across the loops.
10300   SmallVector<Loop *, 8> Worklist;
10301 
10302   for (Loop *L : *LI)
10303     collectSupportedLoops(*L, LI, ORE, Worklist);
10304 
10305   LoopsAnalyzed += Worklist.size();
10306 
10307   // Now walk the identified inner loops.
10308   while (!Worklist.empty()) {
10309     Loop *L = Worklist.pop_back_val();
10310 
10311     // For the inner loops we actually process, form LCSSA to simplify the
10312     // transform.
10313     Changed |= formLCSSARecursively(*L, *DT, LI, SE);
10314 
10315     Changed |= CFGChanged |= processLoop(L);
10316   }
10317 
  // Report whether any loops were transformed and whether the CFG changed.
10319   return LoopVectorizeResult(Changed, CFGChanged);
10320 }
10321 
10322 PreservedAnalyses LoopVectorizePass::run(Function &F,
10323                                          FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  MemorySSA *MSSA = EnableMSSALoopDependency
                        ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA()
                        : nullptr;

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,  SE,
                                      TLI, TTI, nullptr, MSSA};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve LoopInfo/DominatorTree analyses with outer
  // loop vectorization. Until this is addressed, mark these analyses as
  // preserved only for the non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }
  if (!Result.MadeCFGChange)
    PA.preserveSet<CFGAnalyses>();
  return PA;
10365 }
10366