1 //===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
10 // and generates target-independent LLVM-IR.
11 // The vectorizer uses the TargetTransformInfo analysis to estimate the costs
12 // of instructions in order to estimate the profitability of vectorization.
13 //
14 // The loop vectorizer combines consecutive loop iterations into a single
15 // 'wide' iteration. After this transformation the index is incremented
16 // by the SIMD vector width, and not by one.
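// For example (illustrative only), a scalar loop such as
//   for (i = 0; i < n; ++i) a[i] = b[i] + 1;
// is conceptually rewritten for VF = 4 so that each wide iteration processes
// a[i..i+3] and b[i..i+3] with vector instructions and increments i by 4.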
17 //
18 // This pass has four parts:
19 // 1. The main loop pass that drives the different parts.
20 // 2. LoopVectorizationLegality - A unit that checks for the legality
21 //    of the vectorization.
22 // 3. InnerLoopVectorizer - A unit that performs the actual
23 //    widening of instructions.
24 // 4. LoopVectorizationCostModel - A unit that checks for the profitability
25 //    of vectorization. It decides on the optimal vector width, which
26 //    can be one, if vectorization is not profitable.
27 //
28 // There is an ongoing development effort to migrate the loop vectorizer to the
29 // VPlan infrastructure and to introduce outer loop vectorization support (see
30 // docs/Proposal/VectorizationPlan.rst and
31 // http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
32 // purpose, we temporarily introduced the VPlan-native vectorization path: an
33 // alternative vectorization path that is natively implemented on top of the
34 // VPlan infrastructure. See EnableVPlanNativePath for enabling.
35 //
36 //===----------------------------------------------------------------------===//
37 //
38 // The reduction-variable vectorization is based on the paper:
39 //  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
40 //
41 // Variable uniformity checks are inspired by:
42 //  Karrenberg, R. and Hack, S. Whole Function Vectorization.
43 //
44 // The interleaved access vectorization is based on the paper:
45 //  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
46 //  Data for SIMD
47 //
48 // Other ideas/concepts are from:
49 //  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
50 //
51 //  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
52 //  Vectorizing Compilers.
53 //
54 //===----------------------------------------------------------------------===//
55 
56 #include "llvm/Transforms/Vectorize/LoopVectorize.h"
57 #include "LoopVectorizationPlanner.h"
58 #include "VPRecipeBuilder.h"
59 #include "VPlan.h"
60 #include "VPlanHCFGBuilder.h"
61 #include "VPlanPredicator.h"
62 #include "VPlanTransforms.h"
63 #include "llvm/ADT/APInt.h"
64 #include "llvm/ADT/ArrayRef.h"
65 #include "llvm/ADT/DenseMap.h"
66 #include "llvm/ADT/DenseMapInfo.h"
67 #include "llvm/ADT/Hashing.h"
68 #include "llvm/ADT/MapVector.h"
69 #include "llvm/ADT/None.h"
70 #include "llvm/ADT/Optional.h"
71 #include "llvm/ADT/STLExtras.h"
72 #include "llvm/ADT/SmallPtrSet.h"
73 #include "llvm/ADT/SmallSet.h"
74 #include "llvm/ADT/SmallVector.h"
75 #include "llvm/ADT/Statistic.h"
76 #include "llvm/ADT/StringRef.h"
77 #include "llvm/ADT/Twine.h"
78 #include "llvm/ADT/iterator_range.h"
79 #include "llvm/Analysis/AssumptionCache.h"
80 #include "llvm/Analysis/BasicAliasAnalysis.h"
81 #include "llvm/Analysis/BlockFrequencyInfo.h"
82 #include "llvm/Analysis/CFG.h"
83 #include "llvm/Analysis/CodeMetrics.h"
84 #include "llvm/Analysis/DemandedBits.h"
85 #include "llvm/Analysis/GlobalsModRef.h"
86 #include "llvm/Analysis/LoopAccessAnalysis.h"
87 #include "llvm/Analysis/LoopAnalysisManager.h"
88 #include "llvm/Analysis/LoopInfo.h"
89 #include "llvm/Analysis/LoopIterator.h"
90 #include "llvm/Analysis/MemorySSA.h"
91 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
92 #include "llvm/Analysis/ProfileSummaryInfo.h"
93 #include "llvm/Analysis/ScalarEvolution.h"
94 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
95 #include "llvm/Analysis/TargetLibraryInfo.h"
96 #include "llvm/Analysis/TargetTransformInfo.h"
97 #include "llvm/Analysis/VectorUtils.h"
98 #include "llvm/IR/Attributes.h"
99 #include "llvm/IR/BasicBlock.h"
100 #include "llvm/IR/CFG.h"
101 #include "llvm/IR/Constant.h"
102 #include "llvm/IR/Constants.h"
103 #include "llvm/IR/DataLayout.h"
104 #include "llvm/IR/DebugInfoMetadata.h"
105 #include "llvm/IR/DebugLoc.h"
106 #include "llvm/IR/DerivedTypes.h"
107 #include "llvm/IR/DiagnosticInfo.h"
108 #include "llvm/IR/Dominators.h"
109 #include "llvm/IR/Function.h"
110 #include "llvm/IR/IRBuilder.h"
111 #include "llvm/IR/InstrTypes.h"
112 #include "llvm/IR/Instruction.h"
113 #include "llvm/IR/Instructions.h"
114 #include "llvm/IR/IntrinsicInst.h"
115 #include "llvm/IR/Intrinsics.h"
116 #include "llvm/IR/LLVMContext.h"
117 #include "llvm/IR/Metadata.h"
118 #include "llvm/IR/Module.h"
119 #include "llvm/IR/Operator.h"
120 #include "llvm/IR/PatternMatch.h"
121 #include "llvm/IR/Type.h"
122 #include "llvm/IR/Use.h"
123 #include "llvm/IR/User.h"
124 #include "llvm/IR/Value.h"
125 #include "llvm/IR/ValueHandle.h"
126 #include "llvm/IR/Verifier.h"
127 #include "llvm/InitializePasses.h"
128 #include "llvm/Pass.h"
129 #include "llvm/Support/Casting.h"
130 #include "llvm/Support/CommandLine.h"
131 #include "llvm/Support/Compiler.h"
132 #include "llvm/Support/Debug.h"
133 #include "llvm/Support/ErrorHandling.h"
134 #include "llvm/Support/InstructionCost.h"
135 #include "llvm/Support/MathExtras.h"
136 #include "llvm/Support/raw_ostream.h"
137 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
138 #include "llvm/Transforms/Utils/InjectTLIMappings.h"
139 #include "llvm/Transforms/Utils/LoopSimplify.h"
140 #include "llvm/Transforms/Utils/LoopUtils.h"
141 #include "llvm/Transforms/Utils/LoopVersioning.h"
142 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
143 #include "llvm/Transforms/Utils/SizeOpts.h"
144 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
145 #include <algorithm>
146 #include <cassert>
147 #include <cstdint>
148 #include <cstdlib>
149 #include <functional>
150 #include <iterator>
151 #include <limits>
152 #include <memory>
153 #include <string>
154 #include <tuple>
155 #include <utility>
156 
157 using namespace llvm;
158 
159 #define LV_NAME "loop-vectorize"
160 #define DEBUG_TYPE LV_NAME
161 
162 #ifndef NDEBUG
163 const char VerboseDebug[] = DEBUG_TYPE "-verbose";
164 #endif
165 
166 /// @{
167 /// Metadata attribute names
168 const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
169 const char LLVMLoopVectorizeFollowupVectorized[] =
170     "llvm.loop.vectorize.followup_vectorized";
171 const char LLVMLoopVectorizeFollowupEpilogue[] =
172     "llvm.loop.vectorize.followup_epilogue";
173 /// @}
174 
175 STATISTIC(LoopsVectorized, "Number of loops vectorized");
176 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
177 STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");
178 
179 static cl::opt<bool> EnableEpilogueVectorization(
180     "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
181     cl::desc("Enable vectorization of epilogue loops."));
182 
183 static cl::opt<unsigned> EpilogueVectorizationForceVF(
184     "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
185     cl::desc("When epilogue vectorization is enabled, and a value greater than "
186              "1 is specified, forces the given VF for all applicable epilogue "
187              "loops."));
188 
189 static cl::opt<unsigned> EpilogueVectorizationMinVF(
190     "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
191     cl::desc("Only loops with vectorization factor equal to or larger than "
192              "the specified value are considered for epilogue vectorization."));
193 
194 /// Loops with a known constant trip count below this number are vectorized only
195 /// if no scalar iteration overheads are incurred.
196 static cl::opt<unsigned> TinyTripCountVectorThreshold(
197     "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
198     cl::desc("Loops with a constant trip count that is smaller than this "
199              "value are vectorized only if no scalar iteration overheads "
200              "are incurred."));
201 
202 static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
203     "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
204     cl::desc("The maximum allowed number of runtime memory checks with a "
205              "vectorize(enable) pragma."));
206 
207 // Option prefer-predicate-over-epilogue indicates that an epilogue is undesired
208 // and that predication is preferred; the enum below lists the options. That is,
209 // the vectorizer will try to fold the tail loop (epilogue) into the vector body
210 // and predicate the instructions accordingly. If tail-folding fails, the
211 // fallback strategy depends on these values:
212 namespace PreferPredicateTy {
213   enum Option {
214     ScalarEpilogue = 0,
215     PredicateElseScalarEpilogue,
216     PredicateOrDontVectorize
217   };
218 } // namespace PreferPredicateTy
219 
220 static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
221     "prefer-predicate-over-epilogue",
222     cl::init(PreferPredicateTy::ScalarEpilogue),
223     cl::Hidden,
224     cl::desc("Tail-folding and predication preferences over creating a scalar "
225              "epilogue loop."),
226     cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
227                          "scalar-epilogue",
228                          "Don't tail-predicate loops, create scalar epilogue"),
229               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
230                          "predicate-else-scalar-epilogue",
231                          "prefer tail-folding, create scalar epilogue if tail "
232                          "folding fails."),
233               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
234                          "predicate-dont-vectorize",
235                          "prefers tail-folding, don't attempt vectorization if "
236                          "tail-folding fails.")));
237 
238 static cl::opt<bool> MaximizeBandwidth(
239     "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
240     cl::desc("Maximize bandwidth when selecting the vectorization factor, which "
241              "will be determined by the smallest type in the loop."));
242 
243 static cl::opt<bool> EnableInterleavedMemAccesses(
244     "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
245     cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
246 
247 /// An interleave-group may need masking if it resides in a block that needs
248 /// predication, or in order to mask away gaps.
249 static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
250     "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
251     cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));
252 
253 static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
254     "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
255     cl::desc("We don't interleave loops with an estimated constant trip count "
256              "below this number"));
257 
258 static cl::opt<unsigned> ForceTargetNumScalarRegs(
259     "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
260     cl::desc("A flag that overrides the target's number of scalar registers."));
261 
262 static cl::opt<unsigned> ForceTargetNumVectorRegs(
263     "force-target-num-vector-regs", cl::init(0), cl::Hidden,
264     cl::desc("A flag that overrides the target's number of vector registers."));
265 
266 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
267     "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
268     cl::desc("A flag that overrides the target's max interleave factor for "
269              "scalar loops."));
270 
271 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
272     "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
273     cl::desc("A flag that overrides the target's max interleave factor for "
274              "vectorized loops."));
275 
276 static cl::opt<unsigned> ForceTargetInstructionCost(
277     "force-target-instruction-cost", cl::init(0), cl::Hidden,
278     cl::desc("A flag that overrides the target's expected cost for "
279              "an instruction to a single constant value. Mostly "
280              "useful for getting consistent testing."));
281 
282 static cl::opt<bool> ForceTargetSupportsScalableVectors(
283     "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
284     cl::desc(
285         "Pretend that scalable vectors are supported, even if the target does "
286         "not support them. This flag should only be used for testing."));
287 
288 static cl::opt<unsigned> SmallLoopCost(
289     "small-loop-cost", cl::init(20), cl::Hidden,
290     cl::desc(
291         "The cost of a loop that is considered 'small' by the interleaver."));
292 
293 static cl::opt<bool> LoopVectorizeWithBlockFrequency(
294     "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
295     cl::desc("Enable the use of the block frequency analysis to access PGO "
296              "heuristics minimizing code growth in cold regions and being more "
297              "aggressive in hot regions."));
298 
299 // Runtime interleave loops for load/store throughput.
300 static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
301     "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
302     cl::desc(
303         "Enable runtime interleaving until load/store ports are saturated"));
304 
305 /// Interleave small loops with scalar reductions.
306 static cl::opt<bool> InterleaveSmallLoopScalarReduction(
307     "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
308     cl::desc("Enable interleaving for loops with small iteration counts that "
309              "contain scalar reductions to expose ILP."));
310 
311 /// The number of stores in a loop that are allowed to need predication.
312 static cl::opt<unsigned> NumberOfStoresToPredicate(
313     "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
314     cl::desc("Max number of stores to be predicated behind an if."));
315 
316 static cl::opt<bool> EnableIndVarRegisterHeur(
317     "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
318     cl::desc("Count the induction variable only once when interleaving"));
319 
320 static cl::opt<bool> EnableCondStoresVectorization(
321     "enable-cond-stores-vec", cl::init(true), cl::Hidden,
322     cl::desc("Enable if-predication of stores during vectorization."));
323 
324 static cl::opt<unsigned> MaxNestedScalarReductionIC(
325     "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
326     cl::desc("The maximum interleave count to use when interleaving a scalar "
327              "reduction in a nested loop."));
328 
329 static cl::opt<bool>
330     PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
331                            cl::Hidden,
332                            cl::desc("Prefer in-loop vector reductions, "
333                                     "overriding the target's preference."));
334 
335 cl::opt<bool> EnableStrictReductions(
336     "enable-strict-reductions", cl::init(false), cl::Hidden,
337     cl::desc("Enable the vectorization of loops with in-order (strict) "
338              "FP reductions"));
339 
340 static cl::opt<bool> PreferPredicatedReductionSelect(
341     "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
342     cl::desc(
343         "Prefer predicating a reduction operation over an after-loop select."));
344 
345 cl::opt<bool> EnableVPlanNativePath(
346     "enable-vplan-native-path", cl::init(false), cl::Hidden,
347     cl::desc("Enable VPlan-native vectorization path with "
348              "support for outer loop vectorization."));
349 
350 // FIXME: Remove this switch once we have divergence analysis. Currently we
351 // assume divergent non-backedge branches when this switch is true.
352 cl::opt<bool> EnableVPlanPredication(
353     "enable-vplan-predication", cl::init(false), cl::Hidden,
354     cl::desc("Enable VPlan-native vectorization path predicator with "
355              "support for outer loop vectorization."));
356 
357 // This flag enables the stress testing of the VPlan H-CFG construction in the
358 // VPlan-native vectorization path. It must be used in conjunction with
359 // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
360 // verification of the H-CFGs built.
361 static cl::opt<bool> VPlanBuildStressTest(
362     "vplan-build-stress-test", cl::init(false), cl::Hidden,
363     cl::desc(
364         "Build VPlan for every supported loop nest in the function and bail "
365         "out right after the build (stress test the VPlan H-CFG construction "
366         "in the VPlan-native vectorization path)."));
367 
368 cl::opt<bool> llvm::EnableLoopInterleaving(
369     "interleave-loops", cl::init(true), cl::Hidden,
370     cl::desc("Enable loop interleaving in Loop vectorization passes"));
371 cl::opt<bool> llvm::EnableLoopVectorization(
372     "vectorize-loops", cl::init(true), cl::Hidden,
373     cl::desc("Run the Loop vectorization passes"));
374 
375 cl::opt<bool> PrintVPlansInDotFormat(
376     "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
377     cl::desc("Use dot format instead of plain text when dumping VPlans"));
378 
379 /// A helper function that returns true if the given type is irregular. The
380 /// type is irregular if its allocated size doesn't equal the store size of an
381 /// element of the corresponding vector type.
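/// For example, i32 (32-bit size and 32-bit alloc size) is regular, while i1
/// (1-bit size but 8-bit alloc size) would be irregular under a typical data
/// layout.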
382 static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
383   // Determine if an array of N elements of type Ty is "bitcast compatible"
384   // with a <N x Ty> vector.
385   // This is only true if there is no padding between the array elements.
386   return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
387 }
388 
389 /// A helper function that returns the reciprocal of the block probability of
390 /// predicated blocks. If we return X, we are assuming the predicated block
391 /// will execute once for every X iterations of the loop header.
392 ///
393 /// TODO: We should use actual block probability here, if available. Currently,
394 ///       we always assume predicated blocks have a 50% chance of executing.
395 static unsigned getReciprocalPredBlockProb() { return 2; }
396 
397 /// A helper function that returns an integer or floating-point constant with
398 /// value C.
399 static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
400   return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
401                            : ConstantFP::get(Ty, C);
402 }
403 
404 /// Returns "best known" trip count for the specified loop \p L as defined by
405 /// the following procedure:
406 ///   1) Returns exact trip count if it is known.
407 ///   2) Returns expected trip count according to profile data if any.
408 ///   3) Returns upper bound estimate if it is known.
409 ///   4) Returns None if all of the above failed.
410 static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
411   // Check if exact trip count is known.
412   if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
413     return ExpectedTC;
414 
415   // Check if there is an expected trip count available from profile data.
416   if (LoopVectorizeWithBlockFrequency)
417     if (auto EstimatedTC = getLoopEstimatedTripCount(L))
418       return EstimatedTC;
419 
420   // Check if upper bound estimate is known.
421   if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
422     return ExpectedTC;
423 
424   return None;
425 }
426 
427 // Forward declare GeneratedRTChecks.
428 class GeneratedRTChecks;
429 
430 namespace llvm {
431 
432 /// InnerLoopVectorizer vectorizes loops which contain only one basic
433 /// block to a specified vectorization factor (VF).
434 /// This class performs the widening of scalars into vectors, or multiple
435 /// scalars. This class also implements the following features:
436 /// * It inserts an epilogue loop for handling loops that don't have iteration
437 ///   counts that are known to be a multiple of the vectorization factor.
438 /// * It handles the code generation for reduction variables.
439 /// * Scalarization (implementation using scalars) of un-vectorizable
440 ///   instructions.
441 /// InnerLoopVectorizer does not perform any vectorization-legality
442 /// checks, and relies on the caller to check for the different legality
443 /// aspects. The InnerLoopVectorizer relies on the
444 /// LoopVectorizationLegality class to provide information about the induction
445 /// and reduction variables that were found for a given vectorization factor.
446 class InnerLoopVectorizer {
447 public:
448   InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
449                       LoopInfo *LI, DominatorTree *DT,
450                       const TargetLibraryInfo *TLI,
451                       const TargetTransformInfo *TTI, AssumptionCache *AC,
452                       OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
453                       unsigned UnrollFactor, LoopVectorizationLegality *LVL,
454                       LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
455                       ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
456       : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
457         AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
458         Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
459         PSI(PSI), RTChecks(RTChecks) {
460     // Query this against the original loop and save it here because the profile
461     // of the original loop header may change as the transformation happens.
462     OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
463         OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
464   }
465 
466   virtual ~InnerLoopVectorizer() = default;
467 
468   /// Create a new empty loop that will contain vectorized instructions later
469   /// on, while the old loop will be used as the scalar remainder. Control flow
470   /// is generated around the vectorized (and scalar epilogue) loops consisting
471   /// of various checks and bypasses. Return the pre-header block of the new
472   /// loop.
473   /// In the case of epilogue vectorization, this function is overridden to
474   /// handle the more complex control flow around the loops.
475   virtual BasicBlock *createVectorizedLoopSkeleton();
476 
477   /// Widen a single instruction within the innermost loop.
478   void widenInstruction(Instruction &I, VPValue *Def, VPUser &Operands,
479                         VPTransformState &State);
480 
481   /// Widen a single call instruction within the innermost loop.
482   void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
483                             VPTransformState &State);
484 
485   /// Widen a single select instruction within the innermost loop.
486   void widenSelectInstruction(SelectInst &I, VPValue *VPDef, VPUser &Operands,
487                               bool InvariantCond, VPTransformState &State);
488 
489   /// Fix the vectorized code, taking care of header phis, live-outs, and more.
490   void fixVectorizedLoop(VPTransformState &State);
491 
492   // Return true if any runtime check is added.
493   bool areSafetyChecksAdded() { return AddedSafetyChecks; }
494 
495   /// A type for vectorized values in the new loop. Each value from the
496   /// original loop, when vectorized, is represented by UF vector values in the
497   /// new unrolled loop, where UF is the unroll factor.
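  /// For example, with UF = 2 each original value maps to two vector values,
  /// one per unrolled copy of the vector loop body.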
498   using VectorParts = SmallVector<Value *, 2>;
499 
500   /// Vectorize a single GetElementPtrInst based on information gathered and
501   /// decisions taken during planning.
502   void widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, VPUser &Indices,
503                 unsigned UF, ElementCount VF, bool IsPtrLoopInvariant,
504                 SmallBitVector &IsIndexLoopInvariant, VPTransformState &State);
505 
506   /// Vectorize a single PHINode in a block. This method handles the induction
507   /// variable canonicalization. It supports both VF = 1 for unrolled loops and
508   /// arbitrary length vectors.
509   void widenPHIInstruction(Instruction *PN, RecurrenceDescriptor *RdxDesc,
510                            VPWidenPHIRecipe *PhiR, VPTransformState &State);
511 
512   /// A helper function to scalarize a single Instruction in the innermost loop.
513   /// Generates a scalar instance of the instruction for the lane and part
514   /// given in \p Instance.
515   /// Uses the VPValue operands from \p Operands instead of \p
516   /// Instr's operands.
517   void scalarizeInstruction(Instruction *Instr, VPValue *Def, VPUser &Operands,
518                             const VPIteration &Instance, bool IfPredicateInstr,
519                             VPTransformState &State);
520 
521   /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
522   /// is provided, the integer induction variable will first be truncated to
523   /// the corresponding type.
524   void widenIntOrFpInduction(PHINode *IV, Value *Start, TruncInst *Trunc,
525                              VPValue *Def, VPValue *CastDef,
526                              VPTransformState &State);
527 
528   /// Construct the vector value of a scalarized value \p Def one lane at a time.
529   void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
530                                  VPTransformState &State);
531 
532   /// Try to vectorize interleaved access group \p Group with the base address
533   /// given in \p Addr, optionally masking the vector operations if \p
534   /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
535   /// values in the vectorized loop.
536   void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
537                                 ArrayRef<VPValue *> VPDefs,
538                                 VPTransformState &State, VPValue *Addr,
539                                 ArrayRef<VPValue *> StoredValues,
540                                 VPValue *BlockInMask = nullptr);
541 
542   /// Vectorize Load and Store instructions with the base address given in \p
543   /// Addr, optionally masking the vector operations if \p BlockInMask is
544   /// non-null. Use \p State to translate given VPValues to IR values in the
545   /// vectorized loop.
546   void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
547                                   VPValue *Def, VPValue *Addr,
548                                   VPValue *StoredValue, VPValue *BlockInMask);
549 
550   /// Set the debug location in the builder using the debug location in
551   /// the instruction.
552   void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);
553 
554   /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
555   void fixNonInductionPHIs(VPTransformState &State);
556 
557   /// Returns true if the reordering of FP operations is not allowed, but we are
558   /// able to vectorize with strict in-order reductions for the given RdxDesc.
559   bool useOrderedReductions(RecurrenceDescriptor &RdxDesc);
560 
561   /// Create a broadcast instruction. This method generates a broadcast
562   /// instruction (shuffle) for loop invariant values and for the induction
563   /// value. If this is the induction variable then we extend it to N, N+1, ...
564   /// this is needed because each iteration in the loop corresponds to a SIMD
565   /// element.
566   virtual Value *getBroadcastInstrs(Value *V);
567 
568 protected:
569   friend class LoopVectorizationPlanner;
570 
571   /// A small list of PHINodes.
572   using PhiVector = SmallVector<PHINode *, 4>;
573 
574   /// A type for scalarized values in the new loop. Each value from the
575   /// original loop, when scalarized, is represented by UF x VF scalar values
576   /// in the new unrolled loop, where UF is the unroll factor and VF is the
577   /// vectorization factor.
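  /// For example, UF = 2 and VF = 4 give two groups of four scalar values per
  /// original value, one group per unrolled part.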
578   using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;
579 
580   /// Set up the values of the IVs correctly when exiting the vector loop.
581   void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
582                     Value *CountRoundDown, Value *EndValue,
583                     BasicBlock *MiddleBlock);
584 
585   /// Create a new induction variable inside L.
586   PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
587                                    Value *Step, Instruction *DL);
588 
589   /// Handle all cross-iteration phis in the header.
590   void fixCrossIterationPHIs(VPTransformState &State);
591 
592   /// Fix a first-order recurrence. This is the second phase of vectorizing
593   /// this phi node.
594   void fixFirstOrderRecurrence(VPWidenPHIRecipe *PhiR, VPTransformState &State);
595 
596   /// Fix a reduction cross-iteration phi. This is the second phase of
597   /// vectorizing this phi node.
598   void fixReduction(VPWidenPHIRecipe *Phi, VPTransformState &State);
599 
600   /// Clear NSW/NUW flags from reduction instructions if necessary.
601   void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
602                                VPTransformState &State);
603 
604   /// Fix up the LCSSA phi nodes in the unique exit block.  This simply
605   /// means we need to add the appropriate incoming value from the middle
606   /// block as exiting edges from the scalar epilogue loop (if present) are
607   /// already in place, and we exit the vector loop exclusively to the middle
608   /// block.
609   void fixLCSSAPHIs(VPTransformState &State);
610 
611   /// Iteratively sink the scalarized operands of a predicated instruction into
612   /// the block that was created for it.
613   void sinkScalarOperands(Instruction *PredInst);
614 
615   /// Shrinks vector element sizes to the smallest bitwidth they can be legally
616   /// represented as.
617   void truncateToMinimalBitwidths(VPTransformState &State);
618 
619   /// This function adds
620   /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
621   /// to each vector element of Val. The sequence starts at StartIdx.
622   /// \p Opcode is relevant for FP induction variables.
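  /// For example, for an integer input splat <v, v, v, v> with StartIdx = 0 and
  /// Step = S, the result is <v, v + S, v + 2*S, v + 3*S>.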
623   virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
624                                Instruction::BinaryOps Opcode =
625                                Instruction::BinaryOpsEnd);
626 
627   /// Compute scalar induction steps. \p ScalarIV is the scalar induction
628   /// variable on which to base the steps, \p Step is the size of the step, and
629   /// \p EntryVal is the value from the original loop that maps to the steps.
630   /// Note that \p EntryVal doesn't have to be an induction variable - it
631   /// can also be a truncate instruction.
632   void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
633                         const InductionDescriptor &ID, VPValue *Def,
634                         VPValue *CastDef, VPTransformState &State);
635 
636   /// Create a vector induction phi node based on an existing scalar one. \p
637   /// EntryVal is the value from the original loop that maps to the vector phi
638   /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
639   /// truncate instruction, instead of widening the original IV, we widen a
640   /// version of the IV truncated to \p EntryVal's type.
641   void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
642                                        Value *Step, Value *Start,
643                                        Instruction *EntryVal, VPValue *Def,
644                                        VPValue *CastDef,
645                                        VPTransformState &State);
646 
647   /// Returns true if an instruction \p I should be scalarized instead of
648   /// vectorized for the chosen vectorization factor.
649   bool shouldScalarizeInstruction(Instruction *I) const;
650 
651   /// Returns true if we should generate a scalar version of \p IV.
652   bool needsScalarInduction(Instruction *IV) const;
653 
654   /// If there is a cast involved in the induction variable \p ID, which should
655   /// be ignored in the vectorized loop body, this function records the
656   /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
657   /// cast. We had already proved that the casted Phi is equal to the uncasted
658   /// Phi in the vectorized loop (under a runtime guard), and therefore
659   /// there is no need to vectorize the cast - the same value can be used in the
660   /// vector loop for both the Phi and the cast.
661   /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
662   /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
663   ///
664   /// \p EntryVal is the value from the original loop that maps to the vector
665   /// phi node and is used to distinguish what is the IV currently being
666   /// processed - original one (if \p EntryVal is a phi corresponding to the
667   /// original IV) or the "newly-created" one based on the proof mentioned above
668   /// (see also buildScalarSteps() and createVectorIntOrFpInductionPHI()). In the
669   /// latter case \p EntryVal is a TruncInst and we must not record anything for
670   /// that IV, but it's error-prone to expect callers of this routine to care
671   /// about that, hence this explicit parameter.
672   void recordVectorLoopValueForInductionCast(
673       const InductionDescriptor &ID, const Instruction *EntryVal,
674       Value *VectorLoopValue, VPValue *CastDef, VPTransformState &State,
675       unsigned Part, unsigned Lane = UINT_MAX);
676 
677   /// Generate a shuffle sequence that will reverse the vector Vec.
678   virtual Value *reverseVector(Value *Vec);
679 
680   /// Returns (and creates if needed) the original loop trip count.
681   Value *getOrCreateTripCount(Loop *NewLoop);
682 
683   /// Returns (and creates if needed) the trip count of the widened loop.
684   Value *getOrCreateVectorTripCount(Loop *NewLoop);
685 
686   /// Returns a bitcasted value to the requested vector type.
687   /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
688   Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
689                                 const DataLayout &DL);
690 
691   /// Emit a bypass check to see if the vector trip count is zero, including if
692   /// it overflows.
693   void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
694 
695   /// Emit a bypass check to see if all of the SCEV assumptions we've
696   /// had to make are correct. Returns the block containing the checks or
697   /// nullptr if no checks have been added.
698   BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass);
699 
700   /// Emit bypass checks to check any memory assumptions we may have made.
701   /// Returns the block containing the checks or nullptr if no checks have been
702   /// added.
703   BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);
704 
705   /// Compute the transformed value of Index at offset StartValue using step
706   /// StepValue.
707   /// For integer induction, returns StartValue + Index * StepValue.
708   /// For pointer induction, returns StartValue[Index * StepValue].
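  /// For example, an integer induction with StartValue = 10 and StepValue = 3
  /// maps Index = 4 to 10 + 4 * 3 = 22.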
709   /// FIXME: The newly created binary instructions should contain nsw/nuw
710   /// flags, which can be found from the original scalar operations.
711   Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
712                               const DataLayout &DL,
713                               const InductionDescriptor &ID) const;
714 
715   /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
716   /// vector loop preheader, middle block and scalar preheader. Also
717   /// allocate a loop object for the new vector loop and return it.
718   Loop *createVectorLoopSkeleton(StringRef Prefix);
719 
720   /// Create new phi nodes for the induction variables so that the scalar
721   /// epilogue resumes the iteration count from where the vectorized loop left
722   /// off (given by \p VectorTripCount).
723   /// In cases where the loop skeleton is more complicated (e.g. epilogue
724   /// vectorization) and the resume values can come from an additional bypass
725   /// block, the \p AdditionalBypass pair provides information about the bypass
726   /// block and the end value on the edge from bypass to this loop.
727   void createInductionResumeValues(
728       Loop *L, Value *VectorTripCount,
729       std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});
730 
731   /// Complete the loop skeleton by adding debug MDs, creating appropriate
732   /// conditional branches in the middle block, preparing the builder and
733   /// running the verifier. Take in the vector loop \p L as argument, and return
734   /// the preheader of the completed vector loop.
735   BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);
736 
737   /// Add additional metadata to \p To that was not present on \p Orig.
738   ///
739   /// Currently this is used to add the noalias annotations based on the
740   /// inserted memchecks.  Use this for instructions that are *cloned* into the
741   /// vector loop.
742   void addNewMetadata(Instruction *To, const Instruction *Orig);
743 
744   /// Add metadata from one instruction to another.
745   ///
746   /// This includes both the original MDs from \p From and additional ones (\see
747   /// addNewMetadata).  Use this for *newly created* instructions in the vector
748   /// loop.
749   void addMetadata(Instruction *To, Instruction *From);
750 
751   /// Similar to the previous function but it adds the metadata to a
752   /// vector of instructions.
753   void addMetadata(ArrayRef<Value *> To, Instruction *From);
754 
755   /// Allow subclasses to override and print debug traces before/after vplan
756   /// execution, when trace information is requested.
757   virtual void printDebugTracesAtStart() {}
758   virtual void printDebugTracesAtEnd() {}
759 
760   /// The original loop.
761   Loop *OrigLoop;
762 
763   /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
764   /// dynamic knowledge to simplify SCEV expressions and converts them to a
765   /// more usable form.
766   PredicatedScalarEvolution &PSE;
767 
768   /// Loop Info.
769   LoopInfo *LI;
770 
771   /// Dominator Tree.
772   DominatorTree *DT;
773 
774   /// Alias Analysis.
775   AAResults *AA;
776 
777   /// Target Library Info.
778   const TargetLibraryInfo *TLI;
779 
780   /// Target Transform Info.
781   const TargetTransformInfo *TTI;
782 
783   /// Assumption Cache.
784   AssumptionCache *AC;
785 
786   /// Interface to emit optimization remarks.
787   OptimizationRemarkEmitter *ORE;
788 
789   /// LoopVersioning.  It's only set up (non-null) if memchecks were
790   /// used.
791   ///
792   /// This is currently only used to add no-alias metadata based on the
793   /// memchecks.  The actual versioning is performed manually.
794   std::unique_ptr<LoopVersioning> LVer;
795 
796   /// The vectorization SIMD factor to use. Each vector will have this many
797   /// vector elements.
798   ElementCount VF;
799 
800   /// The vectorization unroll factor to use. Each scalar is vectorized to this
801   /// many different vector instructions.
802   unsigned UF;
803 
804   /// The builder that we use
805   IRBuilder<> Builder;
806 
807   // --- Vectorization state ---
808 
809   /// The vector-loop preheader.
810   BasicBlock *LoopVectorPreHeader;
811 
812   /// The scalar-loop preheader.
813   BasicBlock *LoopScalarPreHeader;
814 
815   /// Middle Block between the vector and the scalar.
816   BasicBlock *LoopMiddleBlock;
817 
818   /// The (unique) ExitBlock of the scalar loop.  Note that
819   /// there can be multiple exiting edges reaching this block.
820   BasicBlock *LoopExitBlock;
821 
822   /// The vector loop body.
823   BasicBlock *LoopVectorBody;
824 
825   /// The scalar loop body.
826   BasicBlock *LoopScalarBody;
827 
828   /// A list of all bypass blocks. The first block is the entry of the loop.
829   SmallVector<BasicBlock *, 4> LoopBypassBlocks;
830 
831   /// The new Induction variable which was added to the new block.
832   PHINode *Induction = nullptr;
833 
834   /// The induction variable of the old basic block.
835   PHINode *OldInduction = nullptr;
836 
837   /// Store instructions that were predicated.
838   SmallVector<Instruction *, 4> PredicatedInstructions;
839 
840   /// Trip count of the original loop.
841   Value *TripCount = nullptr;
842 
843   /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
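  /// (e.g., a TripCount of 10 with VF*UF = 4 gives a VectorTripCount of 8).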
844   Value *VectorTripCount = nullptr;
845 
846   /// The legality analysis.
847   LoopVectorizationLegality *Legal;
848 
849   /// The profitability analysis.
850   LoopVectorizationCostModel *Cost;
851 
852   // Record whether runtime checks are added.
853   bool AddedSafetyChecks = false;
854 
855   // Holds the end values for each induction variable. We save the end values
856   // so we can later fix up the external users of the induction variables.
857   DenseMap<PHINode *, Value *> IVEndValues;
858 
859   // Vector of original scalar PHIs whose corresponding widened PHIs need to be
860   // fixed up at the end of vector code generation.
861   SmallVector<PHINode *, 8> OrigPHIsToFix;
862 
863   /// BFI and PSI are used to check for profile guided size optimizations.
864   BlockFrequencyInfo *BFI;
865   ProfileSummaryInfo *PSI;
866 
867   // Whether this loop should be optimized for size based on profile guided size
868   // optimizations.
869   bool OptForSizeBasedOnProfile;
870 
871   /// Structure to hold information about generated runtime checks, responsible
872   /// for cleaning up the checks if vectorization turns out to be unprofitable.
873   GeneratedRTChecks &RTChecks;
874 };
875 
876 class InnerLoopUnroller : public InnerLoopVectorizer {
877 public:
878   InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
879                     LoopInfo *LI, DominatorTree *DT,
880                     const TargetLibraryInfo *TLI,
881                     const TargetTransformInfo *TTI, AssumptionCache *AC,
882                     OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
883                     LoopVectorizationLegality *LVL,
884                     LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
885                     ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
886       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
887                             ElementCount::getFixed(1), UnrollFactor, LVL, CM,
888                             BFI, PSI, Check) {}
889 
890 private:
891   Value *getBroadcastInstrs(Value *V) override;
892   Value *getStepVector(Value *Val, int StartIdx, Value *Step,
893                        Instruction::BinaryOps Opcode =
894                        Instruction::BinaryOpsEnd) override;
895   Value *reverseVector(Value *Vec) override;
896 };
897 
898 /// Encapsulate information regarding vectorization of a loop and its epilogue.
899 /// This information is meant to be updated and used across two stages of
900 /// epilogue vectorization.
901 struct EpilogueLoopVectorizationInfo {
902   ElementCount MainLoopVF = ElementCount::getFixed(0);
903   unsigned MainLoopUF = 0;
904   ElementCount EpilogueVF = ElementCount::getFixed(0);
905   unsigned EpilogueUF = 0;
906   BasicBlock *MainLoopIterationCountCheck = nullptr;
907   BasicBlock *EpilogueIterationCountCheck = nullptr;
908   BasicBlock *SCEVSafetyCheck = nullptr;
909   BasicBlock *MemSafetyCheck = nullptr;
910   Value *TripCount = nullptr;
911   Value *VectorTripCount = nullptr;
912 
913   EpilogueLoopVectorizationInfo(unsigned MVF, unsigned MUF, unsigned EVF,
914                                 unsigned EUF)
915       : MainLoopVF(ElementCount::getFixed(MVF)), MainLoopUF(MUF),
916         EpilogueVF(ElementCount::getFixed(EVF)), EpilogueUF(EUF) {
917     assert(EUF == 1 &&
918            "A high UF for the epilogue loop is likely not beneficial.");
919   }
920 };
921 
922 /// An extension of the inner loop vectorizer that creates a skeleton for a
923 /// vectorized loop that has its epilogue (residual) also vectorized.
924 /// The idea is to run the vplan on a given loop twice, first to set up the
925 /// skeleton and vectorize the main loop, and second to complete the skeleton
926 /// from the first step and vectorize the epilogue.  This is achieved by
927 /// deriving two concrete strategy classes from this base class and invoking
928 /// them in succession from the loop vectorizer planner.
929 class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
930 public:
931   InnerLoopAndEpilogueVectorizer(
932       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
933       DominatorTree *DT, const TargetLibraryInfo *TLI,
934       const TargetTransformInfo *TTI, AssumptionCache *AC,
935       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
936       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
937       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
938       GeneratedRTChecks &Checks)
939       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
940                             EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
941                             Checks),
942         EPI(EPI) {}
943 
944   // Override this function to handle the more complex control flow around the
945   // three loops.
946   BasicBlock *createVectorizedLoopSkeleton() final override {
947     return createEpilogueVectorizedLoopSkeleton();
948   }
949 
950   /// The interface for creating a vectorized skeleton using one of two
951   /// different strategies, each corresponding to one execution of the vplan
952   /// as described above.
953   virtual BasicBlock *createEpilogueVectorizedLoopSkeleton() = 0;
954 
955   /// Holds and updates state information required to vectorize the main loop
956   /// and its epilogue in two separate passes. This setup helps us avoid
957   /// regenerating and recomputing runtime safety checks. It also helps us to
958   /// shorten the iteration-count-check path length for the cases where the
959   /// iteration count of the loop is so small that the main vector loop is
960   /// completely skipped.
961   EpilogueLoopVectorizationInfo &EPI;
962 };
963 
964 /// A specialized derived class of inner loop vectorizer that performs
965 /// vectorization of *main* loops in the process of vectorizing loops and their
966 /// epilogues.
967 class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
968 public:
969   EpilogueVectorizerMainLoop(
970       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
971       DominatorTree *DT, const TargetLibraryInfo *TLI,
972       const TargetTransformInfo *TTI, AssumptionCache *AC,
973       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
974       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
975       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
976       GeneratedRTChecks &Check)
977       : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
978                                        EPI, LVL, CM, BFI, PSI, Check) {}
979   /// Implements the interface for creating a vectorized skeleton using the
980   /// *main loop* strategy (i.e. the first pass of vplan execution).
981   BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;
982 
983 protected:
984   /// Emits an iteration count bypass check once for the main loop (when \p
985   /// ForEpilogue is false) and once for the epilogue loop (when \p
986   /// ForEpilogue is true).
987   BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
988                                              bool ForEpilogue);
989   void printDebugTracesAtStart() override;
990   void printDebugTracesAtEnd() override;
991 };
992 
993 // A specialized derived class of inner loop vectorizer that performs
994 // vectorization of *epilogue* loops in the process of vectorizing loops and
995 // their epilogues.
996 class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
997 public:
998   EpilogueVectorizerEpilogueLoop(
999       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
1000       DominatorTree *DT, const TargetLibraryInfo *TLI,
1001       const TargetTransformInfo *TTI, AssumptionCache *AC,
1002       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
1003       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
1004       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
1005       GeneratedRTChecks &Checks)
1006       : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
1007                                        EPI, LVL, CM, BFI, PSI, Checks) {}
1008   /// Implements the interface for creating a vectorized skeleton using the
1009   /// *epilogue loop* strategy (i.e. the second pass of vplan execution).
1010   BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;
1011 
1012 protected:
1013   /// Emits an iteration count bypass check after the main vector loop has
1014   /// finished to see if there are any iterations left to execute by either
1015   /// the vector epilogue or the scalar epilogue.
1016   BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
1017                                                       BasicBlock *Bypass,
1018                                                       BasicBlock *Insert);
1019   void printDebugTracesAtStart() override;
1020   void printDebugTracesAtEnd() override;
1021 };
1022 } // end namespace llvm
1023 
1024 /// Look for a meaningful debug location on the instruction or its
1025 /// operands.
1026 static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
1027   if (!I)
1028     return I;
1029 
1030   DebugLoc Empty;
1031   if (I->getDebugLoc() != Empty)
1032     return I;
1033 
1034   for (Use &Op : I->operands()) {
1035     if (Instruction *OpInst = dyn_cast<Instruction>(Op))
1036       if (OpInst->getDebugLoc() != Empty)
1037         return OpInst;
1038   }
1039 
1040   return I;
1041 }
1042 
1043 void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
1044   if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
1045     const DILocation *DIL = Inst->getDebugLoc();
1046 
1047     // When an FSDiscriminator is enabled, we don't need to add the multiply
1048     // factors to the discriminators.
1049     if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
1050         !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
1051       // FIXME: For scalable vectors, assume vscale=1.
1052       auto NewDIL =
1053           DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
1054       if (NewDIL)
1055         B.SetCurrentDebugLocation(NewDIL.getValue());
1056       else
1057         LLVM_DEBUG(dbgs()
1058                    << "Failed to create new discriminator: "
1059                    << DIL->getFilename() << " Line: " << DIL->getLine());
1060     } else
1061       B.SetCurrentDebugLocation(DIL);
1062   } else
1063     B.SetCurrentDebugLocation(DebugLoc());
1064 }
1065 
1066 /// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
1067 /// is passed, the message relates to that particular instruction.
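/// For example, a call with Prefix = "Not vectorizing: ", a hypothetical
/// DebugMsg = "loop is too small" and no instruction prints
/// "LV: Not vectorizing: loop is too small." followed by a newline.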
1068 #ifndef NDEBUG
1069 static void debugVectorizationMessage(const StringRef Prefix,
1070                                       const StringRef DebugMsg,
1071                                       Instruction *I) {
1072   dbgs() << "LV: " << Prefix << DebugMsg;
1073   if (I != nullptr)
1074     dbgs() << " " << *I;
1075   else
1076     dbgs() << '.';
1077   dbgs() << '\n';
1078 }
1079 #endif
1080 
1081 /// Create an analysis remark that explains why vectorization failed
1082 ///
1083 /// \p PassName is the name of the pass (e.g. can be AlwaysPrint).  \p
1084 /// RemarkName is the identifier for the remark.  If \p I is passed it is an
1085 /// instruction that prevents vectorization.  Otherwise \p TheLoop is used for
1086 /// the location of the remark.  \return the remark object that can be
1087 /// streamed to.
1088 static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
1089     StringRef RemarkName, Loop *TheLoop, Instruction *I) {
1090   Value *CodeRegion = TheLoop->getHeader();
1091   DebugLoc DL = TheLoop->getStartLoc();
1092 
1093   if (I) {
1094     CodeRegion = I->getParent();
1095     // If there is no debug location attached to the instruction, fall back to
1096     // using the loop's.
1097     if (I->getDebugLoc())
1098       DL = I->getDebugLoc();
1099   }
1100 
1101   return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
1102 }
1103 
1104 /// Return a value for Step multiplied by VF.
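/// For example, Step = 2 with a fixed VF of 4 yields the constant 8, while a
/// scalable VF of vscale x 4 yields the runtime value 8 * vscale.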
1105 static Value *createStepForVF(IRBuilder<> &B, Constant *Step, ElementCount VF) {
1106   assert(isa<ConstantInt>(Step) && "Expected an integer step");
1107   Constant *StepVal = ConstantInt::get(
1108       Step->getType(),
1109       cast<ConstantInt>(Step)->getSExtValue() * VF.getKnownMinValue());
1110   return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
1111 }
1112 
1113 namespace llvm {
1114 
1115 /// Return the runtime value for VF.
1116 Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF) {
1117   Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
1118   return VF.isScalable() ? B.CreateVScale(EC) : EC;
1119 }
1120 
1121 void reportVectorizationFailure(const StringRef DebugMsg,
1122                                 const StringRef OREMsg, const StringRef ORETag,
1123                                 OptimizationRemarkEmitter *ORE, Loop *TheLoop,
1124                                 Instruction *I) {
1125   LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
1126   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1127   ORE->emit(
1128       createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
1129       << "loop not vectorized: " << OREMsg);
1130 }
1131 
1132 void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
1133                              OptimizationRemarkEmitter *ORE, Loop *TheLoop,
1134                              Instruction *I) {
1135   LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
1136   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1137   ORE->emit(
1138       createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
1139       << Msg);
1140 }
1141 
1142 } // end namespace llvm
1143 
1144 #ifndef NDEBUG
1145 /// \return string containing a file name and a line # for the given loop.
1146 static std::string getDebugLocString(const Loop *L) {
1147   std::string Result;
1148   if (L) {
1149     raw_string_ostream OS(Result);
1150     if (const DebugLoc LoopDbgLoc = L->getStartLoc())
1151       LoopDbgLoc.print(OS);
1152     else
1153       // Just print the module name.
1154       OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
1155     OS.flush();
1156   }
1157   return Result;
1158 }
1159 #endif
1160 
1161 void InnerLoopVectorizer::addNewMetadata(Instruction *To,
1162                                          const Instruction *Orig) {
1163   // If the loop was versioned with memchecks, add the corresponding no-alias
1164   // metadata.
1165   if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
1166     LVer->annotateInstWithNoAlias(To, Orig);
1167 }
1168 
1169 void InnerLoopVectorizer::addMetadata(Instruction *To,
1170                                       Instruction *From) {
1171   propagateMetadata(To, From);
1172   addNewMetadata(To, From);
1173 }
1174 
1175 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
1176                                       Instruction *From) {
1177   for (Value *V : To) {
1178     if (Instruction *I = dyn_cast<Instruction>(V))
1179       addMetadata(I, From);
1180   }
1181 }
1182 
1183 namespace llvm {
1184 
// Hints for the loop vectorization cost model about how the scalar epilogue
// loop should be lowered.
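//
// For example (illustrative): a loop with 1003 iterations vectorized with
// VF = 4 runs 250 vector iterations; with a scalar epilogue the remaining 3
// iterations execute in the scalar loop, whereas the "not allowed" / "use
// predicate" variants instead require those iterations to be folded into the
// vector loop under a mask (or vectorization to be abandoned).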
1187 enum ScalarEpilogueLowering {
1188 
1189   // The default: allowing scalar epilogues.
1190   CM_ScalarEpilogueAllowed,
1191 
1192   // Vectorization with OptForSize: don't allow epilogues.
1193   CM_ScalarEpilogueNotAllowedOptSize,
1194 
  // A special case of vectorization with OptForSize: loops with a very small
1196   // trip count are considered for vectorization under OptForSize, thereby
1197   // making sure the cost of their loop body is dominant, free of runtime
1198   // guards and scalar iteration overheads.
1199   CM_ScalarEpilogueNotAllowedLowTripLoop,
1200 
1201   // Loop hint predicate indicating an epilogue is undesired.
1202   CM_ScalarEpilogueNotNeededUsePredicate,
1203 
  // Directive indicating we must either tail fold or not vectorize.
1205   CM_ScalarEpilogueNotAllowedUsePredicate
1206 };
1207 
1208 /// ElementCountComparator creates a total ordering for ElementCount
1209 /// for the purposes of using it in a set structure.
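/// All fixed-width counts order before all scalable counts; within each group
/// the order follows the known minimum number of elements, e.g.
/// (illustrative) 2 < 4 < 8 < vscale x 1 < vscale x 4.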
1210 struct ElementCountComparator {
1211   bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
1212     return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
1213            std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
1214   }
1215 };
1216 using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;
1217 
1218 /// LoopVectorizationCostModel - estimates the expected speedups due to
1219 /// vectorization.
/// In many cases vectorization is not profitable. This can happen for a
/// number of reasons. In this class we mainly attempt to predict the
1222 /// expected speedup/slowdowns due to the supported instruction set. We use the
1223 /// TargetTransformInfo to query the different backends for the cost of
1224 /// different operations.
1225 class LoopVectorizationCostModel {
1226 public:
1227   LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
1228                              PredicatedScalarEvolution &PSE, LoopInfo *LI,
1229                              LoopVectorizationLegality *Legal,
1230                              const TargetTransformInfo &TTI,
1231                              const TargetLibraryInfo *TLI, DemandedBits *DB,
1232                              AssumptionCache *AC,
1233                              OptimizationRemarkEmitter *ORE, const Function *F,
1234                              const LoopVectorizeHints *Hints,
1235                              InterleavedAccessInfo &IAI)
1236       : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
1237         TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
1238         Hints(Hints), InterleaveInfo(IAI) {}
1239 
1240   /// \return An upper bound for the vectorization factors (both fixed and
1241   /// scalable). If the factors are 0, vectorization and interleaving should be
1242   /// avoided up front.
1243   FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);
1244 
1245   /// \return True if runtime checks are required for vectorization, and false
1246   /// otherwise.
1247   bool runtimeChecksRequired();
1248 
1249   /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every VF in \p CandidateVFs. If UserVF is not zero,
1251   /// then this vectorization factor will be selected if vectorization is
1252   /// possible.
1253   VectorizationFactor
1254   selectVectorizationFactor(const ElementCountSet &CandidateVFs);
1255 
1256   VectorizationFactor
1257   selectEpilogueVectorizationFactor(const ElementCount MaxVF,
1258                                     const LoopVectorizationPlanner &LVP);
1259 
1260   /// Setup cost-based decisions for user vectorization factor.
1261   void selectUserVectorizationFactor(ElementCount UserVF) {
1262     collectUniformsAndScalars(UserVF);
1263     collectInstsToScalarize(UserVF);
1264   }
1265 
1266   /// \return The size (in bits) of the smallest and widest types in the code
1267   /// that needs to be vectorized. We ignore values that remain scalar such as
1268   /// 64 bit loop indices.
1269   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1270 
1271   /// \return The desired interleave count.
1272   /// If interleave count has been specified by metadata it will be returned.
1273   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1274   /// are the selected vectorization factor and the cost of the selected VF.
1275   unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);
1276 
  /// A memory access instruction may be vectorized in more than one way.
  /// The form of the instruction after vectorization depends on its cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decision map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
  /// avoid redundant calculations.
1284   void setCostBasedWideningDecision(ElementCount VF);
1285 
1286   /// A struct that represents some properties of the register usage
1287   /// of a loop.
1288   struct RegisterUsage {
1289     /// Holds the number of loop invariant values that are used in the loop.
1290     /// The key is ClassID of target-provided register class.
1291     SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
1292     /// Holds the maximum number of concurrent live intervals in the loop.
1293     /// The key is ClassID of target-provided register class.
1294     SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
1295   };
1296 
  /// \return Information about the register usage of the loop for the
1298   /// given vectorization factors.
1299   SmallVector<RegisterUsage, 8>
1300   calculateRegisterUsage(ArrayRef<ElementCount> VFs);
1301 
1302   /// Collect values we want to ignore in the cost model.
1303   void collectValuesToIgnore();
1304 
1305   /// Split reductions into those that happen in the loop, and those that happen
  /// outside. In-loop reductions are collected into InLoopReductionChains.
1307   void collectInLoopReductions();
1308 
1309   /// Returns true if we should use strict in-order reductions for the given
1310   /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
1311   /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
1312   /// of FP operations.
1313   bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) {
1314     return EnableStrictReductions && !Hints->allowReordering() &&
1315            RdxDesc.isOrdered();
1316   }
1317 
1318   /// \returns The smallest bitwidth each instruction can be represented with.
1319   /// The vector equivalents of these instructions should be truncated to this
1320   /// type.
1321   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1322     return MinBWs;
1323   }
1324 
1325   /// \returns True if it is more profitable to scalarize instruction \p I for
1326   /// vectorization factor \p VF.
1327   bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
1328     assert(VF.isVector() &&
1329            "Profitable to scalarize relevant only for VF > 1.");
1330 
1331     // Cost model is not run in the VPlan-native path - return conservative
1332     // result until this changes.
1333     if (EnableVPlanNativePath)
1334       return false;
1335 
1336     auto Scalars = InstsToScalarize.find(VF);
1337     assert(Scalars != InstsToScalarize.end() &&
1338            "VF not yet analyzed for scalarization profitability");
1339     return Scalars->second.find(I) != Scalars->second.end();
1340   }
1341 
1342   /// Returns true if \p I is known to be uniform after vectorization.
1343   bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
1344     if (VF.isScalar())
1345       return true;
1346 
1347     // Cost model is not run in the VPlan-native path - return conservative
1348     // result until this changes.
1349     if (EnableVPlanNativePath)
1350       return false;
1351 
1352     auto UniformsPerVF = Uniforms.find(VF);
1353     assert(UniformsPerVF != Uniforms.end() &&
1354            "VF not yet analyzed for uniformity");
1355     return UniformsPerVF->second.count(I);
1356   }
1357 
1358   /// Returns true if \p I is known to be scalar after vectorization.
1359   bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
1360     if (VF.isScalar())
1361       return true;
1362 
1363     // Cost model is not run in the VPlan-native path - return conservative
1364     // result until this changes.
1365     if (EnableVPlanNativePath)
1366       return false;
1367 
1368     auto ScalarsPerVF = Scalars.find(VF);
1369     assert(ScalarsPerVF != Scalars.end() &&
1370            "Scalar values are not calculated for VF");
1371     return ScalarsPerVF->second.count(I);
1372   }
1373 
1374   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1375   /// for vectorization factor \p VF.
1376   bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
1377     return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
1378            !isProfitableToScalarize(I, VF) &&
1379            !isScalarAfterVectorization(I, VF);
1380   }
1381 
1382   /// Decision that was taken during cost calculation for memory instruction.
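  /// For example (illustrative): in a loop over i, a load of A[i] is typically
  /// CM_Widen, a load of A[N - i] is CM_Widen_Reverse, the pair A[2*i] and
  /// A[2*i+1] may form a CM_Interleave group, and A[B[i]] can only be
  /// CM_GatherScatter or CM_Scalarize.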
1383   enum InstWidening {
1384     CM_Unknown,
1385     CM_Widen,         // For consecutive accesses with stride +1.
1386     CM_Widen_Reverse, // For consecutive accesses with stride -1.
1387     CM_Interleave,
1388     CM_GatherScatter,
1389     CM_Scalarize
1390   };
1391 
1392   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1393   /// instruction \p I and vector width \p VF.
1394   void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
1395                            InstructionCost Cost) {
1396     assert(VF.isVector() && "Expected VF >=2");
1397     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1398   }
1399 
1400   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1401   /// interleaving group \p Grp and vector width \p VF.
1402   void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
1403                            ElementCount VF, InstWidening W,
1404                            InstructionCost Cost) {
1405     assert(VF.isVector() && "Expected VF >=2");
    // Broadcast this decision to all instructions inside the group. But the
    // cost will be assigned to one instruction only.
1408     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1409       if (auto *I = Grp->getMember(i)) {
1410         if (Grp->getInsertPos() == I)
1411           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1412         else
1413           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1414       }
1415     }
1416   }
1417 
1418   /// Return the cost model decision for the given instruction \p I and vector
1419   /// width \p VF. Return CM_Unknown if this instruction did not pass
1420   /// through the cost modeling.
1421   InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
1422     assert(VF.isVector() && "Expected VF to be a vector VF");
1423     // Cost model is not run in the VPlan-native path - return conservative
1424     // result until this changes.
1425     if (EnableVPlanNativePath)
1426       return CM_GatherScatter;
1427 
1428     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1429     auto Itr = WideningDecisions.find(InstOnVF);
1430     if (Itr == WideningDecisions.end())
1431       return CM_Unknown;
1432     return Itr->second.first;
1433   }
1434 
1435   /// Return the vectorization cost for the given instruction \p I and vector
1436   /// width \p VF.
1437   InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
1438     assert(VF.isVector() && "Expected VF >=2");
1439     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1440     assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1441            "The cost is not calculated");
1442     return WideningDecisions[InstOnVF].second;
1443   }
1444 
1445   /// Return True if instruction \p I is an optimizable truncate whose operand
1446   /// is an induction variable. Such a truncate will be removed by adding a new
1447   /// induction variable with the destination type.
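  /// For example (illustrative): given an i64 induction %iv, the cast
  /// 'trunc i64 %iv to i32' can be optimized away by introducing a new i32
  /// induction variable instead of widening the truncate itself.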
1448   bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
1449     // If the instruction is not a truncate, return false.
1450     auto *Trunc = dyn_cast<TruncInst>(I);
1451     if (!Trunc)
1452       return false;
1453 
1454     // Get the source and destination types of the truncate.
1455     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1456     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1457 
1458     // If the truncate is free for the given types, return false. Replacing a
1459     // free truncate with an induction variable would add an induction variable
1460     // update instruction to each iteration of the loop. We exclude from this
1461     // check the primary induction variable since it will need an update
1462     // instruction regardless.
1463     Value *Op = Trunc->getOperand(0);
1464     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1465       return false;
1466 
1467     // If the truncated value is not an induction variable, return false.
1468     return Legal->isInductionPhi(Op);
1469   }
1470 
1471   /// Collects the instructions to scalarize for each predicated instruction in
1472   /// the loop.
1473   void collectInstsToScalarize(ElementCount VF);
1474 
1475   /// Collect Uniform and Scalar values for the given \p VF.
  /// The sets depend on the CM decision for Load/Store instructions, which may
  /// be vectorized as an interleaved access, a gather/scatter, or scalarized.
1478   void collectUniformsAndScalars(ElementCount VF) {
1479     // Do the analysis once.
1480     if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
1481       return;
1482     setCostBasedWideningDecision(VF);
1483     collectLoopUniforms(VF);
1484     collectLoopScalars(VF);
1485   }
1486 
  /// Returns true if the target machine supports a masked store operation
  /// for the given \p DataType and kind of access to \p Ptr.
1489   bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const {
1490     return Legal->isConsecutivePtr(Ptr) &&
1491            TTI.isLegalMaskedStore(DataType, Alignment);
1492   }
1493 
  /// Returns true if the target machine supports a masked load operation
  /// for the given \p DataType and kind of access to \p Ptr.
1496   bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const {
1497     return Legal->isConsecutivePtr(Ptr) &&
1498            TTI.isLegalMaskedLoad(DataType, Alignment);
1499   }
1500 
1501   /// Returns true if the target machine can represent \p V as a masked gather
1502   /// or scatter operation.
1503   bool isLegalGatherOrScatter(Value *V) {
1504     bool LI = isa<LoadInst>(V);
1505     bool SI = isa<StoreInst>(V);
1506     if (!LI && !SI)
1507       return false;
1508     auto *Ty = getLoadStoreType(V);
1509     Align Align = getLoadStoreAlignment(V);
1510     return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
1511            (SI && TTI.isLegalMaskedScatter(Ty, Align));
1512   }
1513 
1514   /// Returns true if the target machine supports all of the reduction
1515   /// variables found for the given VF.
1516   bool canVectorizeReductions(ElementCount VF) {
1517     return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
1518       const RecurrenceDescriptor &RdxDesc = Reduction.second;
1519       return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
1520     }));
1521   }
1522 
1523   /// Returns true if \p I is an instruction that will be scalarized with
1524   /// predication. Such instructions include conditional stores and
1525   /// instructions that may divide by zero.
  /// If a non-zero VF has been calculated, we check if I will be scalarized
  /// with predication for that VF.
1528   bool isScalarWithPredication(Instruction *I) const;
1529 
  /// Returns true if \p I is an instruction that will be predicated either
  /// through scalar predication or a masked load/store or gather/scatter.
  /// This is a superset of the instructions for which isScalarWithPredication
  /// returns true.
1533   bool isPredicatedInst(Instruction *I) {
1534     if (!blockNeedsPredication(I->getParent()))
1535       return false;
1536     // Loads and stores that need some form of masked operation are predicated
1537     // instructions.
1538     if (isa<LoadInst>(I) || isa<StoreInst>(I))
1539       return Legal->isMaskRequired(I);
1540     return isScalarWithPredication(I);
1541   }
1542 
1543   /// Returns true if \p I is a memory instruction with consecutive memory
1544   /// access that can be widened.
1545   bool
1546   memoryInstructionCanBeWidened(Instruction *I,
1547                                 ElementCount VF = ElementCount::getFixed(1));
1548 
1549   /// Returns true if \p I is a memory instruction in an interleaved-group
1550   /// of memory accesses that can be vectorized with wide vector loads/stores
1551   /// and shuffles.
1552   bool
1553   interleavedAccessCanBeWidened(Instruction *I,
1554                                 ElementCount VF = ElementCount::getFixed(1));
1555 
1556   /// Check if \p Instr belongs to any interleaved access group.
1557   bool isAccessInterleaved(Instruction *Instr) {
1558     return InterleaveInfo.isInterleaved(Instr);
1559   }
1560 
1561   /// Get the interleaved access group that \p Instr belongs to.
1562   const InterleaveGroup<Instruction> *
1563   getInterleavedAccessGroup(Instruction *Instr) {
1564     return InterleaveInfo.getInterleaveGroup(Instr);
1565   }
1566 
1567   /// Returns true if we're required to use a scalar epilogue for at least
1568   /// the final iteration of the original loop.
1569   bool requiresScalarEpilogue(ElementCount VF) const {
1570     if (!isScalarEpilogueAllowed())
1571       return false;
    // If we might exit from anywhere but the latch, we must run the exiting
    // iteration in scalar form.
1574     if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
1575       return true;
1576     return VF.isVector() && InterleaveInfo.requiresScalarEpilogue();
1577   }
1578 
  /// Returns true if a scalar epilogue is allowed, i.e. it has not been
  /// disallowed due to optsize or a loop hint annotation.
1581   bool isScalarEpilogueAllowed() const {
1582     return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1583   }
1584 
  /// Returns true if all loop blocks should be masked to fold the tail of the
  /// loop.
1586   bool foldTailByMasking() const { return FoldTailByMasking; }
1587 
1588   bool blockNeedsPredication(BasicBlock *BB) const {
1589     return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1590   }
1591 
1592   /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
1593   /// nodes to the chain of instructions representing the reductions. Uses a
1594   /// MapVector to ensure deterministic iteration order.
1595   using ReductionChainMap =
1596       SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;
1597 
1598   /// Return the chain of instructions representing an inloop reduction.
1599   const ReductionChainMap &getInLoopReductionChains() const {
1600     return InLoopReductionChains;
1601   }
1602 
1603   /// Returns true if the Phi is part of an inloop reduction.
1604   bool isInLoopReduction(PHINode *Phi) const {
1605     return InLoopReductionChains.count(Phi);
1606   }
1607 
1608   /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1609   /// with factor VF.  Return the cost of the instruction, including
1610   /// scalarization overhead if it's needed.
1611   InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;
1612 
1613   /// Estimate cost of a call instruction CI if it were vectorized with factor
1614   /// VF. Return the cost of the instruction, including scalarization overhead
  /// if it's needed. The flag NeedToScalarize shows if the call needs to be
  /// scalarized, i.e. either a vector version isn't available or it is too
  /// expensive.
1618   InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
1619                                     bool &NeedToScalarize) const;
1620 
1621   /// Returns true if the per-lane cost of VectorizationFactor A is lower than
1622   /// that of B.
1623   bool isMoreProfitable(const VectorizationFactor &A,
1624                         const VectorizationFactor &B) const;
1625 
1626   /// Invalidates decisions already taken by the cost model.
1627   void invalidateCostModelingDecisions() {
1628     WideningDecisions.clear();
1629     Uniforms.clear();
1630     Scalars.clear();
1631   }
1632 
1633 private:
1634   unsigned NumPredStores = 0;
1635 
1636   /// \return An upper bound for the vectorization factors for both
1637   /// fixed and scalable vectorization, where the minimum-known number of
1638   /// elements is a power-of-2 larger than zero. If scalable vectorization is
1639   /// disabled or unsupported, then the scalable part will be equal to
1640   /// ElementCount::getScalable(0).
1641   FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
1642                                            ElementCount UserVF);
1643 
  /// \return the maximized element count based on the target's vector
1645   /// registers and the loop trip-count, but limited to a maximum safe VF.
1646   /// This is a helper function of computeFeasibleMaxVF.
1647   /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
1648   /// issue that occurred on one of the buildbots which cannot be reproduced
  /// without having access to the proprietary compiler (see comments on
1650   /// D98509). The issue is currently under investigation and this workaround
1651   /// will be removed as soon as possible.
1652   ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
1653                                        unsigned SmallestType,
1654                                        unsigned WidestType,
1655                                        const ElementCount &MaxSafeVF);
1656 
1657   /// \return the maximum legal scalable VF, based on the safe max number
1658   /// of elements.
1659   ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1660 
1661   /// The vectorization cost is a combination of the cost itself and a boolean
1662   /// indicating whether any of the contributing operations will actually
1663   /// operate on vector values after type legalization in the backend. If this
1664   /// latter value is false, then all operations will be scalarized (i.e. no
1665   /// vectorization has actually taken place).
1666   using VectorizationCostTy = std::pair<InstructionCost, bool>;
1667 
1668   /// Returns the expected execution cost. The unit of the cost does
1669   /// not matter because we use the 'cost' units to compare different
1670   /// vector widths. The cost that is returned is *not* normalized by
1671   /// the factor width.
1672   VectorizationCostTy expectedCost(ElementCount VF);
1673 
1674   /// Returns the execution time cost of an instruction for a given vector
1675   /// width. Vector width of one means scalar.
1676   VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1677 
1678   /// The cost-computation logic from getInstructionCost which provides
1679   /// the vector type as an output parameter.
1680   InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
1681                                      Type *&VectorTy);
1682 
1683   /// Return the cost of instructions in an inloop reduction pattern, if I is
1684   /// part of that pattern.
1685   InstructionCost getReductionPatternCost(Instruction *I, ElementCount VF,
1686                                           Type *VectorTy,
1687                                           TTI::TargetCostKind CostKind);
1688 
1689   /// Calculate vectorization cost of memory instruction \p I.
1690   InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1691 
  /// The cost computation for a scalarized memory instruction.
1693   InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1694 
  /// The cost computation for an interleaving group of memory instructions.
1696   InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1697 
  /// The cost computation for a Gather/Scatter instruction.
1699   InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1700 
1701   /// The cost computation for widening instruction \p I with consecutive
1702   /// memory access.
1703   InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1704 
  /// The cost calculation for Load/Store instruction \p I with a uniform pointer:
1706   /// Load: scalar load + broadcast.
1707   /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1708   /// element)
1709   InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1710 
1711   /// Estimate the overhead of scalarizing an instruction. This is a
1712   /// convenience wrapper for the type-based getScalarizationOverhead API.
1713   InstructionCost getScalarizationOverhead(Instruction *I,
1714                                            ElementCount VF) const;
1715 
  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
1718   bool isConsecutiveLoadOrStore(Instruction *I);
1719 
1720   /// Returns true if an artificially high cost for emulated masked memrefs
1721   /// should be used.
1722   bool useEmulatedMaskMemRefHack(Instruction *I);
1723 
1724   /// Map of scalar integer values to the smallest bitwidth they can be legally
1725   /// represented as. The vector equivalents of these values should be truncated
1726   /// to this type.
1727   MapVector<Instruction *, uint64_t> MinBWs;
1728 
1729   /// A type representing the costs for instructions if they were to be
1730   /// scalarized rather than vectorized. The entries are Instruction-Cost
1731   /// pairs.
1732   using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1733 
  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
1736   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1737 
1738   /// Records whether it is allowed to have the original scalar loop execute at
1739   /// least once. This may be needed as a fallback loop in case runtime
1740   /// aliasing/dependence checks fail, or to handle the tail/remainder
  /// iterations when the trip count is unknown or not divisible by the VF,
1742   /// or as a peel-loop to handle gaps in interleave-groups.
1743   /// Under optsize and when the trip count is very small we don't allow any
1744   /// iterations to execute in the scalar loop.
1745   ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1746 
1747   /// All blocks of loop are to be masked to fold tail of scalar iterations.
1748   bool FoldTailByMasking = false;
1749 
1750   /// A map holding scalar costs for different vectorization factors. The
1751   /// presence of a cost for an instruction in the mapping indicates that the
1752   /// instruction will be scalarized when vectorizing with the associated
1753   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1754   DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1755 
1756   /// Holds the instructions known to be uniform after vectorization.
1757   /// The data is collected per VF.
1758   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1759 
1760   /// Holds the instructions known to be scalar after vectorization.
1761   /// The data is collected per VF.
1762   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1763 
1764   /// Holds the instructions (address computations) that are forced to be
1765   /// scalarized.
1766   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1767 
1768   /// PHINodes of the reductions that should be expanded in-loop along with
  /// their associated chains of reduction operations, in program order from
  /// top (PHI) to bottom.
1771   ReductionChainMap InLoopReductionChains;
1772 
1773   /// A Map of inloop reduction operations and their immediate chain operand.
1774   /// FIXME: This can be removed once reductions can be costed correctly in
1775   /// vplan. This was added to allow quick lookup to the inloop operations,
1776   /// without having to loop through InLoopReductionChains.
1777   DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1778 
1779   /// Returns the expected difference in cost from scalarizing the expression
1780   /// feeding a predicated instruction \p PredInst. The instructions to
1781   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1782   /// non-negative return value implies the expression will be scalarized.
1783   /// Currently, only single-use chains are considered for scalarization.
1784   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1785                               ElementCount VF);
1786 
1787   /// Collect the instructions that are uniform after vectorization. An
1788   /// instruction is uniform if we represent it with a single scalar value in
1789   /// the vectorized loop corresponding to each vector iteration. Examples of
1790   /// uniform instructions include pointer operands of consecutive or
1791   /// interleaved memory accesses. Note that although uniformity implies an
1792   /// instruction will be scalar, the reverse is not true. In general, a
1793   /// scalarized instruction will be represented by VF scalar values in the
1794   /// vectorized loop, each corresponding to an iteration of the original
1795   /// scalar loop.
1796   void collectLoopUniforms(ElementCount VF);
1797 
1798   /// Collect the instructions that are scalar after vectorization. An
1799   /// instruction is scalar if it is known to be uniform or will be scalarized
1800   /// during vectorization. Non-uniform scalarized instructions will be
1801   /// represented by VF values in the vectorized loop, each corresponding to an
1802   /// iteration of the original scalar loop.
1803   void collectLoopScalars(ElementCount VF);
1804 
1805   /// Keeps cost model vectorization decision and cost for instructions.
1806   /// Right now it is used for memory instructions only.
1807   using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1808                                 std::pair<InstWidening, InstructionCost>>;
1809 
1810   DecisionList WideningDecisions;
1811 
1812   /// Returns true if \p V is expected to be vectorized and it needs to be
1813   /// extracted.
1814   bool needsExtract(Value *V, ElementCount VF) const {
1815     Instruction *I = dyn_cast<Instruction>(V);
1816     if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1817         TheLoop->isLoopInvariant(I))
1818       return false;
1819 
1820     // Assume we can vectorize V (and hence we need extraction) if the
1821     // scalars are not computed yet. This can happen, because it is called
1822     // via getScalarizationOverhead from setCostBasedWideningDecision, before
1823     // the scalars are collected. That should be a safe assumption in most
1824     // cases, because we check if the operands have vectorizable types
1825     // beforehand in LoopVectorizationLegality.
1826     return Scalars.find(VF) == Scalars.end() ||
1827            !isScalarAfterVectorization(I, VF);
1828   };
1829 
1830   /// Returns a range containing only operands needing to be extracted.
1831   SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1832                                                    ElementCount VF) const {
1833     return SmallVector<Value *, 4>(make_filter_range(
1834         Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
1835   }
1836 
1837   /// Determines if we have the infrastructure to vectorize loop \p L and its
1838   /// epilogue, assuming the main loop is vectorized by \p VF.
1839   bool isCandidateForEpilogueVectorization(const Loop &L,
1840                                            const ElementCount VF) const;
1841 
1842   /// Returns true if epilogue vectorization is considered profitable, and
1843   /// false otherwise.
1844   /// \p VF is the vectorization factor chosen for the original loop.
1845   bool isEpilogueVectorizationProfitable(const ElementCount VF) const;
1846 
1847 public:
1848   /// The loop that we evaluate.
1849   Loop *TheLoop;
1850 
1851   /// Predicated scalar evolution analysis.
1852   PredicatedScalarEvolution &PSE;
1853 
1854   /// Loop Info analysis.
1855   LoopInfo *LI;
1856 
1857   /// Vectorization legality.
1858   LoopVectorizationLegality *Legal;
1859 
1860   /// Vector target information.
1861   const TargetTransformInfo &TTI;
1862 
1863   /// Target Library Info.
1864   const TargetLibraryInfo *TLI;
1865 
1866   /// Demanded bits analysis.
1867   DemandedBits *DB;
1868 
1869   /// Assumption cache.
1870   AssumptionCache *AC;
1871 
1872   /// Interface to emit optimization remarks.
1873   OptimizationRemarkEmitter *ORE;
1874 
1875   const Function *TheFunction;
1876 
1877   /// Loop Vectorize Hint.
1878   const LoopVectorizeHints *Hints;
1879 
  /// The interleaved access information contains groups of interleaved accesses
1881   /// with the same stride and close to each other.
1882   InterleavedAccessInfo &InterleaveInfo;
1883 
1884   /// Values to ignore in the cost model.
1885   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1886 
1887   /// Values to ignore in the cost model when VF > 1.
1888   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1889 
1890   /// Profitable vector factors.
1891   SmallVector<VectorizationFactor, 8> ProfitableVFs;
1892 };
1893 } // end namespace llvm
1894 
1895 /// Helper struct to manage generating runtime checks for vectorization.
1896 ///
/// The runtime checks are created up-front in temporary blocks, un-linked from
/// the existing IR, to allow a more accurate cost estimate. After deciding to
/// vectorize, the checks are moved back into the IR. If we decide not to
/// vectorize, the temporary blocks are removed completely.
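///
/// A typical use looks roughly like this (illustrative sketch; the variable
/// names are placeholders):
///   GeneratedRTChecks Checks(SE, DT, LI, DL);
///   Checks.Create(L, LAI, UnionPred);  // Build checks in detached blocks.
///   // ... use the generated blocks to estimate the cost of the checks ...
///   Checks.emitSCEVChecks(L, Bypass, VecPreheader, ExitBlock);
///   Checks.emitMemRuntimeChecks(L, Bypass, VecPreheader);
///   // Any check that was never emitted is deleted when Checks is destroyed.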
1901 class GeneratedRTChecks {
1902   /// Basic block which contains the generated SCEV checks, if any.
1903   BasicBlock *SCEVCheckBlock = nullptr;
1904 
1905   /// The value representing the result of the generated SCEV checks. If it is
1906   /// nullptr, either no SCEV checks have been generated or they have been used.
1907   Value *SCEVCheckCond = nullptr;
1908 
1909   /// Basic block which contains the generated memory runtime checks, if any.
1910   BasicBlock *MemCheckBlock = nullptr;
1911 
1912   /// The value representing the result of the generated memory runtime checks.
1913   /// If it is nullptr, either no memory runtime checks have been generated or
1914   /// they have been used.
1915   Instruction *MemRuntimeCheckCond = nullptr;
1916 
1917   DominatorTree *DT;
1918   LoopInfo *LI;
1919 
1920   SCEVExpander SCEVExp;
1921   SCEVExpander MemCheckExp;
1922 
1923 public:
1924   GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI,
1925                     const DataLayout &DL)
1926       : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"),
1927         MemCheckExp(SE, DL, "scev.check") {}
1928 
1929   /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
1930   /// accurately estimate the cost of the runtime checks. The blocks are
  /// un-linked from the IR and are added back during vector code generation. If
1932   /// there is no vector code generation, the check blocks are removed
1933   /// completely.
1934   void Create(Loop *L, const LoopAccessInfo &LAI,
1935               const SCEVUnionPredicate &UnionPred) {
1936 
1937     BasicBlock *LoopHeader = L->getHeader();
1938     BasicBlock *Preheader = L->getLoopPreheader();
1939 
1940     // Use SplitBlock to create blocks for SCEV & memory runtime checks to
1941     // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
1942     // may be used by SCEVExpander. The blocks will be un-linked from their
1943     // predecessors and removed from LI & DT at the end of the function.
1944     if (!UnionPred.isAlwaysTrue()) {
1945       SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
1946                                   nullptr, "vector.scevcheck");
1947 
1948       SCEVCheckCond = SCEVExp.expandCodeForPredicate(
1949           &UnionPred, SCEVCheckBlock->getTerminator());
1950     }
1951 
1952     const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
1953     if (RtPtrChecking.Need) {
1954       auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
1955       MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
1956                                  "vector.memcheck");
1957 
1958       std::tie(std::ignore, MemRuntimeCheckCond) =
1959           addRuntimeChecks(MemCheckBlock->getTerminator(), L,
1960                            RtPtrChecking.getChecks(), MemCheckExp);
1961       assert(MemRuntimeCheckCond &&
1962              "no RT checks generated although RtPtrChecking "
1963              "claimed checks are required");
1964     }
1965 
1966     if (!MemCheckBlock && !SCEVCheckBlock)
1967       return;
1968 
    // Unhook the temporary blocks with the checks and update the various
    // places accordingly.
1971     if (SCEVCheckBlock)
1972       SCEVCheckBlock->replaceAllUsesWith(Preheader);
1973     if (MemCheckBlock)
1974       MemCheckBlock->replaceAllUsesWith(Preheader);
1975 
1976     if (SCEVCheckBlock) {
1977       SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
1978       new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
1979       Preheader->getTerminator()->eraseFromParent();
1980     }
1981     if (MemCheckBlock) {
1982       MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
1983       new UnreachableInst(Preheader->getContext(), MemCheckBlock);
1984       Preheader->getTerminator()->eraseFromParent();
1985     }
1986 
1987     DT->changeImmediateDominator(LoopHeader, Preheader);
1988     if (MemCheckBlock) {
1989       DT->eraseNode(MemCheckBlock);
1990       LI->removeBlock(MemCheckBlock);
1991     }
1992     if (SCEVCheckBlock) {
1993       DT->eraseNode(SCEVCheckBlock);
1994       LI->removeBlock(SCEVCheckBlock);
1995     }
1996   }
1997 
1998   /// Remove the created SCEV & memory runtime check blocks & instructions, if
1999   /// unused.
2000   ~GeneratedRTChecks() {
2001     SCEVExpanderCleaner SCEVCleaner(SCEVExp, *DT);
2002     SCEVExpanderCleaner MemCheckCleaner(MemCheckExp, *DT);
2003     if (!SCEVCheckCond)
2004       SCEVCleaner.markResultUsed();
2005 
2006     if (!MemRuntimeCheckCond)
2007       MemCheckCleaner.markResultUsed();
2008 
2009     if (MemRuntimeCheckCond) {
2010       auto &SE = *MemCheckExp.getSE();
2011       // Memory runtime check generation creates compares that use expanded
2012       // values. Remove them before running the SCEVExpanderCleaners.
2013       for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
2014         if (MemCheckExp.isInsertedInstruction(&I))
2015           continue;
2016         SE.forgetValue(&I);
2017         SE.eraseValueFromMap(&I);
2018         I.eraseFromParent();
2019       }
2020     }
2021     MemCheckCleaner.cleanup();
2022     SCEVCleaner.cleanup();
2023 
2024     if (SCEVCheckCond)
2025       SCEVCheckBlock->eraseFromParent();
2026     if (MemRuntimeCheckCond)
2027       MemCheckBlock->eraseFromParent();
2028   }
2029 
2030   /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and
2031   /// adjusts the branches to branch to the vector preheader or \p Bypass,
2032   /// depending on the generated condition.
2033   BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass,
2034                              BasicBlock *LoopVectorPreHeader,
2035                              BasicBlock *LoopExitBlock) {
2036     if (!SCEVCheckCond)
2037       return nullptr;
2038     if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond))
2039       if (C->isZero())
2040         return nullptr;
2041 
2042     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2043 
2044     BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock);
    // If the vector loop preheader is inside an enclosing loop, the SCEV check
    // block becomes part of that loop as well.
2046     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2047       PL->addBasicBlockToLoop(SCEVCheckBlock, *LI);
2048 
2049     SCEVCheckBlock->getTerminator()->eraseFromParent();
2050     SCEVCheckBlock->moveBefore(LoopVectorPreHeader);
2051     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2052                                                 SCEVCheckBlock);
2053 
2054     DT->addNewBlock(SCEVCheckBlock, Pred);
2055     DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock);
2056 
2057     ReplaceInstWithInst(
2058         SCEVCheckBlock->getTerminator(),
2059         BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond));
2060     // Mark the check as used, to prevent it from being removed during cleanup.
2061     SCEVCheckCond = nullptr;
2062     return SCEVCheckBlock;
2063   }
2064 
2065   /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts
2066   /// the branches to branch to the vector preheader or \p Bypass, depending on
2067   /// the generated condition.
2068   BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass,
2069                                    BasicBlock *LoopVectorPreHeader) {
    // Check if we generated code that checks at runtime whether arrays overlap.
2071     if (!MemRuntimeCheckCond)
2072       return nullptr;
2073 
2074     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2075     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2076                                                 MemCheckBlock);
2077 
2078     DT->addNewBlock(MemCheckBlock, Pred);
2079     DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock);
2080     MemCheckBlock->moveBefore(LoopVectorPreHeader);
2081 
2082     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2083       PL->addBasicBlockToLoop(MemCheckBlock, *LI);
2084 
2085     ReplaceInstWithInst(
2086         MemCheckBlock->getTerminator(),
2087         BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond));
2088     MemCheckBlock->getTerminator()->setDebugLoc(
2089         Pred->getTerminator()->getDebugLoc());
2090 
2091     // Mark the check as used, to prevent it from being removed during cleanup.
2092     MemRuntimeCheckCond = nullptr;
2093     return MemCheckBlock;
2094   }
2095 };
2096 
2097 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
// vectorization. The loop needs to be annotated with #pragma omp simd
// simdlen(#) or #pragma clang loop vectorize(enable) vectorize_width(#). If
// the vector length information is not provided, vectorization is not
// considered explicit. Interleave hints are not allowed either. These
// limitations will be relaxed in the future.
// Please note that we are currently forced to abuse the 'clang loop
// vectorize' pragma semantics. This pragma provides *auto-vectorization hints*
2105 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
2106 // provides *explicit vectorization hints* (LV can bypass legal checks and
2107 // assume that vectorization is legal). However, both hints are implemented
2108 // using the same metadata (llvm.loop.vectorize, processed by
2109 // LoopVectorizeHints). This will be fixed in the future when the native IR
2110 // representation for pragma 'omp simd' is introduced.
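//
// For example (illustrative), an outer loop annotated as
//   #pragma clang loop vectorize(enable) vectorize_width(4)
//   for (int i = 0; i < N; ++i)
//     for (int j = 0; j < M; ++j)
//       A[i][j] += B[i][j];
// carries explicit vector length information and is handled here, whereas the
// same loop with only '#pragma clang loop vectorize(enable)' is not considered
// explicit.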
2111 static bool isExplicitVecOuterLoop(Loop *OuterLp,
2112                                    OptimizationRemarkEmitter *ORE) {
2113   assert(!OuterLp->isInnermost() && "This is not an outer loop");
2114   LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
2115 
2116   // Only outer loops with an explicit vectorization hint are supported.
2117   // Unannotated outer loops are ignored.
2118   if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
2119     return false;
2120 
2121   Function *Fn = OuterLp->getHeader()->getParent();
2122   if (!Hints.allowVectorization(Fn, OuterLp,
2123                                 true /*VectorizeOnlyWhenForced*/)) {
2124     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
2125     return false;
2126   }
2127 
2128   if (Hints.getInterleave() > 1) {
2129     // TODO: Interleave support is future work.
2130     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
2131                          "outer loops.\n");
2132     Hints.emitRemarkWithHints();
2133     return false;
2134   }
2135 
2136   return true;
2137 }
2138 
2139 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
2140                                   OptimizationRemarkEmitter *ORE,
2141                                   SmallVectorImpl<Loop *> &V) {
2142   // Collect inner loops and outer loops without irreducible control flow. For
2143   // now, only collect outer loops that have explicit vectorization hints. If we
2144   // are stress testing the VPlan H-CFG construction, we collect the outermost
2145   // loop of every loop nest.
2146   if (L.isInnermost() || VPlanBuildStressTest ||
2147       (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
2148     LoopBlocksRPO RPOT(&L);
2149     RPOT.perform(LI);
2150     if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
2151       V.push_back(&L);
2152       // TODO: Collect inner loops inside marked outer loops in case
2153       // vectorization fails for the outer loop. Do not invoke
2154       // 'containsIrreducibleCFG' again for inner loops when the outer loop is
2155       // already known to be reducible. We can use an inherited attribute for
2156       // that.
2157       return;
2158     }
2159   }
2160   for (Loop *InnerL : L)
2161     collectSupportedLoops(*InnerL, LI, ORE, V);
2162 }
2163 
2164 namespace {
2165 
2166 /// The LoopVectorize Pass.
2167 struct LoopVectorize : public FunctionPass {
2168   /// Pass identification, replacement for typeid
2169   static char ID;
2170 
2171   LoopVectorizePass Impl;
2172 
2173   explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
2174                          bool VectorizeOnlyWhenForced = false)
2175       : FunctionPass(ID),
2176         Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
2177     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
2178   }
2179 
2180   bool runOnFunction(Function &F) override {
2181     if (skipFunction(F))
2182       return false;
2183 
2184     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2185     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2186     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
2187     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2188     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
2189     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
2190     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
2191     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2192     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2193     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
2194     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
2195     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2196     auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
2197 
2198     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
2199         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
2200 
2201     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
2202                         GetLAA, *ORE, PSI).MadeAnyChange;
2203   }
2204 
2205   void getAnalysisUsage(AnalysisUsage &AU) const override {
2206     AU.addRequired<AssumptionCacheTracker>();
2207     AU.addRequired<BlockFrequencyInfoWrapperPass>();
2208     AU.addRequired<DominatorTreeWrapperPass>();
2209     AU.addRequired<LoopInfoWrapperPass>();
2210     AU.addRequired<ScalarEvolutionWrapperPass>();
2211     AU.addRequired<TargetTransformInfoWrapperPass>();
2212     AU.addRequired<AAResultsWrapperPass>();
2213     AU.addRequired<LoopAccessLegacyAnalysis>();
2214     AU.addRequired<DemandedBitsWrapperPass>();
2215     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2216     AU.addRequired<InjectTLIMappingsLegacy>();
2217 
2218     // We currently do not preserve loopinfo/dominator analyses with outer loop
2219     // vectorization. Until this is addressed, mark these analyses as preserved
2220     // only for non-VPlan-native path.
2221     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
2222     if (!EnableVPlanNativePath) {
2223       AU.addPreserved<LoopInfoWrapperPass>();
2224       AU.addPreserved<DominatorTreeWrapperPass>();
2225     }
2226 
2227     AU.addPreserved<BasicAAWrapperPass>();
2228     AU.addPreserved<GlobalsAAWrapperPass>();
2229     AU.addRequired<ProfileSummaryInfoWrapperPass>();
2230   }
2231 };
2232 
2233 } // end anonymous namespace
2234 
2235 //===----------------------------------------------------------------------===//
2236 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
2237 // LoopVectorizationCostModel and LoopVectorizationPlanner.
2238 //===----------------------------------------------------------------------===//
2239 
2240 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
  // We need to place the broadcast of invariant variables outside the loop,
  // but only if it's proven safe to do so. Otherwise, the broadcast will be
  // inside the vector loop body.
2244   Instruction *Instr = dyn_cast<Instruction>(V);
2245   bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
2246                      (!Instr ||
2247                       DT->dominates(Instr->getParent(), LoopVectorPreHeader));
2248   // Place the code for broadcasting invariant variables in the new preheader.
2249   IRBuilder<>::InsertPointGuard Guard(Builder);
2250   if (SafeToHoist)
2251     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2252 
2253   // Broadcast the scalar into all locations in the vector.
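  // For example (illustrative): broadcasting an i32 value %x with VF = 4
  // yields a <4 x i32> splat whose lanes are all %x.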
2254   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
2255 
2256   return Shuf;
2257 }
2258 
2259 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
2260     const InductionDescriptor &II, Value *Step, Value *Start,
2261     Instruction *EntryVal, VPValue *Def, VPValue *CastDef,
2262     VPTransformState &State) {
2263   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
2264          "Expected either an induction phi-node or a truncate of it!");
2265 
2266   // Construct the initial value of the vector IV in the vector loop preheader
2267   auto CurrIP = Builder.saveIP();
2268   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2269   if (isa<TruncInst>(EntryVal)) {
2270     assert(Start->getType()->isIntegerTy() &&
2271            "Truncation requires an integer type");
2272     auto *TruncType = cast<IntegerType>(EntryVal->getType());
2273     Step = Builder.CreateTrunc(Step, TruncType);
2274     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
2275   }
2276   Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
2277   Value *SteppedStart =
2278       getStepVector(SplatStart, 0, Step, II.getInductionOpcode());
2279 
2280   // We create vector phi nodes for both integer and floating-point induction
2281   // variables. Here, we determine the kind of arithmetic we will perform.
2282   Instruction::BinaryOps AddOp;
2283   Instruction::BinaryOps MulOp;
2284   if (Step->getType()->isIntegerTy()) {
2285     AddOp = Instruction::Add;
2286     MulOp = Instruction::Mul;
2287   } else {
2288     AddOp = II.getInductionOpcode();
2289     MulOp = Instruction::FMul;
2290   }
2291 
2292   // Multiply the vectorization factor by the step using integer or
2293   // floating-point arithmetic as appropriate.
2294   Type *StepType = Step->getType();
2295   if (Step->getType()->isFloatingPointTy())
2296     StepType = IntegerType::get(StepType->getContext(),
2297                                 StepType->getScalarSizeInBits());
2298   Value *RuntimeVF = getRuntimeVF(Builder, StepType, VF);
2299   if (Step->getType()->isFloatingPointTy())
2300     RuntimeVF = Builder.CreateSIToFP(RuntimeVF, Step->getType());
2301   Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF);
2302 
2303   // Create a vector splat to use in the induction update.
2304   //
2305   // FIXME: If the step is non-constant, we create the vector splat with
2306   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
2307   //        handle a constant vector splat.
2308   Value *SplatVF = isa<Constant>(Mul)
2309                        ? ConstantVector::getSplat(VF, cast<Constant>(Mul))
2310                        : Builder.CreateVectorSplat(VF, Mul);
2311   Builder.restoreIP(CurrIP);
2312 
2313   // We may need to add the step a number of times, depending on the unroll
2314   // factor. The last of those goes into the PHI.
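  //
  // For example (illustrative): with VF = 4, UF = 2 and an integer step of 1,
  // part 0 holds <i, i+1, i+2, i+3>, part 1 is part 0 plus the splat
  // <4, 4, 4, 4>, and the value fed back into the PHI advances the induction
  // by VF * UF = 8 on every vector iteration.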
2315   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
2316                                     &*LoopVectorBody->getFirstInsertionPt());
2317   VecInd->setDebugLoc(EntryVal->getDebugLoc());
2318   Instruction *LastInduction = VecInd;
2319   for (unsigned Part = 0; Part < UF; ++Part) {
2320     State.set(Def, LastInduction, Part);
2321 
2322     if (isa<TruncInst>(EntryVal))
2323       addMetadata(LastInduction, EntryVal);
2324     recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, CastDef,
2325                                           State, Part);
2326 
2327     LastInduction = cast<Instruction>(
2328         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"));
2329     LastInduction->setDebugLoc(EntryVal->getDebugLoc());
2330   }
2331 
2332   // Move the last step to the end of the latch block. This ensures consistent
2333   // placement of all induction updates.
2334   auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
2335   auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
2336   auto *ICmp = cast<Instruction>(Br->getCondition());
2337   LastInduction->moveBefore(ICmp);
2338   LastInduction->setName("vec.ind.next");
2339 
2340   VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
2341   VecInd->addIncoming(LastInduction, LoopVectorLatch);
2342 }
2343 
2344 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
2345   return Cost->isScalarAfterVectorization(I, VF) ||
2346          Cost->isProfitableToScalarize(I, VF);
2347 }
2348 
2349 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
2350   if (shouldScalarizeInstruction(IV))
2351     return true;
2352   auto isScalarInst = [&](User *U) -> bool {
2353     auto *I = cast<Instruction>(U);
2354     return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
2355   };
2356   return llvm::any_of(IV->users(), isScalarInst);
2357 }
2358 
2359 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast(
2360     const InductionDescriptor &ID, const Instruction *EntryVal,
2361     Value *VectorLoopVal, VPValue *CastDef, VPTransformState &State,
2362     unsigned Part, unsigned Lane) {
2363   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
2364          "Expected either an induction phi-node or a truncate of it!");
2365 
  // This induction variable is not the phi from the original loop but the
  // newly-created IV, based on the proof that the casted Phi is equal to the
  // uncasted Phi in the vectorized loop (possibly under a runtime guard). It
  // re-uses the same InductionDescriptor that the original IV uses, but we
  // don't have to do any recording in this case - that is done when the
  // original IV is processed.
2372   if (isa<TruncInst>(EntryVal))
2373     return;
2374 
2375   const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
2376   if (Casts.empty())
2377     return;
  // Only the first Cast instruction in the Casts vector is of interest.
  // The rest of the Casts (if any exist) have no uses outside the
  // induction update chain itself.
2381   if (Lane < UINT_MAX)
2382     State.set(CastDef, VectorLoopVal, VPIteration(Part, Lane));
2383   else
2384     State.set(CastDef, VectorLoopVal, Part);
2385 }
2386 
2387 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, Value *Start,
2388                                                 TruncInst *Trunc, VPValue *Def,
2389                                                 VPValue *CastDef,
2390                                                 VPTransformState &State) {
2391   assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
2392          "Primary induction variable must have an integer type");
2393 
2394   auto II = Legal->getInductionVars().find(IV);
2395   assert(II != Legal->getInductionVars().end() && "IV is not an induction");
2396 
2397   auto ID = II->second;
2398   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
2399 
2400   // The value from the original loop to which we are mapping the new induction
2401   // variable.
2402   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
2403 
2404   auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
2405 
  // Generate code for the induction step. Note that induction steps are
  // required to be loop-invariant.
2408   auto CreateStepValue = [&](const SCEV *Step) -> Value * {
2409     assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) &&
2410            "Induction step should be loop invariant");
2411     if (PSE.getSE()->isSCEVable(IV->getType())) {
2412       SCEVExpander Exp(*PSE.getSE(), DL, "induction");
2413       return Exp.expandCodeFor(Step, Step->getType(),
2414                                LoopVectorPreHeader->getTerminator());
2415     }
2416     return cast<SCEVUnknown>(Step)->getValue();
2417   };
2418 
2419   // The scalar value to broadcast. This is derived from the canonical
2420   // induction variable. If a truncation type is given, truncate the canonical
2421   // induction variable and step. Otherwise, derive these values from the
2422   // induction descriptor.
2423   auto CreateScalarIV = [&](Value *&Step) -> Value * {
2424     Value *ScalarIV = Induction;
2425     if (IV != OldInduction) {
2426       ScalarIV = IV->getType()->isIntegerTy()
2427                      ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
2428                      : Builder.CreateCast(Instruction::SIToFP, Induction,
2429                                           IV->getType());
2430       ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID);
2431       ScalarIV->setName("offset.idx");
2432     }
2433     if (Trunc) {
2434       auto *TruncType = cast<IntegerType>(Trunc->getType());
2435       assert(Step->getType()->isIntegerTy() &&
2436              "Truncation requires an integer step");
2437       ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
2438       Step = Builder.CreateTrunc(Step, TruncType);
2439     }
2440     return ScalarIV;
2441   };
2442 
  // Create the vector values from the scalar IV, in case a vector IV is not
  // created.
2445   auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) {
2446     Value *Broadcasted = getBroadcastInstrs(ScalarIV);
2447     for (unsigned Part = 0; Part < UF; ++Part) {
2448       assert(!VF.isScalable() && "scalable vectors not yet supported.");
2449       Value *EntryPart =
2450           getStepVector(Broadcasted, VF.getKnownMinValue() * Part, Step,
2451                         ID.getInductionOpcode());
2452       State.set(Def, EntryPart, Part);
2453       if (Trunc)
2454         addMetadata(EntryPart, Trunc);
2455       recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, CastDef,
2456                                             State, Part);
2457     }
2458   };
2459 
2460   // Fast-math-flags propagate from the original induction instruction.
2461   IRBuilder<>::FastMathFlagGuard FMFG(Builder);
2462   if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp()))
2463     Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags());
2464 
2465   // Now do the actual transformations, and start with creating the step value.
2466   Value *Step = CreateStepValue(ID.getStep());
2467   if (VF.isZero() || VF.isScalar()) {
2468     Value *ScalarIV = CreateScalarIV(Step);
2469     CreateSplatIV(ScalarIV, Step);
2470     return;
2471   }
2472 
2473   // Determine if we want a scalar version of the induction variable. This is
2474   // true if the induction variable itself is not widened, or if it has at
2475   // least one user in the loop that is not widened.
2476   auto NeedsScalarIV = needsScalarInduction(EntryVal);
2477   if (!NeedsScalarIV) {
2478     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
2479                                     State);
2480     return;
2481   }
2482 
2483   // Try to create a new independent vector induction variable. If we can't
2484   // create the phi node, we will splat the scalar induction variable in each
2485   // loop iteration.
2486   if (!shouldScalarizeInstruction(EntryVal)) {
2487     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
2488                                     State);
2489     Value *ScalarIV = CreateScalarIV(Step);
2490     // Create scalar steps that can be used by instructions we will later
2491     // scalarize. Note that the addition of the scalar steps will not increase
2492     // the number of instructions in the loop in the common case prior to
2493     // InstCombine. We will be trading one vector extract for each scalar step.
2494     buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
2495     return;
2496   }
2497 
  // All IV users are scalar instructions, so only emit a scalar IV, not a
  // vectorized IV, except when we fold the tail, in which case the splat IV
  // feeds the predicate used by the masked loads/stores.
2501   Value *ScalarIV = CreateScalarIV(Step);
2502   if (!Cost->isScalarEpilogueAllowed())
2503     CreateSplatIV(ScalarIV, Step);
2504   buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
2505 }
2506 
2507 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
2508                                           Instruction::BinaryOps BinOp) {
2509   // Create and check the types.
2510   auto *ValVTy = cast<VectorType>(Val->getType());
2511   ElementCount VLen = ValVTy->getElementCount();
2512 
2513   Type *STy = Val->getType()->getScalarType();
2514   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
2515          "Induction Step must be an integer or FP");
2516   assert(Step->getType() == STy && "Step has wrong type");
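  // As an illustrative example (not generated verbatim): for a fixed VF of 4,
  // an integer StartIdx of 4 and a scalar Step, lane i of the result holds
  //   Val[i] + (4 + i) * Step
  // i.e. Val plus the step vector <4, 5, 6, 7> scaled by Step.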
2517 
2518   SmallVector<Constant *, 8> Indices;
2519 
2520   // Create a vector of consecutive numbers from zero to VF.
2521   VectorType *InitVecValVTy = ValVTy;
2522   Type *InitVecValSTy = STy;
2523   if (STy->isFloatingPointTy()) {
2524     InitVecValSTy =
2525         IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
2526     InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
2527   }
2528   Value *InitVec = Builder.CreateStepVector(InitVecValVTy);
2529 
2530   // Add on StartIdx
2531   Value *StartIdxSplat = Builder.CreateVectorSplat(
2532       VLen, ConstantInt::get(InitVecValSTy, StartIdx));
2533   InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);
2534 
2535   if (STy->isIntegerTy()) {
2536     Step = Builder.CreateVectorSplat(VLen, Step);
2537     assert(Step->getType() == Val->getType() && "Invalid step vec");
    // FIXME: The newly created binary instructions should contain nsw/nuw
    //        flags, which can be taken from the original scalar operations.
2540     Step = Builder.CreateMul(InitVec, Step);
2541     return Builder.CreateAdd(Val, Step, "induction");
2542   }
2543 
2544   // Floating point induction.
2545   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
2546          "Binary Opcode should be specified for FP induction");
2547   InitVec = Builder.CreateUIToFP(InitVec, ValVTy);
2548   Step = Builder.CreateVectorSplat(VLen, Step);
2549   Value *MulOp = Builder.CreateFMul(InitVec, Step);
2550   return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
2551 }
2552 
2553 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
2554                                            Instruction *EntryVal,
2555                                            const InductionDescriptor &ID,
2556                                            VPValue *Def, VPValue *CastDef,
2557                                            VPTransformState &State) {
2558   // We shouldn't have to build scalar steps if we aren't vectorizing.
2559   assert(VF.isVector() && "VF should be greater than one");
  // Get the value type and ensure it and the step have the same type.
2561   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
2562   assert(ScalarIVTy == Step->getType() &&
2563          "Val and Step should have the same type");
2564 
2565   // We build scalar steps for both integer and floating-point induction
2566   // variables. Here, we determine the kind of arithmetic we will perform.
2567   Instruction::BinaryOps AddOp;
2568   Instruction::BinaryOps MulOp;
2569   if (ScalarIVTy->isIntegerTy()) {
2570     AddOp = Instruction::Add;
2571     MulOp = Instruction::Mul;
2572   } else {
2573     AddOp = ID.getInductionOpcode();
2574     MulOp = Instruction::FMul;
2575   }
2576 
2577   // Determine the number of scalars we need to generate for each unroll
2578   // iteration. If EntryVal is uniform, we only need to generate the first
2579   // lane. Otherwise, we generate all VF values.
2580   bool IsUniform =
2581       Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF);
2582   unsigned Lanes = IsUniform ? 1 : VF.getKnownMinValue();
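  // As an illustration (assuming a fixed VF of 4 and a non-uniform EntryVal),
  // the scalar value produced below for part P, lane L is
  //   ScalarIV + (P * 4 + L) * Step
  // which corresponds to lane L of part P of the widened induction.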
2583   // Compute the scalar steps and save the results in State.
2584   Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
2585                                      ScalarIVTy->getScalarSizeInBits());
2586   Type *VecIVTy = nullptr;
2587   Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr;
2588   if (!IsUniform && VF.isScalable()) {
2589     VecIVTy = VectorType::get(ScalarIVTy, VF);
2590     UnitStepVec = Builder.CreateStepVector(VectorType::get(IntStepTy, VF));
2591     SplatStep = Builder.CreateVectorSplat(VF, Step);
2592     SplatIV = Builder.CreateVectorSplat(VF, ScalarIV);
2593   }
2594 
2595   for (unsigned Part = 0; Part < UF; ++Part) {
2596     Value *StartIdx0 =
2597         createStepForVF(Builder, ConstantInt::get(IntStepTy, Part), VF);
2598 
2599     if (!IsUniform && VF.isScalable()) {
2600       auto *SplatStartIdx = Builder.CreateVectorSplat(VF, StartIdx0);
2601       auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec);
2602       if (ScalarIVTy->isFloatingPointTy())
2603         InitVec = Builder.CreateSIToFP(InitVec, VecIVTy);
2604       auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep);
2605       auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul);
2606       State.set(Def, Add, Part);
2607       recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State,
2608                                             Part);
      // It's useful to record the lane values too, for the known minimum
      // number of elements, so we do that below as well. This improves the
      // code quality when trying to extract the first element, for example.
2612     }
2613 
2614     if (ScalarIVTy->isFloatingPointTy())
2615       StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy);
2616 
2617     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2618       Value *StartIdx = Builder.CreateBinOp(
2619           AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane));
2620       // The step returned by `createStepForVF` is a runtime-evaluated value
2621       // when VF is scalable. Otherwise, it should be folded into a Constant.
2622       assert((VF.isScalable() || isa<Constant>(StartIdx)) &&
2623              "Expected StartIdx to be folded to a constant when VF is not "
2624              "scalable");
2625       auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
2626       auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul);
2627       State.set(Def, Add, VPIteration(Part, Lane));
2628       recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State,
2629                                             Part, Lane);
2630     }
2631   }
2632 }
2633 
2634 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def,
2635                                                     const VPIteration &Instance,
2636                                                     VPTransformState &State) {
2637   Value *ScalarInst = State.get(Def, Instance);
2638   Value *VectorValue = State.get(Def, Instance.Part);
2639   VectorValue = Builder.CreateInsertElement(
2640       VectorValue, ScalarInst,
2641       Instance.Lane.getAsRuntimeExpr(State.Builder, VF));
2642   State.set(Def, VectorValue, Instance.Part);
2643 }
2644 
2645 Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
2646   assert(Vec->getType()->isVectorTy() && "Invalid type");
2647   return Builder.CreateVectorReverse(Vec, "reverse");
2648 }
2649 
2650 // Return whether we allow using masked interleave-groups (for dealing with
2651 // strided loads/stores that reside in predicated blocks, or for dealing
2652 // with gaps).
2653 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2654   // If an override option has been passed in for interleaved accesses, use it.
2655   if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2656     return EnableMaskedInterleavedMemAccesses;
2657 
2658   return TTI.enableMaskedInterleavedAccessVectorization();
2659 }
2660 
2661 // Try to vectorize the interleave group that \p Instr belongs to.
2662 //
// E.g. Translate the following interleaved load group (factor = 3):
2664 //   for (i = 0; i < N; i+=3) {
2665 //     R = Pic[i];             // Member of index 0
2666 //     G = Pic[i+1];           // Member of index 1
2667 //     B = Pic[i+2];           // Member of index 2
2668 //     ... // do something to R, G, B
2669 //   }
2670 // To:
2671 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2672 //   %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9>   ; R elements
2673 //   %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10>  ; G elements
2674 //   %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11>  ; B elements
2675 //
// Or translate the following interleaved store group (factor = 3):
2677 //   for (i = 0; i < N; i+=3) {
2678 //     ... do something to R, G, B
2679 //     Pic[i]   = R;           // Member of index 0
2680 //     Pic[i+1] = G;           // Member of index 1
2681 //     Pic[i+2] = B;           // Member of index 2
2682 //   }
2683 // To:
2684 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2685 //   %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
2686 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2687 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2688 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2689 void InnerLoopVectorizer::vectorizeInterleaveGroup(
2690     const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
2691     VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
2692     VPValue *BlockInMask) {
2693   Instruction *Instr = Group->getInsertPos();
2694   const DataLayout &DL = Instr->getModule()->getDataLayout();
2695 
2696   // Prepare for the vector type of the interleaved load/store.
2697   Type *ScalarTy = getLoadStoreType(Instr);
2698   unsigned InterleaveFactor = Group->getFactor();
2699   assert(!VF.isScalable() && "scalable vectors not yet supported.");
2700   auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);
2701 
2702   // Prepare for the new pointers.
2703   SmallVector<Value *, 2> AddrParts;
2704   unsigned Index = Group->getIndex(Instr);
2705 
2706   // TODO: extend the masked interleaved-group support to reversed access.
2707   assert((!BlockInMask || !Group->isReverse()) &&
2708          "Reversed masked interleave-group not supported.");
2709 
2710   // If the group is reverse, adjust the index to refer to the last vector lane
2711   // instead of the first. We adjust the index from the first vector lane,
2712   // rather than directly getting the pointer for lane VF - 1, because the
2713   // pointer operand of the interleaved access is supposed to be uniform. For
2714   // uniform instructions, we're only required to generate a value for the
2715   // first vector lane in each unroll iteration.
2716   if (Group->isReverse())
2717     Index += (VF.getKnownMinValue() - 1) * Group->getFactor();
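  // For example (illustrative numbers): with a fixed VF of 4, a factor of 3
  // and a member index of 1, the adjusted Index is 1 + (4 - 1) * 3 = 10; it
  // is negated below so the wide access starts at the lowest address touched
  // by the reversed group.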
2718 
2719   for (unsigned Part = 0; Part < UF; Part++) {
2720     Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
2721     setDebugLocFromInst(Builder, AddrPart);
2722 
    // Notice that the current instruction could have any member index. We
    // need to adjust the address to the member of index 0.
    //
    // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
    //       b = A[i];       // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
    //
    // E.g.  A[i+1] = a;     // Member of index 1
    //       A[i]   = b;     // Member of index 0
    //       A[i+2] = c;     // Member of index 2 (Current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].
2734 
2735     bool InBounds = false;
2736     if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
2737       InBounds = gep->isInBounds();
2738     AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2739     cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2740 
2741     // Cast to the vector pointer type.
2742     unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
2743     Type *PtrTy = VecTy->getPointerTo(AddressSpace);
2744     AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
2745   }
2746 
2747   setDebugLocFromInst(Builder, Instr);
2748   Value *PoisonVec = PoisonValue::get(VecTy);
2749 
2750   Value *MaskForGaps = nullptr;
2751   if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
2752     MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2753     assert(MaskForGaps && "Mask for Gaps is required but it is null");
2754   }
2755 
2756   // Vectorize the interleaved load group.
2757   if (isa<LoadInst>(Instr)) {
2758     // For each unroll part, create a wide load for the group.
2759     SmallVector<Value *, 2> NewLoads;
2760     for (unsigned Part = 0; Part < UF; Part++) {
2761       Instruction *NewLoad;
2762       if (BlockInMask || MaskForGaps) {
2763         assert(useMaskedInterleavedAccesses(*TTI) &&
2764                "masked interleaved groups are not allowed.");
2765         Value *GroupMask = MaskForGaps;
2766         if (BlockInMask) {
2767           Value *BlockInMaskPart = State.get(BlockInMask, Part);
2768           Value *ShuffledMask = Builder.CreateShuffleVector(
2769               BlockInMaskPart,
2770               createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2771               "interleaved.mask");
2772           GroupMask = MaskForGaps
2773                           ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2774                                                 MaskForGaps)
2775                           : ShuffledMask;
2776         }
2777         NewLoad =
2778             Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(),
2779                                      GroupMask, PoisonVec, "wide.masked.vec");
      } else
2782         NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
2783                                             Group->getAlign(), "wide.vec");
2784       Group->addMetadata(NewLoad);
2785       NewLoads.push_back(NewLoad);
2786     }
2787 
2788     // For each member in the group, shuffle out the appropriate data from the
2789     // wide loads.
2790     unsigned J = 0;
2791     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2792       Instruction *Member = Group->getMember(I);
2793 
2794       // Skip the gaps in the group.
2795       if (!Member)
2796         continue;
2797 
2798       auto StrideMask =
2799           createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
2800       for (unsigned Part = 0; Part < UF; Part++) {
2801         Value *StridedVec = Builder.CreateShuffleVector(
2802             NewLoads[Part], StrideMask, "strided.vec");
2803 
        // If this member has a different type, cast the result type.
2805         if (Member->getType() != ScalarTy) {
2806           assert(!VF.isScalable() && "VF is assumed to be non scalable.");
2807           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2808           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2809         }
2810 
2811         if (Group->isReverse())
2812           StridedVec = reverseVector(StridedVec);
2813 
2814         State.set(VPDefs[J], StridedVec, Part);
2815       }
2816       ++J;
2817     }
2818     return;
2819   }
2820 
  // The sub vector type for the current instruction.
2822   auto *SubVT = VectorType::get(ScalarTy, VF);
2823 
2824   // Vectorize the interleaved store group.
2825   for (unsigned Part = 0; Part < UF; Part++) {
2826     // Collect the stored vector from each member.
2827     SmallVector<Value *, 4> StoredVecs;
2828     for (unsigned i = 0; i < InterleaveFactor; i++) {
      // An interleaved store group doesn't allow gaps, so each index has a
      // member.
      assert(Group->getMember(i) &&
             "Fail to get a member from an interleaved store group");
2831 
2832       Value *StoredVec = State.get(StoredValues[i], Part);
2833 
2834       if (Group->isReverse())
2835         StoredVec = reverseVector(StoredVec);
2836 
      // If this member has a different type, cast it to a unified type.
      if (StoredVec->getType() != SubVT)
2840         StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2841 
2842       StoredVecs.push_back(StoredVec);
2843     }
2844 
2845     // Concatenate all vectors into a wide vector.
2846     Value *WideVec = concatenateVectors(Builder, StoredVecs);
2847 
2848     // Interleave the elements in the wide vector.
2849     Value *IVec = Builder.CreateShuffleVector(
2850         WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor),
2851         "interleaved.vec");
2852 
2853     Instruction *NewStoreInstr;
2854     if (BlockInMask) {
2855       Value *BlockInMaskPart = State.get(BlockInMask, Part);
2856       Value *ShuffledMask = Builder.CreateShuffleVector(
2857           BlockInMaskPart,
2858           createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2859           "interleaved.mask");
2860       NewStoreInstr = Builder.CreateMaskedStore(
2861           IVec, AddrParts[Part], Group->getAlign(), ShuffledMask);
    } else
2864       NewStoreInstr =
2865           Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());
2866 
2867     Group->addMetadata(NewStoreInstr);
2868   }
2869 }
2870 
2871 void InnerLoopVectorizer::vectorizeMemoryInstruction(
2872     Instruction *Instr, VPTransformState &State, VPValue *Def, VPValue *Addr,
2873     VPValue *StoredValue, VPValue *BlockInMask) {
2874   // Attempt to issue a wide load.
2875   LoadInst *LI = dyn_cast<LoadInst>(Instr);
2876   StoreInst *SI = dyn_cast<StoreInst>(Instr);
2877 
2878   assert((LI || SI) && "Invalid Load/Store instruction");
2879   assert((!SI || StoredValue) && "No stored value provided for widened store");
2880   assert((!LI || !StoredValue) && "Stored value provided for widened load");
2881 
2882   LoopVectorizationCostModel::InstWidening Decision =
2883       Cost->getWideningDecision(Instr, VF);
2884   assert((Decision == LoopVectorizationCostModel::CM_Widen ||
2885           Decision == LoopVectorizationCostModel::CM_Widen_Reverse ||
2886           Decision == LoopVectorizationCostModel::CM_GatherScatter) &&
2887          "CM decision is not to widen the memory instruction");
2888 
2889   Type *ScalarDataTy = getLoadStoreType(Instr);
2890 
2891   auto *DataTy = VectorType::get(ScalarDataTy, VF);
2892   const Align Alignment = getLoadStoreAlignment(Instr);
2893 
2894   // Determine if the pointer operand of the access is either consecutive or
2895   // reverse consecutive.
2896   bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse);
2897   bool ConsecutiveStride =
2898       Reverse || (Decision == LoopVectorizationCostModel::CM_Widen);
2899   bool CreateGatherScatter =
2900       (Decision == LoopVectorizationCostModel::CM_GatherScatter);
2901 
2902   // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector
2903   // gather/scatter. Otherwise Decision should have been to Scalarize.
2904   assert((ConsecutiveStride || CreateGatherScatter) &&
2905          "The instruction should be scalarized");
2906   (void)ConsecutiveStride;
2907 
2908   VectorParts BlockInMaskParts(UF);
2909   bool isMaskRequired = BlockInMask;
2910   if (isMaskRequired)
2911     for (unsigned Part = 0; Part < UF; ++Part)
2912       BlockInMaskParts[Part] = State.get(BlockInMask, Part);
2913 
2914   const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
2915     // Calculate the pointer for the specific unroll-part.
2916     GetElementPtrInst *PartPtr = nullptr;
2917 
2918     bool InBounds = false;
2919     if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
2920       InBounds = gep->isInBounds();
2921     if (Reverse) {
      // If the address is consecutive but reversed, then the
      // wide store needs to start at the last vector element.
      // RunTimeVF = VScale * VF.getKnownMinValue(); for fixed-width vectors
      // VScale is 1, so RunTimeVF = VF.getKnownMinValue().
2926       Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), VF);
2927       // NumElt = -Part * RunTimeVF
2928       Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF);
2929       // LastLane = 1 - RunTimeVF
2930       Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF);
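      // Illustrative numbers for a fixed VF of 4: RunTimeVF is 4, so for
      // Part 1 NumElt is -4 and LastLane is -3; the two GEPs below move Ptr
      // back by 7 elements, the lowest address accessed by that part.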
2931       PartPtr =
2932           cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt));
2933       PartPtr->setIsInBounds(InBounds);
2934       PartPtr = cast<GetElementPtrInst>(
2935           Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane));
2936       PartPtr->setIsInBounds(InBounds);
2937       if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
2938         BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]);
2939     } else {
2940       Value *Increment = createStepForVF(Builder, Builder.getInt32(Part), VF);
2941       PartPtr = cast<GetElementPtrInst>(
2942           Builder.CreateGEP(ScalarDataTy, Ptr, Increment));
2943       PartPtr->setIsInBounds(InBounds);
2944     }
2945 
2946     unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
2947     return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
2948   };
2949 
2950   // Handle Stores:
2951   if (SI) {
2952     setDebugLocFromInst(Builder, SI);
2953 
2954     for (unsigned Part = 0; Part < UF; ++Part) {
2955       Instruction *NewSI = nullptr;
2956       Value *StoredVal = State.get(StoredValue, Part);
2957       if (CreateGatherScatter) {
2958         Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2959         Value *VectorGep = State.get(Addr, Part);
2960         NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
2961                                             MaskPart);
2962       } else {
2963         if (Reverse) {
2964           // If we store to reverse consecutive memory locations, then we need
2965           // to reverse the order of elements in the stored value.
2966           StoredVal = reverseVector(StoredVal);
2967           // We don't want to update the value in the map as it might be used in
2968           // another expression. So don't call resetVectorValue(StoredVal).
2969         }
2970         auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0)));
2971         if (isMaskRequired)
2972           NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
2973                                             BlockInMaskParts[Part]);
2974         else
2975           NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
2976       }
2977       addMetadata(NewSI, SI);
2978     }
2979     return;
2980   }
2981 
2982   // Handle loads.
2983   assert(LI && "Must have a load instruction");
2984   setDebugLocFromInst(Builder, LI);
2985   for (unsigned Part = 0; Part < UF; ++Part) {
2986     Value *NewLI;
2987     if (CreateGatherScatter) {
2988       Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2989       Value *VectorGep = State.get(Addr, Part);
2990       NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart,
2991                                          nullptr, "wide.masked.gather");
2992       addMetadata(NewLI, LI);
2993     } else {
2994       auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0)));
2995       if (isMaskRequired)
2996         NewLI = Builder.CreateMaskedLoad(
2997             VecPtr, Alignment, BlockInMaskParts[Part], PoisonValue::get(DataTy),
2998             "wide.masked.load");
2999       else
3000         NewLI =
3001             Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
3002 
      // Add metadata to the load itself; the value recorded in State below is
      // the reverse shuffle when the access is reversed.
3004       addMetadata(NewLI, LI);
3005       if (Reverse)
3006         NewLI = reverseVector(NewLI);
3007     }
3008 
3009     State.set(Def, NewLI, Part);
3010   }
3011 }
3012 
3013 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, VPValue *Def,
3014                                                VPUser &User,
3015                                                const VPIteration &Instance,
3016                                                bool IfPredicateInstr,
3017                                                VPTransformState &State) {
3018   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
3019 
3020   // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for
3021   // the first lane and part.
3022   if (isa<NoAliasScopeDeclInst>(Instr))
3023     if (!Instance.isFirstIteration())
3024       return;
3025 
3026   setDebugLocFromInst(Builder, Instr);
3027 
  // Does this instruction return a value?
3029   bool IsVoidRetTy = Instr->getType()->isVoidTy();
3030 
3031   Instruction *Cloned = Instr->clone();
3032   if (!IsVoidRetTy)
3033     Cloned->setName(Instr->getName() + ".cloned");
3034 
3035   State.Builder.SetInsertPoint(Builder.GetInsertBlock(),
3036                                Builder.GetInsertPoint());
3037   // Replace the operands of the cloned instructions with their scalar
3038   // equivalents in the new loop.
3039   for (unsigned op = 0, e = User.getNumOperands(); op != e; ++op) {
3040     auto *Operand = dyn_cast<Instruction>(Instr->getOperand(op));
3041     auto InputInstance = Instance;
3042     if (!Operand || !OrigLoop->contains(Operand) ||
3043         (Cost->isUniformAfterVectorization(Operand, State.VF)))
3044       InputInstance.Lane = VPLane::getFirstLane();
3045     auto *NewOp = State.get(User.getOperand(op), InputInstance);
3046     Cloned->setOperand(op, NewOp);
3047   }
3048   addNewMetadata(Cloned, Instr);
3049 
3050   // Place the cloned scalar in the new loop.
3051   Builder.Insert(Cloned);
3052 
3053   State.set(Def, Cloned, Instance);
3054 
  // If we just cloned a new assumption, add it to the assumption cache.
3056   if (auto *II = dyn_cast<AssumeInst>(Cloned))
3057     AC->registerAssumption(II);
3058 
3059   // End if-block.
3060   if (IfPredicateInstr)
3061     PredicatedInstructions.push_back(Cloned);
3062 }
3063 
3064 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
3065                                                       Value *End, Value *Step,
3066                                                       Instruction *DL) {
3067   BasicBlock *Header = L->getHeader();
3068   BasicBlock *Latch = L->getLoopLatch();
3069   // As we're just creating this loop, it's possible no latch exists
3070   // yet. If so, use the header as this will be a single block loop.
3071   if (!Latch)
3072     Latch = Header;
3073 
3074   IRBuilder<> Builder(&*Header->getFirstInsertionPt());
3075   Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
3076   setDebugLocFromInst(Builder, OldInst);
3077   auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
3078 
3079   Builder.SetInsertPoint(Latch->getTerminator());
3080   setDebugLocFromInst(Builder, OldInst);
3081 
3082   // Create i+1 and fill the PHINode.
3083   //
3084   // If the tail is not folded, we know that End - Start >= Step (either
3085   // statically or through the minimum iteration checks). We also know that both
3086   // Start % Step == 0 and End % Step == 0. We exit the vector loop if %IV +
3087   // %Step == %End. Hence we must exit the loop before %IV + %Step unsigned
3088   // overflows and we can mark the induction increment as NUW.
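  //
  // A sketch of the generated skeleton (illustrative operand names):
  //   %index      = phi [ %Start, %preheader ], [ %index.next, %latch ]
  //   ...
  //   %index.next = add %index, %Step    ; nuw when the tail is not folded
  //   %cmp        = icmp eq %index.next, %End
  //   br i1 %cmp, label %exit, label %header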
3089   Value *Next =
3090       Builder.CreateAdd(Induction, Step, "index.next",
3091                         /*NUW=*/!Cost->foldTailByMasking(), /*NSW=*/false);
3092   Induction->addIncoming(Start, L->getLoopPreheader());
3093   Induction->addIncoming(Next, Latch);
3094   // Create the compare.
3095   Value *ICmp = Builder.CreateICmpEQ(Next, End);
3096   Builder.CreateCondBr(ICmp, L->getUniqueExitBlock(), Header);
3097 
3098   // Now we have two terminators. Remove the old one from the block.
3099   Latch->getTerminator()->eraseFromParent();
3100 
3101   return Induction;
3102 }
3103 
3104 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
3105   if (TripCount)
3106     return TripCount;
3107 
3108   assert(L && "Create Trip Count for null loop.");
3109   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3110   // Find the loop boundaries.
3111   ScalarEvolution *SE = PSE.getSE();
3112   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
3113   assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
3114          "Invalid loop count");
3115 
3116   Type *IdxTy = Legal->getWidestInductionType();
3117   assert(IdxTy && "No type for induction");
3118 
  // The exit count might have the type of i64 while the phi is i32. This can
  // happen if we have an induction variable that is sign extended before the
  // compare. The only way we get a backedge-taken count is when the induction
  // variable was signed, and as such it will not overflow. In such a case
  // truncation is legal.
3124   if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
3125       IdxTy->getPrimitiveSizeInBits())
3126     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
3127   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
3128 
3129   // Get the total trip count from the count by adding 1.
3130   const SCEV *ExitCount = SE->getAddExpr(
3131       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
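  // For example, for a loop "for (i = 0; i < n; ++i)" the backedge-taken
  // count is n - 1 and the trip count materialized here is n.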
3132 
3133   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
3134 
3135   // Expand the trip count and place the new instructions in the preheader.
3136   // Notice that the pre-header does not change, only the loop body.
3137   SCEVExpander Exp(*SE, DL, "induction");
3138 
3139   // Count holds the overall loop count (N).
3140   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
3141                                 L->getLoopPreheader()->getTerminator());
3142 
3143   if (TripCount->getType()->isPointerTy())
3144     TripCount =
3145         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
3146                                     L->getLoopPreheader()->getTerminator());
3147 
3148   return TripCount;
3149 }
3150 
3151 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
3152   if (VectorTripCount)
3153     return VectorTripCount;
3154 
3155   Value *TC = getOrCreateTripCount(L);
3156   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3157 
3158   Type *Ty = TC->getType();
3159   // This is where we can make the step a runtime constant.
3160   Value *Step = createStepForVF(Builder, ConstantInt::get(Ty, UF), VF);
3161 
3162   // If the tail is to be folded by masking, round the number of iterations N
3163   // up to a multiple of Step instead of rounding down. This is done by first
3164   // adding Step-1 and then rounding down. Note that it's ok if this addition
3165   // overflows: the vector induction variable will eventually wrap to zero given
3166   // that it starts at zero and its Step is a power of two; the loop will then
3167   // exit, with the last early-exit vector comparison also producing all-true.
3168   if (Cost->foldTailByMasking()) {
3169     assert(isPowerOf2_32(VF.getKnownMinValue() * UF) &&
3170            "VF*UF must be a power of 2 when folding tail by masking");
3171     assert(!VF.isScalable() &&
3172            "Tail folding not yet supported for scalable vectors");
3173     TC = Builder.CreateAdd(
3174         TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up");
3175   }
3176 
3177   // Now we need to generate the expression for the part of the loop that the
3178   // vectorized body will execute. This is equal to N - (N % Step) if scalar
3179   // iterations are not required for correctness, or N - Step, otherwise. Step
3180   // is equal to the vectorization factor (number of SIMD elements) times the
3181   // unroll factor (number of SIMD instructions).
3182   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
3183 
3184   // There are cases where we *must* run at least one iteration in the remainder
3185   // loop.  See the cost model for when this can happen.  If the step evenly
3186   // divides the trip count, we set the remainder to be equal to the step. If
3187   // the step does not evenly divide the trip count, no adjustment is necessary
3188   // since there will already be scalar iterations. Note that the minimum
3189   // iterations check ensures that N >= Step.
3190   if (Cost->requiresScalarEpilogue(VF)) {
3191     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
3192     R = Builder.CreateSelect(IsZero, Step, R);
3193   }
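  //
  // Illustrative numbers: with a trip count of 17 and Step = VF * UF = 8,
  // R is 1 and the vector trip count computed below is 16. If a scalar
  // epilogue is required and the trip count were 16 instead, R is bumped from
  // 0 to 8 so that 8 iterations remain for the scalar epilogue.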
3194 
3195   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
3196 
3197   return VectorTripCount;
3198 }
3199 
3200 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
3201                                                    const DataLayout &DL) {
  // Verify that V is a vector type with the same number of elements as
  // DstVTy.
3203   auto *DstFVTy = cast<FixedVectorType>(DstVTy);
3204   unsigned VF = DstFVTy->getNumElements();
3205   auto *SrcVecTy = cast<FixedVectorType>(V->getType());
3206   assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match");
3207   Type *SrcElemTy = SrcVecTy->getElementType();
3208   Type *DstElemTy = DstFVTy->getElementType();
3209   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
3210          "Vector elements must have same size");
3211 
3212   // Do a direct cast if element types are castable.
3213   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
3214     return Builder.CreateBitOrPointerCast(V, DstFVTy);
3215   }
  // V cannot be cast directly to the desired vector type. This may happen
  // when V is a floating point vector but DstVTy is a vector of pointers, or
  // vice-versa. Handle this with a two-step cast through an intermediate
  // integer type, i.e. Ptr <-> Int <-> Float.
3220   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
3221          "Only one type should be a pointer type");
3222   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
3223          "Only one type should be a floating point type");
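  //
  // Illustrative example (assuming 32-bit pointers): casting <4 x float> to
  // <4 x i8*> goes <4 x float> -> <4 x i32> -> <4 x i8*>.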
3224   Type *IntTy =
3225       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
3226   auto *VecIntTy = FixedVectorType::get(IntTy, VF);
3227   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
3228   return Builder.CreateBitOrPointerCast(CastVal, DstFVTy);
3229 }
3230 
3231 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
3232                                                          BasicBlock *Bypass) {
3233   Value *Count = getOrCreateTripCount(L);
  // Reuse the existing vector loop preheader for the TC checks.
  // Note that a new preheader block is generated for the vector loop.
3236   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
3237   IRBuilder<> Builder(TCCheckBlock->getTerminator());
3238 
3239   // Generate code to check if the loop's trip count is less than VF * UF, or
3240   // equal to it in case a scalar epilogue is required; this implies that the
3241   // vector trip count is zero. This check also covers the case where adding one
3242   // to the backedge-taken count overflowed leading to an incorrect trip count
3243   // of zero. In this case we will also jump to the scalar loop.
3244   auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE
3245                                             : ICmpInst::ICMP_ULT;
3246 
3247   // If tail is to be folded, vector loop takes care of all iterations.
3248   Value *CheckMinIters = Builder.getFalse();
3249   if (!Cost->foldTailByMasking()) {
3250     Value *Step =
3251         createStepForVF(Builder, ConstantInt::get(Count->getType(), UF), VF);
3252     CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
3253   }
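  //
  // For illustration, with VF * UF = 8: if no scalar epilogue is required we
  // branch to the scalar loop when Count < 8 (ULT); if one is required we use
  // ULE, so a count of exactly 8 also bypasses the vector loop and leaves
  // iterations for the scalar epilogue.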
3254   // Create new preheader for vector loop.
3255   LoopVectorPreHeader =
3256       SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
3257                  "vector.ph");
3258 
3259   assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
3260                                DT->getNode(Bypass)->getIDom()) &&
3261          "TC check is expected to dominate Bypass");
3262 
3263   // Update dominator for Bypass & LoopExit.
3264   DT->changeImmediateDominator(Bypass, TCCheckBlock);
3265   DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
3266 
3267   ReplaceInstWithInst(
3268       TCCheckBlock->getTerminator(),
3269       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
3270   LoopBypassBlocks.push_back(TCCheckBlock);
3271 }
3272 
3273 BasicBlock *InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
3274 
3275   BasicBlock *const SCEVCheckBlock =
3276       RTChecks.emitSCEVChecks(L, Bypass, LoopVectorPreHeader, LoopExitBlock);
3277   if (!SCEVCheckBlock)
3278     return nullptr;
3279 
3280   assert(!(SCEVCheckBlock->getParent()->hasOptSize() ||
3281            (OptForSizeBasedOnProfile &&
3282             Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) &&
3283          "Cannot SCEV check stride or overflow when optimizing for size");
3286   // Update dominator only if this is first RT check.
3287   if (LoopBypassBlocks.empty()) {
3288     DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
3289     DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
3290   }
3291 
3292   LoopBypassBlocks.push_back(SCEVCheckBlock);
3293   AddedSafetyChecks = true;
3294   return SCEVCheckBlock;
3295 }
3296 
3297 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L,
3298                                                       BasicBlock *Bypass) {
3299   // VPlan-native path does not do any analysis for runtime checks currently.
3300   if (EnableVPlanNativePath)
3301     return nullptr;
3302 
3303   BasicBlock *const MemCheckBlock =
3304       RTChecks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader);
3305 
  // Check if we generated code that checks at runtime whether arrays overlap.
  // We put the checks into a separate block to make the more common case of
  // few elements faster.
3309   if (!MemCheckBlock)
3310     return nullptr;
3311 
3312   if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
3313     assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
3314            "Cannot emit memory checks when optimizing for size, unless forced "
3315            "to vectorize.");
3316     ORE->emit([&]() {
3317       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
3318                                         L->getStartLoc(), L->getHeader())
3319              << "Code-size may be reduced by not forcing "
3320                 "vectorization, or by source-code modifications "
3321                 "eliminating the need for runtime checks "
3322                 "(e.g., adding 'restrict').";
3323     });
3324   }
3325 
3326   LoopBypassBlocks.push_back(MemCheckBlock);
3327 
3328   AddedSafetyChecks = true;
3329 
3330   // We currently don't use LoopVersioning for the actual loop cloning but we
3331   // still use it to add the noalias metadata.
3332   LVer = std::make_unique<LoopVersioning>(
3333       *Legal->getLAI(),
3334       Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
3335       DT, PSE.getSE());
3336   LVer->prepareNoAliasMetadata();
3337   return MemCheckBlock;
3338 }
3339 
3340 Value *InnerLoopVectorizer::emitTransformedIndex(
3341     IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
3342     const InductionDescriptor &ID) const {
3343 
3344   SCEVExpander Exp(*SE, DL, "induction");
3345   auto Step = ID.getStep();
3346   auto StartValue = ID.getStartValue();
3347   assert(Index->getType()->getScalarType() == Step->getType() &&
3348          "Index scalar type does not match StepValue type");
3349 
  // Note: the IR at this point is broken. We cannot use SE to create any new
  // SCEV and then expand it, hoping that SCEV's simplification will give us
  // more optimal code. Unfortunately, attempting to do so on invalid IR may
  // lead to various SCEV crashes. So all we can do is use the builder and
  // rely on InstCombine for future simplifications. Here we handle only some
  // trivial cases.
3356   auto CreateAdd = [&B](Value *X, Value *Y) {
3357     assert(X->getType() == Y->getType() && "Types don't match!");
3358     if (auto *CX = dyn_cast<ConstantInt>(X))
3359       if (CX->isZero())
3360         return Y;
3361     if (auto *CY = dyn_cast<ConstantInt>(Y))
3362       if (CY->isZero())
3363         return X;
3364     return B.CreateAdd(X, Y);
3365   };
3366 
3367   // We allow X to be a vector type, in which case Y will potentially be
3368   // splatted into a vector with the same element count.
3369   auto CreateMul = [&B](Value *X, Value *Y) {
3370     assert(X->getType()->getScalarType() == Y->getType() &&
3371            "Types don't match!");
3372     if (auto *CX = dyn_cast<ConstantInt>(X))
3373       if (CX->isOne())
3374         return Y;
3375     if (auto *CY = dyn_cast<ConstantInt>(Y))
3376       if (CY->isOne())
3377         return X;
3378     VectorType *XVTy = dyn_cast<VectorType>(X->getType());
3379     if (XVTy && !isa<VectorType>(Y->getType()))
3380       Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
3381     return B.CreateMul(X, Y);
3382   };
3383 
3384   // Get a suitable insert point for SCEV expansion. For blocks in the vector
3385   // loop, choose the end of the vector loop header (=LoopVectorBody), because
3386   // the DomTree is not kept up-to-date for additional blocks generated in the
3387   // vector loop. By using the header as insertion point, we guarantee that the
3388   // expanded instructions dominate all their uses.
3389   auto GetInsertPoint = [this, &B]() {
3390     BasicBlock *InsertBB = B.GetInsertPoint()->getParent();
3391     if (InsertBB != LoopVectorBody &&
3392         LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB))
3393       return LoopVectorBody->getTerminator();
3394     return &*B.GetInsertPoint();
3395   };
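  // Conceptually the transformed index is StartValue op (Index * Step). As an
  // illustration, an integer induction starting at 10 with step 3 maps an
  // Index of 4 to 10 + 4 * 3 = 22; the pointer and FP cases below use a GEP
  // and an FAdd/FSub respectively.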
3396 
3397   switch (ID.getKind()) {
3398   case InductionDescriptor::IK_IntInduction: {
3399     assert(!isa<VectorType>(Index->getType()) &&
3400            "Vector indices not supported for integer inductions yet");
3401     assert(Index->getType() == StartValue->getType() &&
3402            "Index type does not match StartValue type");
3403     if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
3404       return B.CreateSub(StartValue, Index);
3405     auto *Offset = CreateMul(
3406         Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()));
3407     return CreateAdd(StartValue, Offset);
3408   }
3409   case InductionDescriptor::IK_PtrInduction: {
3410     assert(isa<SCEVConstant>(Step) &&
3411            "Expected constant step for pointer induction");
3412     return B.CreateGEP(
3413         StartValue->getType()->getPointerElementType(), StartValue,
3414         CreateMul(Index,
3415                   Exp.expandCodeFor(Step, Index->getType()->getScalarType(),
3416                                     GetInsertPoint())));
3417   }
3418   case InductionDescriptor::IK_FpInduction: {
3419     assert(!isa<VectorType>(Index->getType()) &&
3420            "Vector indices not supported for FP inductions yet");
3421     assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
3422     auto InductionBinOp = ID.getInductionBinOp();
3423     assert(InductionBinOp &&
3424            (InductionBinOp->getOpcode() == Instruction::FAdd ||
3425             InductionBinOp->getOpcode() == Instruction::FSub) &&
3426            "Original bin op should be defined for FP induction");
3427 
3428     Value *StepValue = cast<SCEVUnknown>(Step)->getValue();
3429     Value *MulExp = B.CreateFMul(StepValue, Index);
3430     return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
3431                          "induction");
3432   }
3433   case InductionDescriptor::IK_NoInduction:
3434     return nullptr;
3435   }
3436   llvm_unreachable("invalid enum");
3437 }
3438 
3439 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
3440   LoopScalarBody = OrigLoop->getHeader();
3441   LoopVectorPreHeader = OrigLoop->getLoopPreheader();
3442   LoopExitBlock = OrigLoop->getUniqueExitBlock();
3443   assert(LoopExitBlock && "Must have an exit block");
3444   assert(LoopVectorPreHeader && "Invalid loop structure");
3445 
3446   LoopMiddleBlock =
3447       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3448                  LI, nullptr, Twine(Prefix) + "middle.block");
3449   LoopScalarPreHeader =
3450       SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
3451                  nullptr, Twine(Prefix) + "scalar.ph");
3452 
3453   // Set up branch from middle block to the exit and scalar preheader blocks.
3454   // completeLoopSkeleton will update the condition to use an iteration check,
3455   // if required to decide whether to execute the remainder.
3456   BranchInst *BrInst =
3457       BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, Builder.getTrue());
3458   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3459   BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3460   ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
3461 
  // We intentionally don't let SplitBlock update LoopInfo, since
  // LoopVectorBody should belong to a different loop than
  // LoopVectorPreHeader. LoopVectorBody is explicitly added to the correct
  // place a few lines later.
3465   LoopVectorBody =
3466       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3467                  nullptr, nullptr, Twine(Prefix) + "vector.body");
3468 
3469   // Update dominator for loop exit.
3470   DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
3471 
3472   // Create and register the new vector loop.
3473   Loop *Lp = LI->AllocateLoop();
3474   Loop *ParentLoop = OrigLoop->getParentLoop();
3475 
3476   // Insert the new loop into the loop nest and register the new basic blocks
3477   // before calling any utilities such as SCEV that require valid LoopInfo.
3478   if (ParentLoop) {
3479     ParentLoop->addChildLoop(Lp);
3480   } else {
3481     LI->addTopLevelLoop(Lp);
3482   }
3483   Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
3484   return Lp;
3485 }
3486 
3487 void InnerLoopVectorizer::createInductionResumeValues(
3488     Loop *L, Value *VectorTripCount,
3489     std::pair<BasicBlock *, Value *> AdditionalBypass) {
3490   assert(VectorTripCount && L && "Expected valid arguments");
3491   assert(((AdditionalBypass.first && AdditionalBypass.second) ||
3492           (!AdditionalBypass.first && !AdditionalBypass.second)) &&
3493          "Inconsistent information about additional bypass.");
3494   // We are going to resume the execution of the scalar loop.
3495   // Go over all of the induction variables that we found and fix the
3496   // PHIs that are left in the scalar version of the loop.
3497   // The starting values of PHI nodes depend on the counter of the last
3498   // iteration in the vectorized loop.
3499   // If we come from a bypass edge then we need to start from the original
3500   // start value.
3501   for (auto &InductionEntry : Legal->getInductionVars()) {
3502     PHINode *OrigPhi = InductionEntry.first;
3503     InductionDescriptor II = InductionEntry.second;
3504 
    // Create phi nodes to merge from the backedge-taken check block.
3506     PHINode *BCResumeVal =
3507         PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
3508                         LoopScalarPreHeader->getTerminator());
3509     // Copy original phi DL over to the new one.
3510     BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
3511     Value *&EndValue = IVEndValues[OrigPhi];
3512     Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
3513     if (OrigPhi == OldInduction) {
3514       // We know what the end value is.
3515       EndValue = VectorTripCount;
3516     } else {
3517       IRBuilder<> B(L->getLoopPreheader()->getTerminator());
3518 
3519       // Fast-math-flags propagate from the original induction instruction.
3520       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3521         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3522 
3523       Type *StepType = II.getStep()->getType();
3524       Instruction::CastOps CastOp =
3525           CastInst::getCastOpcode(VectorTripCount, true, StepType, true);
3526       Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd");
3527       const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout();
3528       EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
3529       EndValue->setName("ind.end");
3530 
3531       // Compute the end value for the additional bypass (if applicable).
3532       if (AdditionalBypass.first) {
3533         B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt()));
3534         CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true,
3535                                          StepType, true);
3536         CRD =
3537             B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd");
3538         EndValueFromAdditionalBypass =
3539             emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
3540         EndValueFromAdditionalBypass->setName("ind.end");
3541       }
3542     }
3543     // The new PHI merges the original incoming value, in case of a bypass,
3544     // or the value at the end of the vectorized loop.
3545     BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);
3546 
3547     // Fix the scalar body counter (PHI node).
3548     // The old induction's phi node in the scalar body needs the truncated
3549     // value.
3550     for (BasicBlock *BB : LoopBypassBlocks)
3551       BCResumeVal->addIncoming(II.getStartValue(), BB);
3552 
3553     if (AdditionalBypass.first)
3554       BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first,
3555                                             EndValueFromAdditionalBypass);
3556 
3557     OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
3558   }
3559 }
3560 
3561 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L,
3562                                                       MDNode *OrigLoopID) {
3563   assert(L && "Expected valid loop.");
3564 
3565   // The trip counts should be cached by now.
3566   Value *Count = getOrCreateTripCount(L);
3567   Value *VectorTripCount = getOrCreateVectorTripCount(L);
3568 
3569   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3570 
3571   // Add a check in the middle block to see if we have completed
3572   // all of the iterations in the first vector loop.
3573   // If (N - N%VF) == N, then we *don't* need to run the remainder.
3574   // If tail is to be folded, we know we don't need to run the remainder.
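  // For example, if N == 10 and VF * UF == 4, the vector trip count is 8;
  // cmp.n (10 == 8) is false and the scalar remainder runs the last two
  // iterations.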
3575   if (!Cost->foldTailByMasking()) {
3576     Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
3577                                         Count, VectorTripCount, "cmp.n",
3578                                         LoopMiddleBlock->getTerminator());
3579 
    // Here we use the same DebugLoc as the scalar loop latch terminator instead
    // of the corresponding compare because they may have ended up with
    // different line numbers and we want to avoid awkward line stepping while
    // debugging. E.g., if the compare has a line number inside the loop.
3584     CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3585     cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN);
3586   }
3587 
3588   // Get ready to start creating new instructions into the vectorized body.
3589   assert(LoopVectorPreHeader == L->getLoopPreheader() &&
3590          "Inconsistent vector loop preheader");
3591   Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
3592 
3593   Optional<MDNode *> VectorizedLoopID =
3594       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
3595                                       LLVMLoopVectorizeFollowupVectorized});
3596   if (VectorizedLoopID.hasValue()) {
3597     L->setLoopID(VectorizedLoopID.getValue());
3598 
3599     // Do not setAlreadyVectorized if loop attributes have been defined
3600     // explicitly.
3601     return LoopVectorPreHeader;
3602   }
3603 
3604   // Keep all loop hints from the original loop on the vector loop (we'll
3605   // replace the vectorizer-specific hints below).
3606   if (MDNode *LID = OrigLoop->getLoopID())
3607     L->setLoopID(LID);
3608 
3609   LoopVectorizeHints Hints(L, true, *ORE);
3610   Hints.setAlreadyVectorized();
3611 
3612 #ifdef EXPENSIVE_CHECKS
3613   assert(DT->verify(DominatorTree::VerificationLevel::Fast));
3614   LI->verify(*DT);
3615 #endif
3616 
3617   return LoopVectorPreHeader;
3618 }
3619 
3620 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
3621   /*
3622    In this function we generate a new loop. The new loop will contain
3623    the vectorized instructions while the old loop will continue to run the
3624    scalar remainder.
3625 
3626        [ ] <-- loop iteration number check.
3627     /   |
3628    /    v
3629   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
3630   |  /  |
3631   | /   v
3632   ||   [ ]     <-- vector pre header.
3633   |/    |
3634   |     v
3635   |    [  ] \
3636   |    [  ]_|   <-- vector loop.
3637   |     |
3638   |     v
3639   |   -[ ]   <--- middle-block.
3640   |  /  |
3641   | /   v
3642   -|- >[ ]     <--- new preheader.
3643    |    |
3644    |    v
3645    |   [ ] \
3646    |   [ ]_|   <-- old scalar loop to handle remainder.
3647     \   |
3648      \  v
3649       >[ ]     <-- exit block.
3650    ...
3651    */
3652 
3653   // Get the metadata of the original loop before it gets modified.
3654   MDNode *OrigLoopID = OrigLoop->getLoopID();
3655 
3656   // Workaround!  Compute the trip count of the original loop and cache it
3657   // before we start modifying the CFG.  This code has a systemic problem
3658   // wherein it tries to run analysis over partially constructed IR; this is
3659   // wrong, and not simply for SCEV.  The trip count of the original loop
3660   // simply happens to be prone to hitting this in practice.  In theory, we
3661   // can hit the same issue for any SCEV, or ValueTracking query done during
3662   // mutation.  See PR49900.
3663   getOrCreateTripCount(OrigLoop);
3664 
3665   // Create an empty vector loop, and prepare basic blocks for the runtime
3666   // checks.
3667   Loop *Lp = createVectorLoopSkeleton("");
3668 
3669   // Now, compare the new count to zero. If it is zero skip the vector loop and
3670   // jump to the scalar loop. This check also covers the case where the
3671   // backedge-taken count is uint##_max: adding one to it will overflow leading
3672   // to an incorrect trip count of zero. In this (rare) case we will also jump
3673   // to the scalar loop.
3674   emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader);
3675 
3676   // Generate the code to check any assumptions that we've made for SCEV
3677   // expressions.
3678   emitSCEVChecks(Lp, LoopScalarPreHeader);
3679 
3680   // Generate the code that checks in runtime if arrays overlap. We put the
3681   // checks into a separate block to make the more common case of few elements
3682   // faster.
3683   emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
3684 
  // Some loops have a single integer induction variable, while other loops
  // don't. One example is loops using C++ iterators, which often have multiple
  // pointer induction variables. In the code below we also support the case
  // where we don't have a single induction variable.
  //
  // We try as hard as possible to obtain an induction variable from the
  // original loop. However, if we don't find one that:
  //   - is an integer
  //   - counts from zero, stepping by one
  //   - is the size of the widest induction variable type
  // then we create a new one.
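  // For example (illustrative only), a loop such as
  //   for (int *P = A; P != A + N; ++P) *P = 0;
  // has only a pointer induction, so a new zero-based integer counter of the
  // widest induction type drives the vector loop instead.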
3696   OldInduction = Legal->getPrimaryInduction();
3697   Type *IdxTy = Legal->getWidestInductionType();
3698   Value *StartIdx = ConstantInt::get(IdxTy, 0);
3699   // The loop step is equal to the vectorization factor (num of SIMD elements)
3700   // times the unroll factor (num of SIMD instructions).
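  // For example, with VF == 4 and UF == 2 the counter advances by 8 per
  // vector iteration (scaled by vscale for scalable vectors).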
3701   Builder.SetInsertPoint(&*Lp->getHeader()->getFirstInsertionPt());
3702   Value *Step = createStepForVF(Builder, ConstantInt::get(IdxTy, UF), VF);
3703   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
3704   Induction =
3705       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
3706                               getDebugLocFromInstOrOperands(OldInduction));
3707 
3708   // Emit phis for the new starting index of the scalar loop.
3709   createInductionResumeValues(Lp, CountRoundDown);
3710 
3711   return completeLoopSkeleton(Lp, OrigLoopID);
3712 }
3713 
3714 // Fix up external users of the induction variable. At this point, we are
3715 // in LCSSA form, with all external PHIs that use the IV having one input value,
3716 // coming from the remainder loop. We need those PHIs to also have a correct
3717 // value for the IV when arriving directly from the middle block.
3718 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3719                                        const InductionDescriptor &II,
3720                                        Value *CountRoundDown, Value *EndValue,
3721                                        BasicBlock *MiddleBlock) {
  // There are two kinds of external IV usages - those that use the value
  // computed in the last iteration (the update that feeds the phi from the
  // loop latch) and those that use the penultimate value (the phi itself).
  // We allow both, but they obviously have different values.
3726 
3727   assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");
3728 
3729   DenseMap<Value *, Value *> MissingVals;
3730 
3731   // An external user of the last iteration's value should see the value that
3732   // the remainder loop uses to initialize its own IV.
3733   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3734   for (User *U : PostInc->users()) {
3735     Instruction *UI = cast<Instruction>(U);
3736     if (!OrigLoop->contains(UI)) {
3737       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3738       MissingVals[UI] = EndValue;
3739     }
3740   }
3741 
  // An external user of the penultimate value needs to see EndValue - Step.
  // The simplest way to get this is to recompute it from the constituent SCEVs,
  // that is Start + (Step * (CRD - 1)).
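  // For example (illustrative), for an IV with start %start and step %step,
  // and a vector trip count (CRD) of 8, such a user sees
  // %start + %step * 7 rather than %start + %step * 8.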
3745   for (User *U : OrigPhi->users()) {
3746     auto *UI = cast<Instruction>(U);
3747     if (!OrigLoop->contains(UI)) {
3748       const DataLayout &DL =
3749           OrigLoop->getHeader()->getModule()->getDataLayout();
3750       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3751 
3752       IRBuilder<> B(MiddleBlock->getTerminator());
3753 
3754       // Fast-math-flags propagate from the original induction instruction.
3755       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3756         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3757 
3758       Value *CountMinusOne = B.CreateSub(
3759           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3760       Value *CMO =
3761           !II.getStep()->getType()->isIntegerTy()
3762               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3763                              II.getStep()->getType())
3764               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3765       CMO->setName("cast.cmo");
3766       Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II);
3767       Escape->setName("ind.escape");
3768       MissingVals[UI] = Escape;
3769     }
3770   }
3771 
3772   for (auto &I : MissingVals) {
3773     PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
3775     // that is %IV2 = phi [...], [ %IV1, %latch ]
3776     // In this case, if IV1 has an external use, we need to avoid adding both
3777     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3778     // don't already have an incoming value for the middle block.
3779     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3780       PHI->addIncoming(I.second, MiddleBlock);
3781   }
3782 }
3783 
3784 namespace {
3785 
3786 struct CSEDenseMapInfo {
3787   static bool canHandle(const Instruction *I) {
3788     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3789            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3790   }
3791 
3792   static inline Instruction *getEmptyKey() {
3793     return DenseMapInfo<Instruction *>::getEmptyKey();
3794   }
3795 
3796   static inline Instruction *getTombstoneKey() {
3797     return DenseMapInfo<Instruction *>::getTombstoneKey();
3798   }
3799 
3800   static unsigned getHashValue(const Instruction *I) {
3801     assert(canHandle(I) && "Unknown instruction!");
3802     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3803                                                            I->value_op_end()));
3804   }
3805 
3806   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3807     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3808         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3809       return LHS == RHS;
3810     return LHS->isIdenticalTo(RHS);
3811   }
3812 };
3813 
3814 } // end anonymous namespace
3815 
/// Perform CSE of induction variable instructions.
3817 static void cse(BasicBlock *BB) {
  // Perform simple CSE.
3819   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3820   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
3821     Instruction *In = &*I++;
3822 
3823     if (!CSEDenseMapInfo::canHandle(In))
3824       continue;
3825 
3826     // Check if we can replace this instruction with any of the
3827     // visited instructions.
3828     if (Instruction *V = CSEMap.lookup(In)) {
3829       In->replaceAllUsesWith(V);
3830       In->eraseFromParent();
3831       continue;
3832     }
3833 
3834     CSEMap[In] = In;
3835   }
3836 }
3837 
3838 InstructionCost
3839 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF,
3840                                               bool &NeedToScalarize) const {
3841   Function *F = CI->getCalledFunction();
3842   Type *ScalarRetTy = CI->getType();
3843   SmallVector<Type *, 4> Tys, ScalarTys;
3844   for (auto &ArgOp : CI->arg_operands())
3845     ScalarTys.push_back(ArgOp->getType());
3846 
3847   // Estimate cost of scalarized vector call. The source operands are assumed
3848   // to be vectors, so we need to extract individual elements from there,
3849   // execute VF scalar calls, and then gather the result into the vector return
3850   // value.
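  // For example (a rough sketch), with VF == 4 a call
  //   %r = call float @foo(float %a)
  // is costed as four scalar calls to @foo plus the overhead of extracting
  // the four lanes of the vectorized %a and inserting the four results back
  // into a vector.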
3851   InstructionCost ScalarCallCost =
3852       TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput);
3853   if (VF.isScalar())
3854     return ScalarCallCost;
3855 
3856   // Compute corresponding vector type for return value and arguments.
3857   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3858   for (Type *ScalarTy : ScalarTys)
3859     Tys.push_back(ToVectorTy(ScalarTy, VF));
3860 
3861   // Compute costs of unpacking argument values for the scalar calls and
3862   // packing the return values to a vector.
3863   InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
3864 
3865   InstructionCost Cost =
3866       ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
3867 
3868   // If we can't emit a vector call for this function, then the currently found
3869   // cost is the cost we need to return.
3870   NeedToScalarize = true;
3871   VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
3872   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3873 
3874   if (!TLI || CI->isNoBuiltin() || !VecFunc)
3875     return Cost;
3876 
3877   // If the corresponding vector cost is cheaper, return its cost.
3878   InstructionCost VectorCallCost =
3879       TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput);
3880   if (VectorCallCost < Cost) {
3881     NeedToScalarize = false;
3882     Cost = VectorCallCost;
3883   }
3884   return Cost;
3885 }
3886 
3887 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) {
3888   if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy()))
3889     return Elt;
3890   return VectorType::get(Elt, VF);
3891 }
3892 
3893 InstructionCost
3894 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
3895                                                    ElementCount VF) const {
3896   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3897   assert(ID && "Expected intrinsic call!");
3898   Type *RetTy = MaybeVectorizeType(CI->getType(), VF);
3899   FastMathFlags FMF;
3900   if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
3901     FMF = FPMO->getFastMathFlags();
3902 
3903   SmallVector<const Value *> Arguments(CI->arg_begin(), CI->arg_end());
3904   FunctionType *FTy = CI->getCalledFunction()->getFunctionType();
3905   SmallVector<Type *> ParamTys;
3906   std::transform(FTy->param_begin(), FTy->param_end(),
3907                  std::back_inserter(ParamTys),
3908                  [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); });
3909 
3910   IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
3911                                     dyn_cast<IntrinsicInst>(CI));
3912   return TTI.getIntrinsicInstrCost(CostAttrs,
3913                                    TargetTransformInfo::TCK_RecipThroughput);
3914 }
3915 
3916 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3917   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3918   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3919   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3920 }
3921 
3922 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3923   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3924   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3925   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3926 }
3927 
3928 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) {
3929   // For every instruction `I` in MinBWs, truncate the operands, create a
3930   // truncated version of `I` and reextend its result. InstCombine runs
3931   // later and will remove any ext/trunc pairs.
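  // For example (roughly), if MinBWs records that an i32 add only needs 8
  // bits, a widened <4 x i32> add is rewritten as:
  //   %a.tr = trunc <4 x i32> %a to <4 x i8>
  //   %b.tr = trunc <4 x i32> %b to <4 x i8>
  //   %s.tr = add <4 x i8> %a.tr, %b.tr
  //   %s    = zext <4 x i8> %s.tr to <4 x i32>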
3932   SmallPtrSet<Value *, 4> Erased;
3933   for (const auto &KV : Cost->getMinimalBitwidths()) {
3934     // If the value wasn't vectorized, we must maintain the original scalar
3935     // type. The absence of the value from State indicates that it
3936     // wasn't vectorized.
3937     VPValue *Def = State.Plan->getVPValue(KV.first);
3938     if (!State.hasAnyVectorValue(Def))
3939       continue;
3940     for (unsigned Part = 0; Part < UF; ++Part) {
3941       Value *I = State.get(Def, Part);
3942       if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
3943         continue;
3944       Type *OriginalTy = I->getType();
3945       Type *ScalarTruncatedTy =
3946           IntegerType::get(OriginalTy->getContext(), KV.second);
3947       auto *TruncatedTy = FixedVectorType::get(
3948           ScalarTruncatedTy,
3949           cast<FixedVectorType>(OriginalTy)->getNumElements());
3950       if (TruncatedTy == OriginalTy)
3951         continue;
3952 
3953       IRBuilder<> B(cast<Instruction>(I));
3954       auto ShrinkOperand = [&](Value *V) -> Value * {
3955         if (auto *ZI = dyn_cast<ZExtInst>(V))
3956           if (ZI->getSrcTy() == TruncatedTy)
3957             return ZI->getOperand(0);
3958         return B.CreateZExtOrTrunc(V, TruncatedTy);
3959       };
3960 
3961       // The actual instruction modification depends on the instruction type,
3962       // unfortunately.
3963       Value *NewI = nullptr;
3964       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3965         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3966                              ShrinkOperand(BO->getOperand(1)));
3967 
3968         // Any wrapping introduced by shrinking this operation shouldn't be
3969         // considered undefined behavior. So, we can't unconditionally copy
3970         // arithmetic wrapping flags to NewI.
3971         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3972       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3973         NewI =
3974             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3975                          ShrinkOperand(CI->getOperand(1)));
3976       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3977         NewI = B.CreateSelect(SI->getCondition(),
3978                               ShrinkOperand(SI->getTrueValue()),
3979                               ShrinkOperand(SI->getFalseValue()));
3980       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3981         switch (CI->getOpcode()) {
3982         default:
3983           llvm_unreachable("Unhandled cast!");
3984         case Instruction::Trunc:
3985           NewI = ShrinkOperand(CI->getOperand(0));
3986           break;
3987         case Instruction::SExt:
3988           NewI = B.CreateSExtOrTrunc(
3989               CI->getOperand(0),
3990               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3991           break;
3992         case Instruction::ZExt:
3993           NewI = B.CreateZExtOrTrunc(
3994               CI->getOperand(0),
3995               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3996           break;
3997         }
3998       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
3999         auto Elements0 = cast<FixedVectorType>(SI->getOperand(0)->getType())
4000                              ->getNumElements();
4001         auto *O0 = B.CreateZExtOrTrunc(
4002             SI->getOperand(0),
4003             FixedVectorType::get(ScalarTruncatedTy, Elements0));
4004         auto Elements1 = cast<FixedVectorType>(SI->getOperand(1)->getType())
4005                              ->getNumElements();
4006         auto *O1 = B.CreateZExtOrTrunc(
4007             SI->getOperand(1),
4008             FixedVectorType::get(ScalarTruncatedTy, Elements1));
4009 
4010         NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
4011       } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
4012         // Don't do anything with the operands, just extend the result.
4013         continue;
4014       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
4015         auto Elements = cast<FixedVectorType>(IE->getOperand(0)->getType())
4016                             ->getNumElements();
4017         auto *O0 = B.CreateZExtOrTrunc(
4018             IE->getOperand(0),
4019             FixedVectorType::get(ScalarTruncatedTy, Elements));
4020         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
4021         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
4022       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
4023         auto Elements = cast<FixedVectorType>(EE->getOperand(0)->getType())
4024                             ->getNumElements();
4025         auto *O0 = B.CreateZExtOrTrunc(
4026             EE->getOperand(0),
4027             FixedVectorType::get(ScalarTruncatedTy, Elements));
4028         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
4029       } else {
4030         // If we don't know what to do, be conservative and don't do anything.
4031         continue;
4032       }
4033 
4034       // Lastly, extend the result.
4035       NewI->takeName(cast<Instruction>(I));
4036       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
4037       I->replaceAllUsesWith(Res);
4038       cast<Instruction>(I)->eraseFromParent();
4039       Erased.insert(I);
4040       State.reset(Def, Res, Part);
4041     }
4042   }
4043 
  // We'll have created a number of zexts that are now dead. Clean them up.
4045   for (const auto &KV : Cost->getMinimalBitwidths()) {
4046     // If the value wasn't vectorized, we must maintain the original scalar
4047     // type. The absence of the value from State indicates that it
4048     // wasn't vectorized.
4049     VPValue *Def = State.Plan->getVPValue(KV.first);
4050     if (!State.hasAnyVectorValue(Def))
4051       continue;
4052     for (unsigned Part = 0; Part < UF; ++Part) {
4053       Value *I = State.get(Def, Part);
4054       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
4055       if (Inst && Inst->use_empty()) {
4056         Value *NewI = Inst->getOperand(0);
4057         Inst->eraseFromParent();
4058         State.reset(Def, NewI, Part);
4059       }
4060     }
4061   }
4062 }
4063 
4064 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
4065   // Insert truncates and extends for any truncated instructions as hints to
4066   // InstCombine.
4067   if (VF.isVector())
4068     truncateToMinimalBitwidths(State);
4069 
4070   // Fix widened non-induction PHIs by setting up the PHI operands.
4071   if (OrigPHIsToFix.size()) {
4072     assert(EnableVPlanNativePath &&
4073            "Unexpected non-induction PHIs for fixup in non VPlan-native path");
4074     fixNonInductionPHIs(State);
4075   }
4076 
4077   // At this point every instruction in the original loop is widened to a
4078   // vector form. Now we need to fix the recurrences in the loop. These PHI
4079   // nodes are currently empty because we did not want to introduce cycles.
4080   // This is the second stage of vectorizing recurrences.
4081   fixCrossIterationPHIs(State);
4082 
4083   // Forget the original basic block.
4084   PSE.getSE()->forgetLoop(OrigLoop);
4085 
4086   // Fix-up external users of the induction variables.
4087   for (auto &Entry : Legal->getInductionVars())
4088     fixupIVUsers(Entry.first, Entry.second,
4089                  getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
4090                  IVEndValues[Entry.first], LoopMiddleBlock);
4091 
4092   fixLCSSAPHIs(State);
4093   for (Instruction *PI : PredicatedInstructions)
4094     sinkScalarOperands(&*PI);
4095 
4096   // Remove redundant induction instructions.
4097   cse(LoopVectorBody);
4098 
4099   // Set/update profile weights for the vector and remainder loops as original
4100   // loop iterations are now distributed among them. Note that original loop
4101   // represented by LoopScalarBody becomes remainder loop after vectorization.
4102   //
  // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
  // end up with a slightly less precise result, but that should be OK since
  // the profile is not inherently precise anyway. Note also that a possible
  // bypass of the vector code caused by legality checks is ignored,
  // optimistically assigning all the weight to the vector loop.
4108   //
  // For scalable vectorization we can't know at compile time how many
  // iterations of the loop are handled in one vector iteration, so instead
  // assume a pessimistic vscale of '1'.
4112   setProfileInfoAfterUnrolling(
4113       LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody),
4114       LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF);
4115 }
4116 
4117 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
4118   // In order to support recurrences we need to be able to vectorize Phi nodes.
4119   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4120   // stage #2: We now need to fix the recurrences by adding incoming edges to
4121   // the currently empty PHI nodes. At this point every instruction in the
4122   // original loop is widened to a vector form so we can use them to construct
4123   // the incoming edges.
4124   VPBasicBlock *Header = State.Plan->getEntry()->getEntryBasicBlock();
4125   for (VPRecipeBase &R : Header->phis()) {
4126     auto *PhiR = dyn_cast<VPWidenPHIRecipe>(&R);
4127     if (!PhiR)
4128       continue;
4129     auto *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
4130     if (PhiR->getRecurrenceDescriptor()) {
4131       fixReduction(PhiR, State);
4132     } else if (Legal->isFirstOrderRecurrence(OrigPhi))
4133       fixFirstOrderRecurrence(PhiR, State);
4134   }
4135 }
4136 
4137 void InnerLoopVectorizer::fixFirstOrderRecurrence(VPWidenPHIRecipe *PhiR,
4138                                                   VPTransformState &State) {
4139   // This is the second phase of vectorizing first-order recurrences. An
4140   // overview of the transformation is described below. Suppose we have the
4141   // following loop.
4142   //
4143   //   for (int i = 0; i < n; ++i)
4144   //     b[i] = a[i] - a[i - 1];
4145   //
4146   // There is a first-order recurrence on "a". For this loop, the shorthand
4147   // scalar IR looks like:
4148   //
4149   //   scalar.ph:
4150   //     s_init = a[-1]
4151   //     br scalar.body
4152   //
4153   //   scalar.body:
4154   //     i = phi [0, scalar.ph], [i+1, scalar.body]
4155   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
4156   //     s2 = a[i]
4157   //     b[i] = s2 - s1
4158   //     br cond, scalar.body, ...
4159   //
  // In this example, s1 is a recurrence because its value depends on the
4161   // previous iteration. In the first phase of vectorization, we created a
4162   // temporary value for s1. We now complete the vectorization and produce the
4163   // shorthand vector IR shown below (for VF = 4, UF = 1).
4164   //
4165   //   vector.ph:
4166   //     v_init = vector(..., ..., ..., a[-1])
4167   //     br vector.body
4168   //
4169   //   vector.body
4170   //     i = phi [0, vector.ph], [i+4, vector.body]
4171   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
4172   //     v2 = a[i, i+1, i+2, i+3];
4173   //     v3 = vector(v1(3), v2(0, 1, 2))
4174   //     b[i, i+1, i+2, i+3] = v2 - v3
4175   //     br cond, vector.body, middle.block
4176   //
4177   //   middle.block:
4178   //     x = v2(3)
4179   //     br scalar.ph
4180   //
4181   //   scalar.ph:
4182   //     s_init = phi [x, middle.block], [a[-1], otherwise]
4183   //     br scalar.body
4184   //
  // After the vector loop completes, we extract the next value of the
  // recurrence (x) to use as the initial value in the scalar loop.
4187 
4188   auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue();
4189 
4190   auto *IdxTy = Builder.getInt32Ty();
4191   auto *One = ConstantInt::get(IdxTy, 1);
4192 
4193   // Create a vector from the initial value.
4194   auto *VectorInit = ScalarInit;
4195   if (VF.isVector()) {
4196     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4197     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4198     auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
4199     VectorInit = Builder.CreateInsertElement(
4200         PoisonValue::get(VectorType::get(VectorInit->getType(), VF)),
4201         VectorInit, LastIdx, "vector.recur.init");
4202   }
4203 
4204   VPValue *PreviousDef = PhiR->getBackedgeValue();
4205   // We constructed a temporary phi node in the first phase of vectorization.
4206   // This phi node will eventually be deleted.
4207   Builder.SetInsertPoint(cast<Instruction>(State.get(PhiR, 0)));
4208 
4209   // Create a phi node for the new recurrence. The current value will either be
4210   // the initial value inserted into a vector or loop-varying vector value.
4211   auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
4212   VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
4213 
4214   // Get the vectorized previous value of the last part UF - 1. It appears last
4215   // among all unrolled iterations, due to the order of their construction.
4216   Value *PreviousLastPart = State.get(PreviousDef, UF - 1);
4217 
4218   // Find and set the insertion point after the previous value if it is an
4219   // instruction.
4220   BasicBlock::iterator InsertPt;
4221   // Note that the previous value may have been constant-folded so it is not
4222   // guaranteed to be an instruction in the vector loop.
4223   // FIXME: Loop invariant values do not form recurrences. We should deal with
4224   //        them earlier.
4225   if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart))
4226     InsertPt = LoopVectorBody->getFirstInsertionPt();
4227   else {
4228     Instruction *PreviousInst = cast<Instruction>(PreviousLastPart);
4229     if (isa<PHINode>(PreviousLastPart))
4230       // If the previous value is a phi node, we should insert after all the phi
4231       // nodes in the block containing the PHI to avoid breaking basic block
4232       // verification. Note that the basic block may be different to
4233       // LoopVectorBody, in case we predicate the loop.
4234       InsertPt = PreviousInst->getParent()->getFirstInsertionPt();
4235     else
4236       InsertPt = ++PreviousInst->getIterator();
4237   }
4238   Builder.SetInsertPoint(&*InsertPt);
4239 
4240   // The vector from which to take the initial value for the current iteration
4241   // (actual or unrolled). Initially, this is the vector phi node.
4242   Value *Incoming = VecPhi;
4243 
4244   // Shuffle the current and previous vector and update the vector parts.
4245   for (unsigned Part = 0; Part < UF; ++Part) {
4246     Value *PreviousPart = State.get(PreviousDef, Part);
4247     Value *PhiPart = State.get(PhiR, Part);
4248     auto *Shuffle = VF.isVector()
4249                         ? Builder.CreateVectorSplice(Incoming, PreviousPart, -1)
4250                         : Incoming;
4251     PhiPart->replaceAllUsesWith(Shuffle);
4252     cast<Instruction>(PhiPart)->eraseFromParent();
4253     State.reset(PhiR, Shuffle, Part);
4254     Incoming = PreviousPart;
4255   }
4256 
4257   // Fix the latch value of the new recurrence in the vector loop.
4258   VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4259 
4260   // Extract the last vector element in the middle block. This will be the
4261   // initial value for the recurrence when jumping to the scalar loop.
4262   auto *ExtractForScalar = Incoming;
4263   if (VF.isVector()) {
4264     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4265     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4266     auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
4267     ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx,
4268                                                     "vector.recur.extract");
4269   }
4270   // Extract the second last element in the middle block if the
4271   // Phi is used outside the loop. We need to extract the phi itself
4272   // and not the last element (the phi update in the current iteration). This
4273   // will be the value when jumping to the exit block from the LoopMiddleBlock,
4274   // when the scalar loop is not run at all.
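  // For example, with VF == 4 the scalar loop resumes with lane 3 of
  // 'Incoming' (the last element, extracted above), while a phi user outside
  // the loop is given lane 2 (the second-to-last element, extracted below).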
4275   Value *ExtractForPhiUsedOutsideLoop = nullptr;
4276   if (VF.isVector()) {
4277     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4278     auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2));
4279     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
4280         Incoming, Idx, "vector.recur.extract.for.phi");
4281   } else if (UF > 1)
    // When the loop is unrolled without vectorizing, initialize
    // ExtractForPhiUsedOutsideLoop with the value just prior to the unrolled
    // value of `Incoming`. This is analogous to the vectorized case above:
    // extracting the second last element when VF > 1.
4286     ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);
4287 
4288   // Fix the initial value of the original recurrence in the scalar loop.
4289   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
4290   PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue());
4291   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
4292   for (auto *BB : predecessors(LoopScalarPreHeader)) {
4293     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
4294     Start->addIncoming(Incoming, BB);
4295   }
4296 
4297   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
4298   Phi->setName("scalar.recur");
4299 
4300   // Finally, fix users of the recurrence outside the loop. The users will need
4301   // either the last value of the scalar recurrence or the last value of the
4302   // vector recurrence we extracted in the middle block. Since the loop is in
4303   // LCSSA form, we just need to find all the phi nodes for the original scalar
4304   // recurrence in the exit block, and then add an edge for the middle block.
4305   // Note that LCSSA does not imply single entry when the original scalar loop
4306   // had multiple exiting edges (as we always run the last iteration in the
4307   // scalar epilogue); in that case, the exiting path through middle will be
4308   // dynamically dead and the value picked for the phi doesn't matter.
4309   for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4310     if (any_of(LCSSAPhi.incoming_values(),
4311                [Phi](Value *V) { return V == Phi; }))
4312       LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
4313 }
4314 
4315 void InnerLoopVectorizer::fixReduction(VPWidenPHIRecipe *PhiR,
4316                                        VPTransformState &State) {
4317   PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
  // Get its reduction variable descriptor.
4319   assert(Legal->isReductionVariable(OrigPhi) &&
4320          "Unable to find the reduction variable");
4321   const RecurrenceDescriptor &RdxDesc = *PhiR->getRecurrenceDescriptor();
4322 
4323   RecurKind RK = RdxDesc.getRecurrenceKind();
4324   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
4325   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
4326   setDebugLocFromInst(Builder, ReductionStartValue);
4327   bool IsInLoopReductionPhi = Cost->isInLoopReduction(OrigPhi);
4328 
4329   VPValue *LoopExitInstDef = State.Plan->getVPValue(LoopExitInst);
4330   // This is the vector-clone of the value that leaves the loop.
4331   Type *VecTy = State.get(LoopExitInstDef, 0)->getType();
4332 
4333   // Wrap flags are in general invalid after vectorization, clear them.
4334   clearReductionWrapFlags(RdxDesc, State);
4335 
4336   // Fix the vector-loop phi.
4337 
4338   // Reductions do not have to start at zero. They can start with
4339   // any loop invariant values.
4340   BasicBlock *VectorLoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
4341 
4342   bool IsOrdered = IsInLoopReductionPhi && Cost->useOrderedReductions(RdxDesc);
4343 
4344   for (unsigned Part = 0; Part < UF; ++Part) {
4345     if (IsOrdered && Part > 0)
4346       break;
4347     Value *VecRdxPhi = State.get(PhiR->getVPSingleValue(), Part);
4348     Value *Val = State.get(PhiR->getBackedgeValue(), Part);
4349     if (IsOrdered)
4350       Val = State.get(PhiR->getBackedgeValue(), UF - 1);
4351 
4352     cast<PHINode>(VecRdxPhi)->addIncoming(Val, VectorLoopLatch);
4353   }
4354 
4355   // Before each round, move the insertion point right between
4356   // the PHIs and the values we are going to write.
4357   // This allows us to write both PHINodes and the extractelement
4358   // instructions.
4359   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4360 
4361   setDebugLocFromInst(Builder, LoopExitInst);
4362 
4363   Type *PhiTy = OrigPhi->getType();
4364   // If tail is folded by masking, the vector value to leave the loop should be
4365   // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
4366   // instead of the former. For an inloop reduction the reduction will already
4367   // be predicated, and does not need to be handled here.
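  // For example (shorthand), with tail folding the loop contains
  //   %sel = select <4 x i1> %mask, <4 x i32> %rdx.next, <4 x i32> %rdx.phi
  // and it is %sel, not %rdx.next, that must be fed to the final reduction
  // in the middle block.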
4368   if (Cost->foldTailByMasking() && !IsInLoopReductionPhi) {
4369     for (unsigned Part = 0; Part < UF; ++Part) {
4370       Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
4371       Value *Sel = nullptr;
4372       for (User *U : VecLoopExitInst->users()) {
4373         if (isa<SelectInst>(U)) {
4374           assert(!Sel && "Reduction exit feeding two selects");
4375           Sel = U;
4376         } else
          assert(isa<PHINode>(U) &&
                 "Reduction exit must feed PHIs or a select");
4378       }
4379       assert(Sel && "Reduction exit feeds no select");
4380       State.reset(LoopExitInstDef, Sel, Part);
4381 
4382       // If the target can create a predicated operator for the reduction at no
4383       // extra cost in the loop (for example a predicated vadd), it can be
4384       // cheaper for the select to remain in the loop than be sunk out of it,
4385       // and so use the select value for the phi instead of the old
4386       // LoopExitValue.
4387       if (PreferPredicatedReductionSelect ||
4388           TTI->preferPredicatedReductionSelect(
4389               RdxDesc.getOpcode(), PhiTy,
4390               TargetTransformInfo::ReductionFlags())) {
4391         auto *VecRdxPhi =
4392             cast<PHINode>(State.get(PhiR->getVPSingleValue(), Part));
4393         VecRdxPhi->setIncomingValueForBlock(
4394             LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel);
4395       }
4396     }
4397   }
4398 
4399   // If the vector reduction can be performed in a smaller type, we truncate
4400   // then extend the loop exit value to enable InstCombine to evaluate the
4401   // entire expression in the smaller type.
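  // For example (roughly), an i32 add reduction that fits in 8 bits has its
  // loop-exit value rewritten as
  //   %t = trunc <4 x i32> %rdx to <4 x i8>
  //   %e = zext <4 x i8> %t to <4 x i32>   ; users of %rdx now use %e
  // and the middle block then reduces the truncated <4 x i8> value.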
4402   if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) {
4403     assert(!IsInLoopReductionPhi && "Unexpected truncated inloop reduction!");
4404     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
4405     Builder.SetInsertPoint(
4406         LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
4407     VectorParts RdxParts(UF);
4408     for (unsigned Part = 0; Part < UF; ++Part) {
4409       RdxParts[Part] = State.get(LoopExitInstDef, Part);
4410       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4411       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
4412                                         : Builder.CreateZExt(Trunc, VecTy);
4413       for (Value::user_iterator UI = RdxParts[Part]->user_begin();
4414            UI != RdxParts[Part]->user_end();)
4415         if (*UI != Trunc) {
4416           (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd);
4417           RdxParts[Part] = Extnd;
4418         } else {
4419           ++UI;
4420         }
4421     }
4422     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4423     for (unsigned Part = 0; Part < UF; ++Part) {
4424       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4425       State.reset(LoopExitInstDef, RdxParts[Part], Part);
4426     }
4427   }
4428 
4429   // Reduce all of the unrolled parts into a single vector.
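  // For example, with UF == 2 the two unrolled part values are combined with
  // a single 'bin.rdx' binary op (or a min/max op for min/max recurrences)
  // before the across-lanes reduction below; ordered reductions instead use
  // the last part directly.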
4430   Value *ReducedPartRdx = State.get(LoopExitInstDef, 0);
4431   unsigned Op = RecurrenceDescriptor::getOpcode(RK);
4432 
4433   // The middle block terminator has already been assigned a DebugLoc here (the
4434   // OrigLoop's single latch terminator). We want the whole middle block to
4435   // appear to execute on this line because: (a) it is all compiler generated,
4436   // (b) these instructions are always executed after evaluating the latch
4437   // conditional branch, and (c) other passes may add new predecessors which
4438   // terminate on this line. This is the easiest way to ensure we don't
4439   // accidentally cause an extra step back into the loop while debugging.
4440   setDebugLocFromInst(Builder, LoopMiddleBlock->getTerminator());
4441   if (IsOrdered)
4442     ReducedPartRdx = State.get(LoopExitInstDef, UF - 1);
4443   else {
4444     // Floating-point operations should have some FMF to enable the reduction.
4445     IRBuilderBase::FastMathFlagGuard FMFG(Builder);
4446     Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
4447     for (unsigned Part = 1; Part < UF; ++Part) {
4448       Value *RdxPart = State.get(LoopExitInstDef, Part);
4449       if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
4450         ReducedPartRdx = Builder.CreateBinOp(
4451             (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
4452       } else {
4453         ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
4454       }
4455     }
4456   }
4457 
4458   // Create the reduction after the loop. Note that inloop reductions create the
4459   // target reduction in the loop using a Reduction recipe.
4460   if (VF.isVector() && !IsInLoopReductionPhi) {
4461     ReducedPartRdx =
4462         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx);
4463     // If the reduction can be performed in a smaller type, we need to extend
4464     // the reduction to the wider type before we branch to the original loop.
4465     if (PhiTy != RdxDesc.getRecurrenceType())
4466       ReducedPartRdx = RdxDesc.isSigned()
4467                            ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
4468                            : Builder.CreateZExt(ReducedPartRdx, PhiTy);
4469   }
4470 
4471   // Create a phi node that merges control-flow from the backedge-taken check
4472   // block and the middle block.
4473   PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx",
4474                                         LoopScalarPreHeader->getTerminator());
4475   for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
4476     BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
4477   BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
4478 
4479   // Now, we need to fix the users of the reduction variable
4480   // inside and outside of the scalar remainder loop.
4481 
4482   // We know that the loop is in LCSSA form. We need to update the PHI nodes
4483   // in the exit blocks.  See comment on analogous loop in
  // fixFirstOrderRecurrence for a more complete explanation of the logic.
4485   for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4486     if (any_of(LCSSAPhi.incoming_values(),
4487                [LoopExitInst](Value *V) { return V == LoopExitInst; }))
4488       LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
4489 
4490   // Fix the scalar loop reduction variable with the incoming reduction sum
4491   // from the vector body and from the backedge value.
4492   int IncomingEdgeBlockIdx =
4493       OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4494   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4495   // Pick the other block.
4496   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4497   OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4498   OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4499 }
4500 
4501 void InnerLoopVectorizer::clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
4502                                                   VPTransformState &State) {
4503   RecurKind RK = RdxDesc.getRecurrenceKind();
4504   if (RK != RecurKind::Add && RK != RecurKind::Mul)
4505     return;
4506 
4507   Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
4508   assert(LoopExitInstr && "null loop exit instruction");
4509   SmallVector<Instruction *, 8> Worklist;
4510   SmallPtrSet<Instruction *, 8> Visited;
4511   Worklist.push_back(LoopExitInstr);
4512   Visited.insert(LoopExitInstr);
4513 
4514   while (!Worklist.empty()) {
4515     Instruction *Cur = Worklist.pop_back_val();
4516     if (isa<OverflowingBinaryOperator>(Cur))
4517       for (unsigned Part = 0; Part < UF; ++Part) {
4518         Value *V = State.get(State.Plan->getVPValue(Cur), Part);
4519         cast<Instruction>(V)->dropPoisonGeneratingFlags();
4520       }
4521 
4522     for (User *U : Cur->users()) {
4523       Instruction *UI = cast<Instruction>(U);
4524       if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
4525           Visited.insert(UI).second)
4526         Worklist.push_back(UI);
4527     }
4528   }
4529 }
4530 
4531 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) {
4532   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4533     if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1)
4534       // Some phis were already hand updated by the reduction and recurrence
4535       // code above, leave them alone.
4536       continue;
4537 
4538     auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
    // Non-instruction incoming values (e.g. constants) are uniform, so the
    // first lane suffices for them.
4540 
4541     VPLane Lane = VPLane::getFirstLane();
4542     if (isa<Instruction>(IncomingValue) &&
4543         !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue),
4544                                            VF))
4545       Lane = VPLane::getLastLaneForVF(VF);
4546 
4547     // Can be a loop invariant incoming value or the last scalar value to be
4548     // extracted from the vectorized loop.
4549     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4550     Value *lastIncomingValue =
4551         OrigLoop->isLoopInvariant(IncomingValue)
4552             ? IncomingValue
4553             : State.get(State.Plan->getVPValue(IncomingValue),
4554                         VPIteration(UF - 1, Lane));
4555     LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
4556   }
4557 }
4558 
4559 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4560   // The basic block and loop containing the predicated instruction.
4561   auto *PredBB = PredInst->getParent();
4562   auto *VectorLoop = LI->getLoopFor(PredBB);
4563 
4564   // Initialize a worklist with the operands of the predicated instruction.
4565   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4566 
4567   // Holds instructions that we need to analyze again. An instruction may be
4568   // reanalyzed if we don't yet know if we can sink it or not.
4569   SmallVector<Instruction *, 8> InstsToReanalyze;
4570 
4571   // Returns true if a given use occurs in the predicated block. Phi nodes use
4572   // their operands in their corresponding predecessor blocks.
4573   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4574     auto *I = cast<Instruction>(U.getUser());
4575     BasicBlock *BB = I->getParent();
4576     if (auto *Phi = dyn_cast<PHINode>(I))
4577       BB = Phi->getIncomingBlock(
4578           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4579     return BB == PredBB;
4580   };
4581 
  // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
  // operands are then added to the worklist. The algorithm ends when a pass
  // over the worklist sinks no further instructions.
4586   bool Changed;
4587   do {
4588     // Add the instructions that need to be reanalyzed to the worklist, and
4589     // reset the changed indicator.
4590     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4591     InstsToReanalyze.clear();
4592     Changed = false;
4593 
4594     while (!Worklist.empty()) {
4595       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4596 
4597       // We can't sink an instruction if it is a phi node, is not in the loop,
4598       // or may have side effects.
4599       if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) ||
4600           I->mayHaveSideEffects())
4601         continue;
4602 
4603       // If the instruction is already in PredBB, check if we can sink its
4604       // operands. In that case, VPlan's sinkScalarOperands() succeeded in
4605       // sinking the scalar instruction I, hence it appears in PredBB; but it
4606       // may have failed to sink I's operands (recursively), which we try
4607       // (again) here.
4608       if (I->getParent() == PredBB) {
4609         Worklist.insert(I->op_begin(), I->op_end());
4610         continue;
4611       }
4612 
4613       // It's legal to sink the instruction if all its uses occur in the
4614       // predicated block. Otherwise, there's nothing to do yet, and we may
4615       // need to reanalyze the instruction.
4616       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4617         InstsToReanalyze.push_back(I);
4618         continue;
4619       }
4620 
      // Move the instruction to the beginning of the predicated block, and add
      // its operands to the worklist.
4623       I->moveBefore(&*PredBB->getFirstInsertionPt());
4624       Worklist.insert(I->op_begin(), I->op_end());
4625 
4626       // The sinking may have enabled other instructions to be sunk, so we will
4627       // need to iterate.
4628       Changed = true;
4629     }
4630   } while (Changed);
4631 }
4632 
4633 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
4634   for (PHINode *OrigPhi : OrigPHIsToFix) {
4635     VPWidenPHIRecipe *VPPhi =
4636         cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi));
4637     PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
4638     // Make sure the builder has a valid insert point.
4639     Builder.SetInsertPoint(NewPhi);
4640     for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) {
4641       VPValue *Inc = VPPhi->getIncomingValue(i);
4642       VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i);
4643       NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]);
4644     }
4645   }
4646 }
4647 
4648 bool InnerLoopVectorizer::useOrderedReductions(RecurrenceDescriptor &RdxDesc) {
4649   return Cost->useOrderedReductions(RdxDesc);
4650 }
4651 
4652 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, VPValue *VPDef,
4653                                    VPUser &Operands, unsigned UF,
4654                                    ElementCount VF, bool IsPtrLoopInvariant,
4655                                    SmallBitVector &IsIndexLoopInvariant,
4656                                    VPTransformState &State) {
4657   // Construct a vector GEP by widening the operands of the scalar GEP as
4658   // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
4659   // results in a vector of pointers when at least one operand of the GEP
4660   // is vector-typed. Thus, to keep the representation compact, we only use
4661   // vector-typed operands for loop-varying values.
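  // For example (shorthand), with a loop-invariant base pointer %p and a
  // widened index %vec.ind, the GEP produced below is
  //   %vgep = getelementptr i32, i32* %p, <4 x i64> %vec.ind
  // i.e. a single <4 x i32*> vector of pointers per unrolled part.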
4662 
4663   if (VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
4664     // If we are vectorizing, but the GEP has only loop-invariant operands,
4665     // the GEP we build (by only using vector-typed operands for
4666     // loop-varying values) would be a scalar pointer. Thus, to ensure we
4667     // produce a vector of pointers, we need to either arbitrarily pick an
4668     // operand to broadcast, or broadcast a clone of the original GEP.
4669     // Here, we broadcast a clone of the original.
4670     //
4671     // TODO: If at some point we decide to scalarize instructions having
4672     //       loop-invariant operands, this special case will no longer be
4673     //       required. We would add the scalarization decision to
4674     //       collectLoopScalars() and teach getVectorValue() to broadcast
4675     //       the lane-zero scalar value.
4676     auto *Clone = Builder.Insert(GEP->clone());
4677     for (unsigned Part = 0; Part < UF; ++Part) {
4678       Value *EntryPart = Builder.CreateVectorSplat(VF, Clone);
4679       State.set(VPDef, EntryPart, Part);
4680       addMetadata(EntryPart, GEP);
4681     }
4682   } else {
4683     // If the GEP has at least one loop-varying operand, we are sure to
4684     // produce a vector of pointers. But if we are only unrolling, we want
4685     // to produce a scalar GEP for each unroll part. Thus, the GEP we
4686     // produce with the code below will be scalar (if VF == 1) or vector
4687     // (otherwise). Note that for the unroll-only case, we still maintain
4688     // values in the vector mapping with initVector, as we do for other
4689     // instructions.
4690     for (unsigned Part = 0; Part < UF; ++Part) {
4691       // The pointer operand of the new GEP. If it's loop-invariant, we
4692       // won't broadcast it.
4693       auto *Ptr = IsPtrLoopInvariant
4694                       ? State.get(Operands.getOperand(0), VPIteration(0, 0))
4695                       : State.get(Operands.getOperand(0), Part);
4696 
4697       // Collect all the indices for the new GEP. If any index is
4698       // loop-invariant, we won't broadcast it.
4699       SmallVector<Value *, 4> Indices;
4700       for (unsigned I = 1, E = Operands.getNumOperands(); I < E; I++) {
4701         VPValue *Operand = Operands.getOperand(I);
4702         if (IsIndexLoopInvariant[I - 1])
4703           Indices.push_back(State.get(Operand, VPIteration(0, 0)));
4704         else
4705           Indices.push_back(State.get(Operand, Part));
4706       }
4707 
4708       // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
4709       // but it should be a vector, otherwise.
4710       auto *NewGEP =
4711           GEP->isInBounds()
4712               ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr,
4713                                           Indices)
4714               : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices);
4715       assert((VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
4716              "NewGEP is not a pointer vector");
4717       State.set(VPDef, NewGEP, Part);
4718       addMetadata(NewGEP, GEP);
4719     }
4720   }
4721 }
4722 
4723 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
4724                                               RecurrenceDescriptor *RdxDesc,
4725                                               VPWidenPHIRecipe *PhiR,
4726                                               VPTransformState &State) {
4727   PHINode *P = cast<PHINode>(PN);
4728   if (EnableVPlanNativePath) {
4729     // Currently we enter here in the VPlan-native path for non-induction
4730     // PHIs where all control flow is uniform. We simply widen these PHIs.
4731     // Create a vector phi with no operands - the vector phi operands will be
4732     // set at the end of vector code generation.
4733     Type *VecTy = (State.VF.isScalar())
4734                       ? PN->getType()
4735                       : VectorType::get(PN->getType(), State.VF);
4736     Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4737     State.set(PhiR, VecPhi, 0);
4738     OrigPHIsToFix.push_back(P);
4739 
4740     return;
4741   }
4742 
4743   assert(PN->getParent() == OrigLoop->getHeader() &&
4744          "Non-header phis should have been handled elsewhere");
4745 
4746   // In order to support recurrences we need to be able to vectorize Phi nodes.
4747   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4748   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4749   // this value when we vectorize all of the instructions that use the PHI.
4750   if (RdxDesc || Legal->isFirstOrderRecurrence(P)) {
4751     bool ScalarPHI =
4752         (State.VF.isScalar()) || Cost->isInLoopReduction(cast<PHINode>(PN));
4753     Type *VecTy =
4754         ScalarPHI ? PN->getType() : VectorType::get(PN->getType(), State.VF);
4755 
4756     bool IsOrdered = Cost->isInLoopReduction(cast<PHINode>(PN)) &&
4757                      Cost->useOrderedReductions(*RdxDesc);
4758     unsigned LastPartForNewPhi = IsOrdered ? 1 : State.UF;
4759     for (unsigned Part = 0; Part < LastPartForNewPhi; ++Part) {
4760       Value *EntryPart = PHINode::Create(
4761           VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
4762       State.set(PhiR, EntryPart, Part);
4763     }
4764     if (Legal->isFirstOrderRecurrence(P))
4765       return;
4766     VPValue *StartVPV = PhiR->getStartValue();
4767     Value *StartV = StartVPV->getLiveInIRValue();
4768 
4769     Value *Iden = nullptr;
4770 
4771     assert(Legal->isReductionVariable(P) && StartV &&
4772            "RdxDesc should only be set for reduction variables; in that case "
4773            "a StartV is also required");
4774     RecurKind RK = RdxDesc->getRecurrenceKind();
4775     if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK)) {
4776       // MinMax reductions have the start value as their identity.
4777       if (ScalarPHI) {
4778         Iden = StartV;
4779       } else {
4780         IRBuilderBase::InsertPointGuard IPBuilder(Builder);
4781         Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4782         StartV = Iden =
4783             Builder.CreateVectorSplat(State.VF, StartV, "minmax.ident");
4784       }
4785     } else {
4786       Constant *IdenC = RecurrenceDescriptor::getRecurrenceIdentity(
4787           RK, VecTy->getScalarType(), RdxDesc->getFastMathFlags());
4788       Iden = IdenC;
4789 
4790       if (!ScalarPHI) {
4791         Iden = ConstantVector::getSplat(State.VF, IdenC);
4792         IRBuilderBase::InsertPointGuard IPBuilder(Builder);
4793         Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4794         Constant *Zero = Builder.getInt32(0);
4795         StartV = Builder.CreateInsertElement(Iden, StartV, Zero);
4796       }
4797     }
4798 
4799     for (unsigned Part = 0; Part < LastPartForNewPhi; ++Part) {
4800       Value *EntryPart = State.get(PhiR, Part);
4801       // Make sure to add the reduction start value only to the
4802       // first unroll part.
4803       Value *StartVal = (Part == 0) ? StartV : Iden;
4804       cast<PHINode>(EntryPart)->addIncoming(StartVal, LoopVectorPreHeader);
4805     }
4806 
4807     return;
4808   }
4809 
4810   assert(!Legal->isReductionVariable(P) &&
4811          "reductions should be handled above");
4812 
4813   setDebugLocFromInst(Builder, P);
4814 
4815   // This PHINode must be an induction variable.
4816   // Make sure that we know about it.
4817   assert(Legal->getInductionVars().count(P) && "Not an induction variable");
4818 
4819   InductionDescriptor II = Legal->getInductionVars().lookup(P);
4820   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4821 
4822   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4823   // which can be found from the original scalar operations.
4824   switch (II.getKind()) {
4825   case InductionDescriptor::IK_NoInduction:
4826     llvm_unreachable("Unknown induction");
4827   case InductionDescriptor::IK_IntInduction:
4828   case InductionDescriptor::IK_FpInduction:
4829     llvm_unreachable("Integer/fp induction is handled elsewhere.");
4830   case InductionDescriptor::IK_PtrInduction: {
4831     // Handle the pointer induction variable case.
4832     assert(P->getType()->isPointerTy() && "Unexpected type.");
4833 
4834     if (Cost->isScalarAfterVectorization(P, State.VF)) {
4835       // This is the normalized GEP that starts counting at zero.
4836       Value *PtrInd =
4837           Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType());
4838       // Determine the number of scalars we need to generate for each unroll
4839       // iteration. If the instruction is uniform, we only need to generate the
4840       // first lane. Otherwise, we generate all VF values.
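      //
      // E.g. (a sketch): with VF = 4 and UF = 2, a uniform pointer induction
      // needs one scalar GEP per part (2 in total), while a non-uniform one
      // needs VF scalar GEPs per part (8 in total).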
4841       bool IsUniform = Cost->isUniformAfterVectorization(P, State.VF);
4842       unsigned Lanes = IsUniform ? 1 : State.VF.getKnownMinValue();
4843 
4844       bool NeedsVectorIndex = !IsUniform && VF.isScalable();
4845       Value *UnitStepVec = nullptr, *PtrIndSplat = nullptr;
4846       if (NeedsVectorIndex) {
4847         Type *VecIVTy = VectorType::get(PtrInd->getType(), VF);
4848         UnitStepVec = Builder.CreateStepVector(VecIVTy);
4849         PtrIndSplat = Builder.CreateVectorSplat(VF, PtrInd);
4850       }
4851 
4852       for (unsigned Part = 0; Part < UF; ++Part) {
4853         Value *PartStart = createStepForVF(
4854             Builder, ConstantInt::get(PtrInd->getType(), Part), VF);
4855 
4856         if (NeedsVectorIndex) {
4857           Value *PartStartSplat = Builder.CreateVectorSplat(VF, PartStart);
4858           Value *Indices = Builder.CreateAdd(PartStartSplat, UnitStepVec);
4859           Value *GlobalIndices = Builder.CreateAdd(PtrIndSplat, Indices);
4860           Value *SclrGep =
4861               emitTransformedIndex(Builder, GlobalIndices, PSE.getSE(), DL, II);
4862           SclrGep->setName("next.gep");
4863           State.set(PhiR, SclrGep, Part);
4864           // We've cached the whole vector, which means we can support the
4865           // extraction of any lane.
4866           continue;
4867         }
4868 
4869         for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4870           Value *Idx = Builder.CreateAdd(
4871               PartStart, ConstantInt::get(PtrInd->getType(), Lane));
4872           Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4873           Value *SclrGep =
4874               emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II);
4875           SclrGep->setName("next.gep");
4876           State.set(PhiR, SclrGep, VPIteration(Part, Lane));
4877         }
4878       }
4879       return;
4880     }
4881     assert(isa<SCEVConstant>(II.getStep()) &&
4882            "Induction step not a SCEV constant!");
4883     Type *PhiType = II.getStep()->getType();
4884 
4885     // Build a pointer phi
4886     Value *ScalarStartValue = II.getStartValue();
4887     Type *ScStValueType = ScalarStartValue->getType();
4888     PHINode *NewPointerPhi =
4889         PHINode::Create(ScStValueType, 2, "pointer.phi", Induction);
4890     NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader);
4891 
4892     // A pointer induction, performed by using a gep
4893     BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
4894     Instruction *InductionLoc = LoopLatch->getTerminator();
4895     const SCEV *ScalarStep = II.getStep();
4896     SCEVExpander Exp(*PSE.getSE(), DL, "induction");
4897     Value *ScalarStepValue =
4898         Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc);
4899     Value *RuntimeVF = getRuntimeVF(Builder, PhiType, VF);
4900     Value *NumUnrolledElems =
4901         Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF));
4902     Value *InductionGEP = GetElementPtrInst::Create(
4903         ScStValueType->getPointerElementType(), NewPointerPhi,
4904         Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind",
4905         InductionLoc);
4906     NewPointerPhi->addIncoming(InductionGEP, LoopLatch);
4907 
4908     // Create UF many actual address geps that use the pointer
4909     // phi as base and a vectorized version of the step value
4910     // (<step*0, ..., step*N>) as offset.
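    //
    // Worked illustration (made-up values, fixed VF = 4, UF = 2, step = 2
    // elements): part 0 uses element offsets <0, 2, 4, 6> and part 1 uses
    // <8, 10, 12, 14>, both relative to the pointer phi.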
4911     for (unsigned Part = 0; Part < State.UF; ++Part) {
4912       Type *VecPhiType = VectorType::get(PhiType, State.VF);
4913       Value *StartOffsetScalar =
4914           Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part));
4915       Value *StartOffset =
4916           Builder.CreateVectorSplat(State.VF, StartOffsetScalar);
4917       // Create a vector of consecutive numbers from zero to VF.
4918       StartOffset =
4919           Builder.CreateAdd(StartOffset, Builder.CreateStepVector(VecPhiType));
4920 
4921       Value *GEP = Builder.CreateGEP(
4922           ScStValueType->getPointerElementType(), NewPointerPhi,
4923           Builder.CreateMul(
4924               StartOffset, Builder.CreateVectorSplat(State.VF, ScalarStepValue),
4925               "vector.gep"));
4926       State.set(PhiR, GEP, Part);
4927     }
4928   }
4929   }
4930 }
4931 
4932 /// A helper function for checking whether an integer division-related
4933 /// instruction may divide by zero (in which case it must be predicated if
4934 /// executed conditionally in the scalar code).
4935 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
4936 /// Non-zero divisors that are not compile-time constants will not be
4937 /// converted into multiplication, so we will still end up scalarizing
4938 /// the division, but can do so w/o predication.
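/// For illustration: a constant divisor such as 7 can never be zero, so no
/// predication is needed for it, whereas a non-constant divisor (or the
/// constant 0) must conservatively be treated as possibly zero.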
4939 static bool mayDivideByZero(Instruction &I) {
4940   assert((I.getOpcode() == Instruction::UDiv ||
4941           I.getOpcode() == Instruction::SDiv ||
4942           I.getOpcode() == Instruction::URem ||
4943           I.getOpcode() == Instruction::SRem) &&
4944          "Unexpected instruction");
4945   Value *Divisor = I.getOperand(1);
4946   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4947   return !CInt || CInt->isZero();
4948 }
4949 
4950 void InnerLoopVectorizer::widenInstruction(Instruction &I, VPValue *Def,
4951                                            VPUser &User,
4952                                            VPTransformState &State) {
4953   switch (I.getOpcode()) {
4954   case Instruction::Call:
4955   case Instruction::Br:
4956   case Instruction::PHI:
4957   case Instruction::GetElementPtr:
4958   case Instruction::Select:
4959     llvm_unreachable("This instruction is handled by a different recipe.");
4960   case Instruction::UDiv:
4961   case Instruction::SDiv:
4962   case Instruction::SRem:
4963   case Instruction::URem:
4964   case Instruction::Add:
4965   case Instruction::FAdd:
4966   case Instruction::Sub:
4967   case Instruction::FSub:
4968   case Instruction::FNeg:
4969   case Instruction::Mul:
4970   case Instruction::FMul:
4971   case Instruction::FDiv:
4972   case Instruction::FRem:
4973   case Instruction::Shl:
4974   case Instruction::LShr:
4975   case Instruction::AShr:
4976   case Instruction::And:
4977   case Instruction::Or:
4978   case Instruction::Xor: {
4979     // Just widen unops and binops.
4980     setDebugLocFromInst(Builder, &I);
4981 
4982     for (unsigned Part = 0; Part < UF; ++Part) {
4983       SmallVector<Value *, 2> Ops;
4984       for (VPValue *VPOp : User.operands())
4985         Ops.push_back(State.get(VPOp, Part));
4986 
4987       Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
4988 
4989       if (auto *VecOp = dyn_cast<Instruction>(V))
4990         VecOp->copyIRFlags(&I);
4991 
4992       // Use this vector value for all users of the original instruction.
4993       State.set(Def, V, Part);
4994       addMetadata(V, &I);
4995     }
4996 
4997     break;
4998   }
4999   case Instruction::ICmp:
5000   case Instruction::FCmp: {
5001     // Widen compares. Generate vector compares.
5002     bool FCmp = (I.getOpcode() == Instruction::FCmp);
5003     auto *Cmp = cast<CmpInst>(&I);
5004     setDebugLocFromInst(Builder, Cmp);
5005     for (unsigned Part = 0; Part < UF; ++Part) {
5006       Value *A = State.get(User.getOperand(0), Part);
5007       Value *B = State.get(User.getOperand(1), Part);
5008       Value *C = nullptr;
5009       if (FCmp) {
5010         // Propagate fast math flags.
5011         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
5012         Builder.setFastMathFlags(Cmp->getFastMathFlags());
5013         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
5014       } else {
5015         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
5016       }
5017       State.set(Def, C, Part);
5018       addMetadata(C, &I);
5019     }
5020 
5021     break;
5022   }
5023 
5024   case Instruction::ZExt:
5025   case Instruction::SExt:
5026   case Instruction::FPToUI:
5027   case Instruction::FPToSI:
5028   case Instruction::FPExt:
5029   case Instruction::PtrToInt:
5030   case Instruction::IntToPtr:
5031   case Instruction::SIToFP:
5032   case Instruction::UIToFP:
5033   case Instruction::Trunc:
5034   case Instruction::FPTrunc:
5035   case Instruction::BitCast: {
5036     auto *CI = cast<CastInst>(&I);
5037     setDebugLocFromInst(Builder, CI);
5038 
5039     // Vectorize casts.
5040     Type *DestTy =
5041         (VF.isScalar()) ? CI->getType() : VectorType::get(CI->getType(), VF);
5042 
5043     for (unsigned Part = 0; Part < UF; ++Part) {
5044       Value *A = State.get(User.getOperand(0), Part);
5045       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
5046       State.set(Def, Cast, Part);
5047       addMetadata(Cast, &I);
5048     }
5049     break;
5050   }
5051   default:
5052     // This instruction is not vectorized by simple widening.
5053     LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
5054     llvm_unreachable("Unhandled instruction!");
5055   } // end of switch.
5056 }
5057 
5058 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def,
5059                                                VPUser &ArgOperands,
5060                                                VPTransformState &State) {
5061   assert(!isa<DbgInfoIntrinsic>(I) &&
5062          "DbgInfoIntrinsic should have been dropped during VPlan construction");
5063   setDebugLocFromInst(Builder, &I);
5064 
5065   Module *M = I.getParent()->getParent()->getParent();
5066   auto *CI = cast<CallInst>(&I);
5067 
5068   SmallVector<Type *, 4> Tys;
5069   for (Value *ArgOperand : CI->arg_operands())
5070     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue()));
5071 
5072   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
5073 
5074   // The flag indicates whether we use an intrinsic or a regular call for the
5075   // vectorized version of the instruction, i.e. whether calling the intrinsic
5076   // is more beneficial than calling a library function.
5077   bool NeedToScalarize = false;
5078   InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
5079   InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0;
5080   bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
5081   assert((UseVectorIntrinsic || !NeedToScalarize) &&
5082          "Instruction should be scalarized elsewhere.");
5083   assert((IntrinsicCost.isValid() || CallCost.isValid()) &&
5084          "Either the intrinsic cost or vector call cost must be valid");
5085 
5086   for (unsigned Part = 0; Part < UF; ++Part) {
5087     SmallVector<Type *, 2> TysForDecl = {CI->getType()};
5088     SmallVector<Value *, 4> Args;
5089     for (auto &I : enumerate(ArgOperands.operands())) {
5090       // Some intrinsics have a scalar argument - don't replace it with a
5091       // vector.
5092       Value *Arg;
5093       if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
5094         Arg = State.get(I.value(), Part);
5095       else {
5096         Arg = State.get(I.value(), VPIteration(0, 0));
5097         if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index()))
5098           TysForDecl.push_back(Arg->getType());
5099       }
5100       Args.push_back(Arg);
5101     }
5102 
5103     Function *VectorF;
5104     if (UseVectorIntrinsic) {
5105       // Use vector version of the intrinsic.
5106       if (VF.isVector())
5107         TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
5108       VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
5109       assert(VectorF && "Can't retrieve vector intrinsic.");
5110     } else {
5111       // Use vector version of the function call.
5112       const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
5113 #ifndef NDEBUG
5114       assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
5115              "Can't create vector function.");
5116 #endif
5117       VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
5118     }
5119     SmallVector<OperandBundleDef, 1> OpBundles;
5120     CI->getOperandBundlesAsDefs(OpBundles);
5121     CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);
5122 
5123     if (isa<FPMathOperator>(V))
5124       V->copyFastMathFlags(CI);
5125 
5126     State.set(Def, V, Part);
5127     addMetadata(V, &I);
5128   }
5129 }
5130 
5131 void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I, VPValue *VPDef,
5132                                                  VPUser &Operands,
5133                                                  bool InvariantCond,
5134                                                  VPTransformState &State) {
5135   setDebugLocFromInst(Builder, &I);
5136 
5137   // The condition can be loop invariant but still defined inside the
5138   // loop. This means that we can't just use the original 'cond' value.
5139   // We have to take the 'vectorized' value and pick the first lane.
5140   // Instcombine will make this a no-op.
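  //
  // Sketch of the idea: if the condition is computed inside the loop but is
  // invariant, we read lane 0 of its widened value (VPIteration(0, 0)) and
  // feed that single scalar i1 to every unrolled select created below.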
5141   auto *InvarCond = InvariantCond
5142                         ? State.get(Operands.getOperand(0), VPIteration(0, 0))
5143                         : nullptr;
5144 
5145   for (unsigned Part = 0; Part < UF; ++Part) {
5146     Value *Cond =
5147         InvarCond ? InvarCond : State.get(Operands.getOperand(0), Part);
5148     Value *Op0 = State.get(Operands.getOperand(1), Part);
5149     Value *Op1 = State.get(Operands.getOperand(2), Part);
5150     Value *Sel = Builder.CreateSelect(Cond, Op0, Op1);
5151     State.set(VPDef, Sel, Part);
5152     addMetadata(Sel, &I);
5153   }
5154 }
5155 
5156 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
5157   // We should not collect Scalars more than once per VF. Right now, this
5158   // function is called from collectUniformsAndScalars(), which already does
5159   // this check. Collecting Scalars for VF=1 does not make any sense.
5160   assert(VF.isVector() && Scalars.find(VF) == Scalars.end() &&
5161          "This function should not be visited twice for the same VF");
5162 
5163   SmallSetVector<Instruction *, 8> Worklist;
5164 
5165   // These sets are used to seed the analysis with pointers used by memory
5166   // accesses that will remain scalar.
5167   SmallSetVector<Instruction *, 8> ScalarPtrs;
5168   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
5169   auto *Latch = TheLoop->getLoopLatch();
5170 
5171   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
5172   // The pointer operands of loads and stores will be scalar as long as the
5173   // memory access is not a gather or scatter operation. The value operand of a
5174   // store will remain scalar if the store is scalarized.
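  //
  // Illustration (hypothetical decisions): the pointer of a load that will be
  // widened or interleaved counts as a scalar use, whereas the pointer of a
  // gather/scatter does not, since each lane then needs its own address.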
5175   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
5176     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
5177     assert(WideningDecision != CM_Unknown &&
5178            "Widening decision should be ready at this moment");
5179     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
5180       if (Ptr == Store->getValueOperand())
5181         return WideningDecision == CM_Scalarize;
5182     assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
5183            "Ptr is neither a value or pointer operand");
5184     return WideningDecision != CM_GatherScatter;
5185   };
5186 
5187   // A helper that returns true if the given value is a bitcast or
5188   // getelementptr instruction contained in the loop.
5189   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
5190     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
5191             isa<GetElementPtrInst>(V)) &&
5192            !TheLoop->isLoopInvariant(V);
5193   };
5194 
5195   auto isScalarPtrInduction = [&](Instruction *MemAccess, Value *Ptr) {
5196     if (!isa<PHINode>(Ptr) ||
5197         !Legal->getInductionVars().count(cast<PHINode>(Ptr)))
5198       return false;
5199     auto &Induction = Legal->getInductionVars()[cast<PHINode>(Ptr)];
5200     if (Induction.getKind() != InductionDescriptor::IK_PtrInduction)
5201       return false;
5202     return isScalarUse(MemAccess, Ptr);
5203   };
5204 
5205   // A helper that evaluates a memory access's use of a pointer. If the
5206   // pointer is the pointer induction of a loop, it is inserted into the
5207   // Worklist. If the use will be a scalar use, and the pointer is only used
5208   // by memory accesses, we place the pointer in ScalarPtrs. Otherwise, the
5209   // pointer is placed in PossibleNonScalarPtrs.
5210   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
5211     if (isScalarPtrInduction(MemAccess, Ptr)) {
5212       Worklist.insert(cast<Instruction>(Ptr));
5213       Instruction *Update = cast<Instruction>(
5214           cast<PHINode>(Ptr)->getIncomingValueForBlock(Latch));
5215       Worklist.insert(Update);
5216       LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Ptr
5217                         << "\n");
5218       LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Update
5219                         << "\n");
5220       return;
5221     }
5222     // We only care about bitcast and getelementptr instructions contained in
5223     // the loop.
5224     if (!isLoopVaryingBitCastOrGEP(Ptr))
5225       return;
5226 
5227     // If the pointer has already been identified as scalar (e.g., if it was
5228     // also identified as uniform), there's nothing to do.
5229     auto *I = cast<Instruction>(Ptr);
5230     if (Worklist.count(I))
5231       return;
5232 
5233     // If the use of the pointer will be a scalar use, and all users of the
5234     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
5235     // place the pointer in PossibleNonScalarPtrs.
5236     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
5237           return isa<LoadInst>(U) || isa<StoreInst>(U);
5238         }))
5239       ScalarPtrs.insert(I);
5240     else
5241       PossibleNonScalarPtrs.insert(I);
5242   };
5243 
5244   // We seed the scalars analysis with two classes of instructions: (1)
5245   // instructions marked uniform-after-vectorization and (2) bitcast,
5246   // getelementptr and (pointer) phi instructions used by memory accesses
5247   // requiring a scalar use.
5248   //
5249   // (1) Add to the worklist all instructions that have been identified as
5250   // uniform-after-vectorization.
5251   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
5252 
5253   // (2) Add to the worklist all bitcast and getelementptr instructions used by
5254   // memory accesses requiring a scalar use. The pointer operands of loads and
5255   // stores will be scalar as long as the memory access is not a gather or
5256   // scatter operation. The value operand of a store will remain scalar if the
5257   // store is scalarized.
5258   for (auto *BB : TheLoop->blocks())
5259     for (auto &I : *BB) {
5260       if (auto *Load = dyn_cast<LoadInst>(&I)) {
5261         evaluatePtrUse(Load, Load->getPointerOperand());
5262       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
5263         evaluatePtrUse(Store, Store->getPointerOperand());
5264         evaluatePtrUse(Store, Store->getValueOperand());
5265       }
5266     }
5267   for (auto *I : ScalarPtrs)
5268     if (!PossibleNonScalarPtrs.count(I)) {
5269       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
5270       Worklist.insert(I);
5271     }
5272 
5273   // Insert the forced scalars.
5274   // FIXME: Currently widenPHIInstruction() often creates a dead vector
5275   // induction variable when the PHI user is scalarized.
5276   auto ForcedScalar = ForcedScalars.find(VF);
5277   if (ForcedScalar != ForcedScalars.end())
5278     for (auto *I : ForcedScalar->second)
5279       Worklist.insert(I);
5280 
5281   // Expand the worklist by looking through any bitcasts and getelementptr
5282   // instructions we've already identified as scalar. This is similar to the
5283   // expansion step in collectLoopUniforms(); however, here we're only
5284   // expanding to include additional bitcasts and getelementptr instructions.
5285   unsigned Idx = 0;
5286   while (Idx != Worklist.size()) {
5287     Instruction *Dst = Worklist[Idx++];
5288     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
5289       continue;
5290     auto *Src = cast<Instruction>(Dst->getOperand(0));
5291     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
5292           auto *J = cast<Instruction>(U);
5293           return !TheLoop->contains(J) || Worklist.count(J) ||
5294                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
5295                   isScalarUse(J, Src));
5296         })) {
5297       Worklist.insert(Src);
5298       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
5299     }
5300   }
5301 
5302   // An induction variable will remain scalar if all users of the induction
5303   // variable and induction variable update remain scalar.
5304   for (auto &Induction : Legal->getInductionVars()) {
5305     auto *Ind = Induction.first;
5306     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5307 
5308     // If tail-folding is applied, the primary induction variable will be used
5309     // to feed a vector compare.
5310     if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
5311       continue;
5312 
5313     // Determine if all users of the induction variable are scalar after
5314     // vectorization.
5315     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5316       auto *I = cast<Instruction>(U);
5317       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
5318     });
5319     if (!ScalarInd)
5320       continue;
5321 
5322     // Determine if all users of the induction variable update instruction are
5323     // scalar after vectorization.
5324     auto ScalarIndUpdate =
5325         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5326           auto *I = cast<Instruction>(U);
5327           return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
5328         });
5329     if (!ScalarIndUpdate)
5330       continue;
5331 
5332     // The induction variable and its update instruction will remain scalar.
5333     Worklist.insert(Ind);
5334     Worklist.insert(IndUpdate);
5335     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
5336     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
5337                       << "\n");
5338   }
5339 
5340   Scalars[VF].insert(Worklist.begin(), Worklist.end());
5341 }
5342 
5343 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I) const {
5344   if (!blockNeedsPredication(I->getParent()))
5345     return false;
5346   switch(I->getOpcode()) {
5347   default:
5348     break;
5349   case Instruction::Load:
5350   case Instruction::Store: {
5351     if (!Legal->isMaskRequired(I))
5352       return false;
5353     auto *Ptr = getLoadStorePointerOperand(I);
5354     auto *Ty = getLoadStoreType(I);
5355     const Align Alignment = getLoadStoreAlignment(I);
5356     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
5357                                 TTI.isLegalMaskedGather(Ty, Alignment))
5358                             : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
5359                                 TTI.isLegalMaskedScatter(Ty, Alignment));
5360   }
5361   case Instruction::UDiv:
5362   case Instruction::SDiv:
5363   case Instruction::SRem:
5364   case Instruction::URem:
5365     return mayDivideByZero(*I);
5366   }
5367   return false;
5368 }
5369 
5370 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
5371     Instruction *I, ElementCount VF) {
5372   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
5373   assert(getWideningDecision(I, VF) == CM_Unknown &&
5374          "Decision should not be set yet.");
5375   auto *Group = getInterleavedAccessGroup(I);
5376   assert(Group && "Must have a group.");
5377 
5378   // If the instruction's allocated size doesn't equal its type size, it
5379   // requires padding and will be scalarized.
5380   auto &DL = I->getModule()->getDataLayout();
5381   auto *ScalarTy = getLoadStoreType(I);
5382   if (hasIrregularType(ScalarTy, DL))
5383     return false;
5384 
5385   // Check if masking is required.
5386   // A Group may need masking for one of two reasons: it resides in a block that
5387   // needs predication, or it was decided to use masking to deal with gaps.
5388   bool PredicatedAccessRequiresMasking =
5389       Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I);
5390   bool AccessWithGapsRequiresMasking =
5391       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
5392   if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking)
5393     return true;
5394 
5395   // If masked interleaving is required, we expect that the user/target had
5396   // enabled it, because otherwise it either wouldn't have been created or
5397   // it should have been invalidated by the CostModel.
5398   assert(useMaskedInterleavedAccesses(TTI) &&
5399          "Masked interleave-groups for predicated accesses are not enabled.");
5400 
5401   auto *Ty = getLoadStoreType(I);
5402   const Align Alignment = getLoadStoreAlignment(I);
5403   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
5404                           : TTI.isLegalMaskedStore(Ty, Alignment);
5405 }
5406 
5407 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
5408     Instruction *I, ElementCount VF) {
5409   // Get and ensure we have a valid memory instruction.
5410   LoadInst *LI = dyn_cast<LoadInst>(I);
5411   StoreInst *SI = dyn_cast<StoreInst>(I);
5412   assert((LI || SI) && "Invalid memory instruction");
5413 
5414   auto *Ptr = getLoadStorePointerOperand(I);
5415 
5416   // In order to be widened, the pointer should be consecutive, first of all.
5417   if (!Legal->isConsecutivePtr(Ptr))
5418     return false;
5419 
5420   // If the instruction is a store located in a predicated block, it will be
5421   // scalarized.
5422   if (isScalarWithPredication(I))
5423     return false;
5424 
5425   // If the instruction's allocated size doesn't equal its type size, it
5426   // requires padding and will be scalarized.
5427   auto &DL = I->getModule()->getDataLayout();
5428   auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
5429   if (hasIrregularType(ScalarTy, DL))
5430     return false;
5431 
5432   return true;
5433 }
5434 
5435 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
5436   // We should not collect Uniforms more than once per VF. Right now,
5437   // this function is called from collectUniformsAndScalars(), which
5438   // already does this check. Collecting Uniforms for VF=1 does not make any
5439   // sense.
5440 
5441   assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
5442          "This function should not be visited twice for the same VF");
5443 
5444   // Visit the list of Uniforms. If we do not find any uniform value, we will
5445   // not analyze it again: Uniforms.count(VF) will return 1.
5446   Uniforms[VF].clear();
5447 
5448   // We now know that the loop is vectorizable!
5449   // Collect instructions inside the loop that will remain uniform after
5450   // vectorization.
5451 
5452   // Global values, params and instructions outside of current loop are out of
5453   // scope.
5454   auto isOutOfScope = [&](Value *V) -> bool {
5455     Instruction *I = dyn_cast<Instruction>(V);
5456     return (!I || !TheLoop->contains(I));
5457   };
5458 
5459   SetVector<Instruction *> Worklist;
5460   BasicBlock *Latch = TheLoop->getLoopLatch();
5461 
5462   // Instructions that are scalar with predication must not be considered
5463   // uniform after vectorization, because that would create an erroneous
5464   // replicating region where only a single instance out of VF should be formed.
5465   // TODO: optimize such rare cases if found important, see PR40816.
5466   auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
5467     if (isOutOfScope(I)) {
5468       LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
5469                         << *I << "\n");
5470       return;
5471     }
5472     if (isScalarWithPredication(I)) {
5473       LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
5474                         << *I << "\n");
5475       return;
5476     }
5477     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
5478     Worklist.insert(I);
5479   };
5480 
5481   // Start with the conditional branch. If the branch condition is an
5482   // instruction contained in the loop that is only used by the branch, it is
5483   // uniform.
5484   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
5485   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
5486     addToWorklistIfAllowed(Cmp);
5487 
5488   auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
5489     InstWidening WideningDecision = getWideningDecision(I, VF);
5490     assert(WideningDecision != CM_Unknown &&
5491            "Widening decision should be ready at this moment");
5492 
5493     // A uniform memory op is itself uniform.  We exclude uniform stores
5494     // here as they demand the last lane, not the first one.
5495     if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) {
5496       assert(WideningDecision == CM_Scalarize);
5497       return true;
5498     }
5499 
5500     return (WideningDecision == CM_Widen ||
5501             WideningDecision == CM_Widen_Reverse ||
5502             WideningDecision == CM_Interleave);
5503   };
5504 
5506   // Returns true if Ptr is the pointer operand of a memory access instruction
5507   // I, and I is known to not require scalarization.
5508   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
5509     return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
5510   };
5511 
5512   // Holds a list of values which are known to have at least one uniform use.
5513   // Note that there may be other uses which aren't uniform.  A "uniform use"
5514   // here is something which only demands lane 0 of the unrolled iterations;
5515   // it does not imply that all lanes produce the same value (e.g. this is not
5516   // the usual meaning of uniform).
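  //
  // For instance (a hypothetical case): the address feeding a uniform
  // (scalar) load only needs its lane-0 value here, even if the same address
  // also feeds another access that demands all lanes.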
5517   SetVector<Value *> HasUniformUse;
5518 
5519   // Scan the loop for instructions which are either a) known to have only
5520   // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
5521   for (auto *BB : TheLoop->blocks())
5522     for (auto &I : *BB) {
5523       // If there's no pointer operand, there's nothing to do.
5524       auto *Ptr = getLoadStorePointerOperand(&I);
5525       if (!Ptr)
5526         continue;
5527 
5528       // A uniform memory op is itself uniform.  We exclude uniform stores
5529       // here as they demand the last lane, not the first one.
5530       if (isa<LoadInst>(I) && Legal->isUniformMemOp(I))
5531         addToWorklistIfAllowed(&I);
5532 
5533       if (isUniformDecision(&I, VF)) {
5534         assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check");
5535         HasUniformUse.insert(Ptr);
5536       }
5537     }
5538 
5539   // Add to the worklist any operands which have *only* uniform (e.g. lane 0
5540   // demanding) users.  Since loops are assumed to be in LCSSA form, this
5541   // disallows uses outside the loop as well.
5542   for (auto *V : HasUniformUse) {
5543     if (isOutOfScope(V))
5544       continue;
5545     auto *I = cast<Instruction>(V);
5546     auto UsersAreMemAccesses =
5547       llvm::all_of(I->users(), [&](User *U) -> bool {
5548         return isVectorizedMemAccessUse(cast<Instruction>(U), V);
5549       });
5550     if (UsersAreMemAccesses)
5551       addToWorklistIfAllowed(I);
5552   }
5553 
5554   // Expand Worklist in topological order: whenever a new instruction
5555   // is added, its users should already be inside Worklist. This ensures
5556   // that a uniform instruction will only be used by uniform instructions.
5557   unsigned idx = 0;
5558   while (idx != Worklist.size()) {
5559     Instruction *I = Worklist[idx++];
5560 
5561     for (auto OV : I->operand_values()) {
5562       // isOutOfScope operands cannot be uniform instructions.
5563       if (isOutOfScope(OV))
5564         continue;
5565       // First order recurrence Phi's should typically be considered
5566       // non-uniform.
5567       auto *OP = dyn_cast<PHINode>(OV);
5568       if (OP && Legal->isFirstOrderRecurrence(OP))
5569         continue;
5570       // If all the users of the operand are uniform, then add the
5571       // operand into the uniform worklist.
5572       auto *OI = cast<Instruction>(OV);
5573       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
5574             auto *J = cast<Instruction>(U);
5575             return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
5576           }))
5577         addToWorklistIfAllowed(OI);
5578     }
5579   }
5580 
5581   // For an instruction to be added into Worklist above, all its users inside
5582   // the loop should also be in Worklist. However, this condition cannot be
5583   // true for phi nodes that form a cyclic dependence. We must process phi
5584   // nodes separately. An induction variable will remain uniform if all users
5585   // of the induction variable and induction variable update remain uniform.
5586   // The code below handles both pointer and non-pointer induction variables.
5587   for (auto &Induction : Legal->getInductionVars()) {
5588     auto *Ind = Induction.first;
5589     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5590 
5591     // Determine if all users of the induction variable are uniform after
5592     // vectorization.
5593     auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5594       auto *I = cast<Instruction>(U);
5595       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
5596              isVectorizedMemAccessUse(I, Ind);
5597     });
5598     if (!UniformInd)
5599       continue;
5600 
5601     // Determine if all users of the induction variable update instruction are
5602     // uniform after vectorization.
5603     auto UniformIndUpdate =
5604         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5605           auto *I = cast<Instruction>(U);
5606           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
5607                  isVectorizedMemAccessUse(I, IndUpdate);
5608         });
5609     if (!UniformIndUpdate)
5610       continue;
5611 
5612     // The induction variable and its update instruction will remain uniform.
5613     addToWorklistIfAllowed(Ind);
5614     addToWorklistIfAllowed(IndUpdate);
5615   }
5616 
5617   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
5618 }
5619 
5620 bool LoopVectorizationCostModel::runtimeChecksRequired() {
5621   LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
5622 
5623   if (Legal->getRuntimePointerChecking()->Need) {
5624     reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
5625         "runtime pointer checks needed. Enable vectorization of this "
5626         "loop with '#pragma clang loop vectorize(enable)' when "
5627         "compiling with -Os/-Oz",
5628         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5629     return true;
5630   }
5631 
5632   if (!PSE.getUnionPredicate().getPredicates().empty()) {
5633     reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
5634         "runtime SCEV checks needed. Enable vectorization of this "
5635         "loop with '#pragma clang loop vectorize(enable)' when "
5636         "compiling with -Os/-Oz",
5637         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5638     return true;
5639   }
5640 
5641   // FIXME: Avoid specializing for stride==1 instead of bailing out.
5642   if (!Legal->getLAI()->getSymbolicStrides().empty()) {
5643     reportVectorizationFailure("Runtime stride check for small trip count",
5644         "runtime stride == 1 checks needed. Enable vectorization of "
5645         "this loop without such check by compiling with -Os/-Oz",
5646         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5647     return true;
5648   }
5649 
5650   return false;
5651 }
5652 
5653 ElementCount
5654 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
5655   if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) {
5656     reportVectorizationInfo(
5657         "Disabling scalable vectorization, because target does not "
5658         "support scalable vectors.",
5659         "ScalableVectorsUnsupported", ORE, TheLoop);
5660     return ElementCount::getScalable(0);
5661   }
5662 
5663   if (Hints->isScalableVectorizationDisabled()) {
5664     reportVectorizationInfo("Scalable vectorization is explicitly disabled",
5665                             "ScalableVectorizationDisabled", ORE, TheLoop);
5666     return ElementCount::getScalable(0);
5667   }
5668 
5669   auto MaxScalableVF = ElementCount::getScalable(
5670       std::numeric_limits<ElementCount::ScalarTy>::max());
5671 
5672   // Disable scalable vectorization if the loop contains unsupported reductions.
5673   // Test that the loop-vectorizer can legalize all operations for this MaxVF.
5674   // FIXME: While for scalable vectors this is currently sufficient, this should
5675   // be replaced by a more detailed mechanism that filters out specific VFs,
5676   // instead of invalidating vectorization for a whole set of VFs based on the
5677   // MaxVF.
5678   if (!canVectorizeReductions(MaxScalableVF)) {
5679     reportVectorizationInfo(
5680         "Scalable vectorization not supported for the reduction "
5681         "operations found in this loop.",
5682         "ScalableVFUnfeasible", ORE, TheLoop);
5683     return ElementCount::getScalable(0);
5684   }
5685 
5686   if (Legal->isSafeForAnyVectorWidth())
5687     return MaxScalableVF;
5688 
5689   // Limit MaxScalableVF by the maximum safe dependence distance.
5690   Optional<unsigned> MaxVScale = TTI.getMaxVScale();
5691   MaxScalableVF = ElementCount::getScalable(
5692       MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);
5693   if (!MaxScalableVF)
5694     reportVectorizationInfo(
5695         "Max legal vector width too small, scalable vectorization "
5696         "unfeasible.",
5697         "ScalableVFUnfeasible", ORE, TheLoop);
5698 
5699   return MaxScalableVF;
5700 }
5701 
5702 FixedScalableVFPair
5703 LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount,
5704                                                  ElementCount UserVF) {
5705   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
5706   unsigned SmallestType, WidestType;
5707   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
5708 
5709   // Get the maximum safe dependence distance in bits computed by LAA.
5710   // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
5711   // the memory accesses that is most restrictive (involved in the smallest
5712   // dependence distance).
5713   unsigned MaxSafeElements =
5714       PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType);
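  // For example (illustrative numbers, not a specific target): a maximum safe
  // dependence width of 256 bits with a widest type of 32 bits gives
  // MaxSafeElements = PowerOf2Floor(256 / 32) = 8.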
5715 
5716   auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements);
5717   auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements);
5718 
5719   LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF
5720                     << ".\n");
5721   LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF
5722                     << ".\n");
5723 
5724   // First analyze the UserVF, fall back if the UserVF should be ignored.
5725   if (UserVF) {
5726     auto MaxSafeUserVF =
5727         UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;
5728 
5729     if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF))
5730       return UserVF;
5731 
5732     assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));
5733 
5734     // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it
5735     // is better to ignore the hint and let the compiler choose a suitable VF.
5736     if (!UserVF.isScalable()) {
5737       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5738                         << " is unsafe, clamping to max safe VF="
5739                         << MaxSafeFixedVF << ".\n");
5740       ORE->emit([&]() {
5741         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5742                                           TheLoop->getStartLoc(),
5743                                           TheLoop->getHeader())
5744                << "User-specified vectorization factor "
5745                << ore::NV("UserVectorizationFactor", UserVF)
5746                << " is unsafe, clamping to maximum safe vectorization factor "
5747                << ore::NV("VectorizationFactor", MaxSafeFixedVF);
5748       });
5749       return MaxSafeFixedVF;
5750     }
5751 
5752     LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5753                       << " is unsafe. Ignoring scalable UserVF.\n");
5754     ORE->emit([&]() {
5755       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5756                                         TheLoop->getStartLoc(),
5757                                         TheLoop->getHeader())
5758              << "User-specified vectorization factor "
5759              << ore::NV("UserVectorizationFactor", UserVF)
5760              << " is unsafe. Ignoring the hint to let the compiler pick a "
5761                 "suitable VF.";
5762     });
5763   }
5764 
5765   LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
5766                     << " / " << WidestType << " bits.\n");
5767 
5768   FixedScalableVFPair Result(ElementCount::getFixed(1),
5769                              ElementCount::getScalable(0));
5770   if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType,
5771                                            WidestType, MaxSafeFixedVF))
5772     Result.FixedVF = MaxVF;
5773 
5774   if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType,
5775                                            WidestType, MaxSafeScalableVF))
5776     if (MaxVF.isScalable()) {
5777       Result.ScalableVF = MaxVF;
5778       LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
5779                         << "\n");
5780     }
5781 
5782   return Result;
5783 }
5784 
5785 FixedScalableVFPair
5786 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
5787   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
5788     // TODO: It may be useful to do this, since it's still likely to be
5789     // dynamically uniform if the target can skip.
5790     reportVectorizationFailure(
5791         "Not inserting runtime ptr check for divergent target",
5792         "runtime pointer checks needed. Not enabled for divergent target",
5793         "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
5794     return FixedScalableVFPair::getNone();
5795   }
5796 
5797   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5798   LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
5799   if (TC == 1) {
5800     reportVectorizationFailure("Single iteration (non) loop",
5801         "loop trip count is one, irrelevant for vectorization",
5802         "SingleIterationLoop", ORE, TheLoop);
5803     return FixedScalableVFPair::getNone();
5804   }
5805 
5806   switch (ScalarEpilogueStatus) {
5807   case CM_ScalarEpilogueAllowed:
5808     return computeFeasibleMaxVF(TC, UserVF);
5809   case CM_ScalarEpilogueNotAllowedUsePredicate:
5810     LLVM_FALLTHROUGH;
5811   case CM_ScalarEpilogueNotNeededUsePredicate:
5812     LLVM_DEBUG(
5813         dbgs() << "LV: vector predicate hint/switch found.\n"
5814                << "LV: Not allowing scalar epilogue, creating predicated "
5815                << "vector loop.\n");
5816     break;
5817   case CM_ScalarEpilogueNotAllowedLowTripLoop:
5818     // fallthrough as a special case of OptForSize
5819   case CM_ScalarEpilogueNotAllowedOptSize:
5820     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
5821       LLVM_DEBUG(
5822           dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5823     else
5824       LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5825                         << "count.\n");
5826 
5827     // Bail if runtime checks are required, which are not good when optimising
5828     // for size.
5829     if (runtimeChecksRequired())
5830       return FixedScalableVFPair::getNone();
5831 
5832     break;
5833   }
5834 
5835   // The only loops we can vectorize without a scalar epilogue, are loops with
5836   // a bottom-test and a single exiting block. We'd have to handle the fact
5837   // that not every instruction executes on the last iteration.  This will
5838   // require a lane mask which varies through the vector loop body.  (TODO)
5839   if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5840     // If there was a tail-folding hint/switch, but we can't fold the tail by
5841     // masking, fallback to a vectorization with a scalar epilogue.
5842     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5843       LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5844                            "scalar epilogue instead.\n");
5845       ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5846       return computeFeasibleMaxVF(TC, UserVF);
5847     }
5848     return FixedScalableVFPair::getNone();
5849   }
5850 
5851   // Now try the tail folding
5852 
5853   // Invalidate interleave groups that require an epilogue if we can't mask
5854   // the interleave-group.
5855   if (!useMaskedInterleavedAccesses(TTI)) {
5856     assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5857            "No decisions should have been taken at this point");
5858     // Note: There is no need to invalidate any cost modeling decisions here, as
5859     // none were taken so far.
5860     InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5861   }
5862 
5863   FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF);
5864   // Avoid tail folding if the trip count is known to be a multiple of any VF
5865   // we chose.
5866   // FIXME: The condition below pessimises the case for fixed-width vectors,
5867   // when scalable VFs are also candidates for vectorization.
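  // For example (illustrative numbers): with a trip count of 64, UserIC = 2
  // and a fixed MaxVF of 8, 64 % (8 * 2) == 0, so no tail remains and tail
  // folding is unnecessary.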
5868   if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) {
5869     ElementCount MaxFixedVF = MaxFactors.FixedVF;
5870     assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) &&
5871            "MaxFixedVF must be a power of 2");
5872     unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC
5873                                    : MaxFixedVF.getFixedValue();
5874     ScalarEvolution *SE = PSE.getSE();
5875     const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
5876     const SCEV *ExitCount = SE->getAddExpr(
5877         BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
5878     const SCEV *Rem = SE->getURemExpr(
5879         SE->applyLoopGuards(ExitCount, TheLoop),
5880         SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
5881     if (Rem->isZero()) {
5882       // Accept MaxFixedVF if we do not have a tail.
5883       LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
5884       return MaxFactors;
5885     }
5886   }
5887 
5888   // If we don't know the precise trip count, or if the trip count that we
5889   // found modulo the vectorization factor is not zero, try to fold the tail
5890   // by masking.
5891   // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
5892   if (Legal->prepareToFoldTailByMasking()) {
5893     FoldTailByMasking = true;
5894     return MaxFactors;
5895   }
5896 
5897   // If there was a tail-folding hint/switch, but we can't fold the tail by
5898   // masking, fallback to a vectorization with a scalar epilogue.
5899   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5900     LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5901                          "scalar epilogue instead.\n");
5902     ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5903     return MaxFactors;
5904   }
5905 
5906   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
5907     LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
5908     return FixedScalableVFPair::getNone();
5909   }
5910 
5911   if (TC == 0) {
5912     reportVectorizationFailure(
5913         "Unable to calculate the loop count due to complex control flow",
5914         "unable to calculate the loop count due to complex control flow",
5915         "UnknownLoopCountComplexCFG", ORE, TheLoop);
5916     return FixedScalableVFPair::getNone();
5917   }
5918 
5919   reportVectorizationFailure(
5920       "Cannot optimize for size and vectorize at the same time.",
5921       "cannot optimize for size and vectorize at the same time. "
5922       "Enable vectorization of this loop with '#pragma clang loop "
5923       "vectorize(enable)' when compiling with -Os/-Oz",
5924       "NoTailLoopWithOptForSize", ORE, TheLoop);
5925   return FixedScalableVFPair::getNone();
5926 }
5927 
5928 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
5929     unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType,
5930     const ElementCount &MaxSafeVF) {
5931   bool ComputeScalableMaxVF = MaxSafeVF.isScalable();
5932   TypeSize WidestRegister = TTI.getRegisterBitWidth(
5933       ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
5934                            : TargetTransformInfo::RGK_FixedWidthVector);
5935 
5936   // Convenience function to return the minimum of two ElementCounts.
5937   auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) {
5938     assert((LHS.isScalable() == RHS.isScalable()) &&
5939            "Scalable flags must match");
5940     return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS;
5941   };
5942 
5943   // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
5944   // Note that both WidestRegister and WidestType may not be powers of 2.
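  // For instance (a sketch with made-up numbers): a 128-bit vector register
  // and a widest element type of 64 bits yield PowerOf2Floor(128 / 64) = 2
  // lanes, before clamping against MaxSafeVF below.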
5945   auto MaxVectorElementCount = ElementCount::get(
5946       PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType),
5947       ComputeScalableMaxVF);
5948   MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
5949   LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
5950                     << (MaxVectorElementCount * WidestType) << " bits.\n");
5951 
5952   if (!MaxVectorElementCount) {
5953     LLVM_DEBUG(dbgs() << "LV: The target has no "
5954                       << (ComputeScalableMaxVF ? "scalable" : "fixed")
5955                       << " vector registers.\n");
5956     return ElementCount::getFixed(1);
5957   }
5958 
5959   const auto TripCountEC = ElementCount::getFixed(ConstTripCount);
5960   if (ConstTripCount &&
5961       ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) &&
5962       isPowerOf2_32(ConstTripCount)) {
5963     // We need to clamp the VF to be the ConstTripCount. There is no point in
5964     // choosing a higher viable VF as done in the loop below. If
5965     // MaxVectorElementCount is scalable, we only fall back on a fixed VF when
5966     // the TC is less than or equal to the known number of lanes.
5967     LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
5968                       << ConstTripCount << "\n");
5969     return TripCountEC;
5970   }
5971 
5972   ElementCount MaxVF = MaxVectorElementCount;
5973   if (TTI.shouldMaximizeVectorBandwidth() ||
5974       (MaximizeBandwidth && isScalarEpilogueAllowed())) {
5975     auto MaxVectorElementCountMaxBW = ElementCount::get(
5976         PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType),
5977         ComputeScalableMaxVF);
5978     MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
5979 
5980     // Collect all viable vectorization factors larger than the default MaxVF
5981     // (i.e. MaxVectorElementCount).
5982     SmallVector<ElementCount, 8> VFs;
5983     for (ElementCount VS = MaxVectorElementCount * 2;
5984          ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2)
5985       VFs.push_back(VS);
5986 
5987     // For each VF calculate its register usage.
5988     auto RUs = calculateRegisterUsage(VFs);
5989 
5990     // Select the largest VF which doesn't require more registers than existing
5991     // ones.
5992     for (int i = RUs.size() - 1; i >= 0; --i) {
5993       bool Selected = true;
5994       for (auto &pair : RUs[i].MaxLocalUsers) {
5995         unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5996         if (pair.second > TargetNumRegisters)
5997           Selected = false;
5998       }
5999       if (Selected) {
6000         MaxVF = VFs[i];
6001         break;
6002       }
6003     }
6004     if (ElementCount MinVF =
6005             TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) {
6006       if (ElementCount::isKnownLT(MaxVF, MinVF)) {
6007         LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
6008                           << ") with target's minimum: " << MinVF << '\n');
6009         MaxVF = MinVF;
6010       }
6011     }
6012   }
6013   return MaxVF;
6014 }
6015 
6016 bool LoopVectorizationCostModel::isMoreProfitable(
6017     const VectorizationFactor &A, const VectorizationFactor &B) const {
6018   InstructionCost::CostType CostA = *A.Cost.getValue();
6019   InstructionCost::CostType CostB = *B.Cost.getValue();
6020 
6021   unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop);
6022 
6023   if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking &&
6024       MaxTripCount) {
6025     // If we are folding the tail and the trip count is a known (possibly small)
6026     // constant, the trip count will be rounded up to an integer number of
6027     // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF),
6028     // which we compare directly. When not folding the tail, the total cost will
6029     // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is
6030     // approximated with the per-lane cost below instead of using the tripcount
6031     // as here.
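    // For example (illustrative numbers only): with MaxTripCount = 10,
    // A.Width = 4 and B.Width = 8, A runs ceil(10/4) = 3 vector iterations and
    // B runs ceil(10/8) = 2, so A is preferred only if CostA * 3 < CostB * 2.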
6032     int64_t RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue());
6033     int64_t RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue());
6034     return RTCostA < RTCostB;
6035   }
6036 
  // When scalable vectorization is set to preferred, assume for now that
  // vscale may be larger than 1, so that scalable vectorization is slightly
  // favored over fixed-width vectorization.
6040   if (Hints->isScalableVectorizationPreferred())
6041     if (A.Width.isScalable() && !B.Width.isScalable())
6042       return (CostA * B.Width.getKnownMinValue()) <=
6043              (CostB * A.Width.getKnownMinValue());
6044 
6045   // To avoid the need for FP division:
6046   //      (CostA / A.Width) < (CostB / B.Width)
6047   // <=>  (CostA * B.Width) < (CostB * A.Width)
6048   return (CostA * B.Width.getKnownMinValue()) <
6049          (CostB * A.Width.getKnownMinValue());
6050 }
6051 
6052 VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor(
6053     const ElementCountSet &VFCandidates) {
6054   InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first;
6055   LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
6056   assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
6057   assert(VFCandidates.count(ElementCount::getFixed(1)) &&
6058          "Expected Scalar VF to be a candidate");
6059 
6060   const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost);
6061   VectorizationFactor ChosenFactor = ScalarCost;
6062 
6063   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
6064   if (ForceVectorization && VFCandidates.size() > 1) {
6065     // Ignore scalar width, because the user explicitly wants vectorization.
6066     // Initialize cost to max so that VF = 2 is, at least, chosen during cost
6067     // evaluation.
6068     ChosenFactor.Cost = std::numeric_limits<InstructionCost::CostType>::max();
6069   }
6070 
6071   for (const auto &i : VFCandidates) {
6072     // The cost for scalar VF=1 is already calculated, so ignore it.
6073     if (i.isScalar())
6074       continue;
6075 
    // Notice that the vector loop needs to be executed fewer times, so
    // we need to divide the cost of the vector loop by the width of
    // the vector elements.
6079     VectorizationCostTy C = expectedCost(i);
6080 
6081     assert(C.first.isValid() && "Unexpected invalid cost for vector loop");
6082     VectorizationFactor Candidate(i, C.first);
6083     LLVM_DEBUG(
6084         dbgs() << "LV: Vector loop of width " << i << " costs: "
6085                << (*Candidate.Cost.getValue() /
6086                    Candidate.Width.getKnownMinValue())
6087                << (i.isScalable() ? " (assuming a minimum vscale of 1)" : "")
6088                << ".\n");
6089 
6090     if (!C.second && !ForceVectorization) {
6091       LLVM_DEBUG(
6092           dbgs() << "LV: Not considering vector loop of width " << i
6093                  << " because it will not generate any vector instructions.\n");
6094       continue;
6095     }
6096 
    // If profitable, add it to the ProfitableVFs list.
6098     if (isMoreProfitable(Candidate, ScalarCost))
6099       ProfitableVFs.push_back(Candidate);
6100 
6101     if (isMoreProfitable(Candidate, ChosenFactor))
6102       ChosenFactor = Candidate;
6103   }
6104 
6105   if (!EnableCondStoresVectorization && NumPredStores) {
6106     reportVectorizationFailure("There are conditional stores.",
6107         "store that is conditionally executed prevents vectorization",
6108         "ConditionalStore", ORE, TheLoop);
6109     ChosenFactor = ScalarCost;
6110   }
6111 
6112   LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() &&
6113                  *ChosenFactor.Cost.getValue() >= *ScalarCost.Cost.getValue())
6114                  dbgs()
6115              << "LV: Vectorization seems to be not beneficial, "
6116              << "but was forced by a user.\n");
6117   LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n");
6118   return ChosenFactor;
6119 }
6120 
6121 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization(
6122     const Loop &L, ElementCount VF) const {
6123   // Cross iteration phis such as reductions need special handling and are
6124   // currently unsupported.
6125   if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) {
6126         return Legal->isFirstOrderRecurrence(&Phi) ||
6127                Legal->isReductionVariable(&Phi);
6128       }))
6129     return false;
6130 
6131   // Phis with uses outside of the loop require special handling and are
6132   // currently unsupported.
6133   for (auto &Entry : Legal->getInductionVars()) {
6134     // Look for uses of the value of the induction at the last iteration.
6135     Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch());
6136     for (User *U : PostInc->users())
6137       if (!L.contains(cast<Instruction>(U)))
6138         return false;
    // Look for uses of the penultimate value of the induction.
6140     for (User *U : Entry.first->users())
6141       if (!L.contains(cast<Instruction>(U)))
6142         return false;
6143   }
6144 
6145   // Induction variables that are widened require special handling that is
6146   // currently not supported.
6147   if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
6148         return !(this->isScalarAfterVectorization(Entry.first, VF) ||
6149                  this->isProfitableToScalarize(Entry.first, VF));
6150       }))
6151     return false;
6152 
6153   return true;
6154 }
6155 
6156 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
6157     const ElementCount VF) const {
6158   // FIXME: We need a much better cost-model to take different parameters such
6159   // as register pressure, code size increase and cost of extra branches into
6160   // account. For now we apply a very crude heuristic and only consider loops
6161   // with vectorization factors larger than a certain value.
  // We also consider epilogue vectorization unprofitable for targets that
  // don't consider interleaving beneficial (e.g. MVE).
6164   if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
6165     return false;
6166   if (VF.getFixedValue() >= EpilogueVectorizationMinVF)
6167     return true;
6168   return false;
6169 }
6170 
6171 VectorizationFactor
6172 LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
6173     const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
6174   VectorizationFactor Result = VectorizationFactor::Disabled();
6175   if (!EnableEpilogueVectorization) {
6176     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
6177     return Result;
6178   }
6179 
6180   if (!isScalarEpilogueAllowed()) {
6181     LLVM_DEBUG(
6182         dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
6183                   "allowed.\n";);
6184     return Result;
6185   }
6186 
6187   // FIXME: This can be fixed for scalable vectors later, because at this stage
6188   // the LoopVectorizer will only consider vectorizing a loop with scalable
6189   // vectors when the loop has a hint to enable vectorization for a given VF.
6190   if (MainLoopVF.isScalable()) {
6191     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization for scalable vectors not "
6192                          "yet supported.\n");
6193     return Result;
6194   }
6195 
6196   // Not really a cost consideration, but check for unsupported cases here to
6197   // simplify the logic.
6198   if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
6199     LLVM_DEBUG(
6200         dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
6201                   "not a supported candidate.\n";);
6202     return Result;
6203   }
6204 
6205   if (EpilogueVectorizationForceVF > 1) {
6206     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
6207     if (LVP.hasPlanWithVFs(
6208             {MainLoopVF, ElementCount::getFixed(EpilogueVectorizationForceVF)}))
6209       return {ElementCount::getFixed(EpilogueVectorizationForceVF), 0};
6210     else {
6211       LLVM_DEBUG(
6212           dbgs()
6213               << "LEV: Epilogue vectorization forced factor is not viable.\n";);
6214       return Result;
6215     }
6216   }
6217 
6218   if (TheLoop->getHeader()->getParent()->hasOptSize() ||
6219       TheLoop->getHeader()->getParent()->hasMinSize()) {
6220     LLVM_DEBUG(
6221         dbgs()
6222             << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
6223     return Result;
6224   }
6225 
6226   if (!isEpilogueVectorizationProfitable(MainLoopVF))
6227     return Result;
6228 
6229   for (auto &NextVF : ProfitableVFs)
6230     if (ElementCount::isKnownLT(NextVF.Width, MainLoopVF) &&
6231         (Result.Width.getFixedValue() == 1 ||
6232          isMoreProfitable(NextVF, Result)) &&
6233         LVP.hasPlanWithVFs({MainLoopVF, NextVF.Width}))
6234       Result = NextVF;
6235 
6236   if (Result != VectorizationFactor::Disabled())
6237     LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
6238                       << Result.Width.getFixedValue() << "\n";);
6239   return Result;
6240 }
6241 
6242 std::pair<unsigned, unsigned>
6243 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
6244   unsigned MinWidth = -1U;
6245   unsigned MaxWidth = 8;
6246   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
6247 
6248   // For each block.
6249   for (BasicBlock *BB : TheLoop->blocks()) {
6250     // For each instruction in the loop.
6251     for (Instruction &I : BB->instructionsWithoutDebug()) {
6252       Type *T = I.getType();
6253 
6254       // Skip ignored values.
6255       if (ValuesToIgnore.count(&I))
6256         continue;
6257 
6258       // Only examine Loads, Stores and PHINodes.
6259       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
6260         continue;
6261 
6262       // Examine PHI nodes that are reduction variables. Update the type to
6263       // account for the recurrence type.
6264       if (auto *PN = dyn_cast<PHINode>(&I)) {
6265         if (!Legal->isReductionVariable(PN))
6266           continue;
6267         const RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[PN];
6268         if (PreferInLoopReductions || useOrderedReductions(RdxDesc) ||
6269             TTI.preferInLoopReduction(RdxDesc.getOpcode(),
6270                                       RdxDesc.getRecurrenceType(),
6271                                       TargetTransformInfo::ReductionFlags()))
6272           continue;
6273         T = RdxDesc.getRecurrenceType();
6274       }
6275 
6276       // Examine the stored values.
6277       if (auto *ST = dyn_cast<StoreInst>(&I))
6278         T = ST->getValueOperand()->getType();
6279 
6280       // Ignore loaded pointer types and stored pointer types that are not
6281       // vectorizable.
6282       //
6283       // FIXME: The check here attempts to predict whether a load or store will
6284       //        be vectorized. We only know this for certain after a VF has
6285       //        been selected. Here, we assume that if an access can be
6286       //        vectorized, it will be. We should also look at extending this
6287       //        optimization to non-pointer types.
6288       //
6289       if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
6290           !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
6291         continue;
6292 
6293       MinWidth = std::min(MinWidth,
6294                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
6295       MaxWidth = std::max(MaxWidth,
6296                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
6297     }
6298   }
6299 
6300   return {MinWidth, MaxWidth};
6301 }
6302 
6303 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
6304                                                            unsigned LoopCost) {
6305   // -- The interleave heuristics --
6306   // We interleave the loop in order to expose ILP and reduce the loop overhead.
6307   // There are many micro-architectural considerations that we can't predict
6308   // at this level. For example, frontend pressure (on decode or fetch) due to
6309   // code size, or the number and capabilities of the execution ports.
6310   //
6311   // We use the following heuristics to select the interleave count:
6312   // 1. If the code has reductions, then we interleave to break the cross
6313   // iteration dependency.
6314   // 2. If the loop is really small, then we interleave to reduce the loop
6315   // overhead.
6316   // 3. We don't interleave if we think that we will spill registers to memory
6317   // due to the increased register pressure.
6318 
6319   if (!isScalarEpilogueAllowed())
6320     return 1;
6321 
  // The maximum safe dependence distance was already used to bound the
  // vectorization factor, so do not interleave on top of it.
6323   if (Legal->getMaxSafeDepDistBytes() != -1U)
6324     return 1;
6325 
6326   auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
6327   const bool HasReductions = !Legal->getReductionVars().empty();
  // Do not interleave loops with a relatively small known or estimated trip
  // count. However, we will still interleave when
  // InterleaveSmallLoopScalarReduction is enabled and the code has scalar
  // reductions (HasReductions && VF == 1), because in that case interleaving
  // can expose ILP and break cross-iteration dependences for reductions.
6333   if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
6334       !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
6335     return 1;
6336 
6337   RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these counts below, so assume that there is at least one
  // instruction that uses at least one register (i.e. clamp the counts to 1).
6340   for (auto& pair : R.MaxLocalUsers) {
6341     pair.second = std::max(pair.second, 1U);
6342   }
6343 
6344   // We calculate the interleave count using the following formula.
6345   // Subtract the number of loop invariants from the number of available
6346   // registers. These registers are used by all of the interleaved instances.
  // Next, divide the remaining registers by the number of registers that are
  // required by the loop, in order to estimate how many parallel instances
  // fit without causing spills. All of this is rounded down if necessary to be
  // a power of two. We want a power-of-two interleave count to simplify any
  // addressing operations and alignment considerations.
  // We also want a power-of-two interleave count to ensure that the induction
  // variable of the vector loop wraps to zero when the tail is folded by
  // masking; this currently happens when optimizing for size, in which case IC
  // is set to 1 above.
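  // As an illustrative example (not target data): with 32 available registers,
  // 2 loop-invariant values and a maximum local usage of 5 registers, and
  // ignoring the induction-variable adjustment below, the estimate is
  // PowerOf2Floor((32 - 2) / 5) = PowerOf2Floor(6) = 4 interleaved instances.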
6355   unsigned IC = UINT_MAX;
6356 
6357   for (auto& pair : R.MaxLocalUsers) {
6358     unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
6359     LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
6360                       << " registers of "
6361                       << TTI.getRegisterClassName(pair.first) << " register class\n");
6362     if (VF.isScalar()) {
6363       if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
6364         TargetNumRegisters = ForceTargetNumScalarRegs;
6365     } else {
6366       if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
6367         TargetNumRegisters = ForceTargetNumVectorRegs;
6368     }
6369     unsigned MaxLocalUsers = pair.second;
6370     unsigned LoopInvariantRegs = 0;
6371     if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
6372       LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
6373 
    unsigned TmpIC =
        PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers);
6375     // Don't count the induction variable as interleaved.
6376     if (EnableIndVarRegisterHeur) {
6377       TmpIC =
6378           PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
6379                         std::max(1U, (MaxLocalUsers - 1)));
6380     }
6381 
6382     IC = std::min(IC, TmpIC);
6383   }
6384 
6385   // Clamp the interleave ranges to reasonable counts.
6386   unsigned MaxInterleaveCount =
6387       TTI.getMaxInterleaveFactor(VF.getKnownMinValue());
6388 
6389   // Check if the user has overridden the max.
6390   if (VF.isScalar()) {
6391     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
6392       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
6393   } else {
6394     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
6395       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
6396   }
6397 
  // If the trip count is a known or estimated compile-time constant, limit the
  // interleave count to at most the trip count divided by VF, making sure the
  // result is at least 1.
6401   //
  // For scalable vectors we can't know if interleaving is beneficial. It may
  // not be beneficial for small loops if none of the lanes in the second
  // vector iteration is enabled. However, for larger loops, there is likely to
  // be a similar benefit as for fixed-width vectors. For now, we choose to
  // leave the InterleaveCount as if vscale is '1', although if some
  // information about the vector is known (e.g. min vector size), we can make
  // a better decision.
6408   if (BestKnownTC) {
6409     MaxInterleaveCount =
6410         std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
6411     // Make sure MaxInterleaveCount is greater than 0.
6412     MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
6413   }
6414 
6415   assert(MaxInterleaveCount > 0 &&
6416          "Maximum interleave count must be greater than 0");
6417 
  // Clamp the calculated IC to be between 1 and the max interleave count
  // that the target and the trip count allow.
6420   if (IC > MaxInterleaveCount)
6421     IC = MaxInterleaveCount;
6422   else
6423     // Make sure IC is greater than 0.
6424     IC = std::max(1u, IC);
6425 
6426   assert(IC > 0 && "Interleave count must be greater than 0.");
6427 
6428   // If we did not calculate the cost for VF (because the user selected the VF)
6429   // then we calculate the cost of VF here.
6430   if (LoopCost == 0) {
6431     assert(expectedCost(VF).first.isValid() && "Expected a valid cost");
6432     LoopCost = *expectedCost(VF).first.getValue();
6433   }
6434 
6435   assert(LoopCost && "Non-zero loop cost expected");
6436 
6437   // Interleave if we vectorized this loop and there is a reduction that could
6438   // benefit from interleaving.
6439   if (VF.isVector() && HasReductions) {
6440     LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
6441     return IC;
6442   }
6443 
6444   // Note that if we've already vectorized the loop we will have done the
6445   // runtime check and so interleaving won't require further checks.
6446   bool InterleavingRequiresRuntimePointerCheck =
6447       (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
6448 
6449   // We want to interleave small loops in order to reduce the loop overhead and
6450   // potentially expose ILP opportunities.
6451   LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
6452                     << "LV: IC is " << IC << '\n'
6453                     << "LV: VF is " << VF << '\n');
6454   const bool AggressivelyInterleaveReductions =
6455       TTI.enableAggressiveInterleaving(HasReductions);
6456   if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
6457     // We assume that the cost overhead is 1 and we use the cost model
6458     // to estimate the cost of the loop and interleave until the cost of the
6459     // loop overhead is about 5% of the cost of the loop.
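    // For example (illustrative numbers only): with SmallLoopCost = 20 and
    // LoopCost = 6, SmallIC = min(IC, PowerOf2Floor(20 / 6)) = min(IC, 2).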
6460     unsigned SmallIC =
6461         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
6462 
6463     // Interleave until store/load ports (estimated by max interleave count) are
6464     // saturated.
6465     unsigned NumStores = Legal->getNumStores();
6466     unsigned NumLoads = Legal->getNumLoads();
6467     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
6468     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
6469 
    // If we have a scalar reduction (vector reductions are already dealt with
    // by this point), interleaving can increase the critical path length if
    // the loop we're interleaving is inside another loop. Limit the count to 2
    // by default, so the critical path only gets increased by one reduction
    // operation.
6474     if (HasReductions && TheLoop->getLoopDepth() > 1) {
6475       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
6476       SmallIC = std::min(SmallIC, F);
6477       StoresIC = std::min(StoresIC, F);
6478       LoadsIC = std::min(LoadsIC, F);
6479     }
6480 
6481     if (EnableLoadStoreRuntimeInterleave &&
6482         std::max(StoresIC, LoadsIC) > SmallIC) {
6483       LLVM_DEBUG(
6484           dbgs() << "LV: Interleaving to saturate store or load ports.\n");
6485       return std::max(StoresIC, LoadsIC);
6486     }
6487 
6488     // If there are scalar reductions and TTI has enabled aggressive
6489     // interleaving for reductions, we will interleave to expose ILP.
6490     if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
6491         AggressivelyInterleaveReductions) {
6492       LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
      // Interleave no less than SmallIC, but not as aggressively as the normal
      // IC, to handle the rare situation when resources are too limited.
6495       return std::max(IC / 2, SmallIC);
6496     } else {
6497       LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
6498       return SmallIC;
6499     }
6500   }
6501 
6502   // Interleave if this is a large loop (small loops are already dealt with by
6503   // this point) that could benefit from interleaving.
6504   if (AggressivelyInterleaveReductions) {
6505     LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6506     return IC;
6507   }
6508 
6509   LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
6510   return 1;
6511 }
6512 
6513 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
6514 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is
  // a very rough estimation. We scan the loop in topological order and assign
  // a number to each instruction. We use RPO to ensure that defs are met
  // before their users. We assume that each instruction that has in-loop users
  // starts an interval. We record every time that an in-loop value is used, so
  // we have a list of the first and last occurrences of each instruction.
  // Next, we transpose this data structure into a multi-map that holds the
  // list of intervals that *end* at a specific location. This multi-map allows
  // us to perform a linear search. We scan the instructions linearly and
  // record each time that a new interval starts, by placing it in a set. If we
  // find this value in the multi-map then we remove it from the set. The max
  // register usage is the maximum size of the set. We also search for
  // instructions that are defined outside the loop, but are used inside the
  // loop. We need this number separately from the max-interval usage number
  // because, when we unroll, loop-invariant values do not take more registers.
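  // As a small illustration (hypothetical IR, not taken from any loop): for
  //   %a = ...; %b = ...; %c = add %a, %b; %d = mul %c, %c
  // the intervals of %a and %b both end at %c, so at most two of these values
  // are live at once and the estimated usage for this snippet is 2 registers.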
6532   LoopBlocksDFS DFS(TheLoop);
6533   DFS.perform(LI);
6534 
6535   RegisterUsage RU;
6536 
6537   // Each 'key' in the map opens a new interval. The values
6538   // of the map are the index of the 'last seen' usage of the
6539   // instruction that is the key.
6540   using IntervalMap = DenseMap<Instruction *, unsigned>;
6541 
6542   // Maps instruction to its index.
6543   SmallVector<Instruction *, 64> IdxToInstr;
6544   // Marks the end of each interval.
6545   IntervalMap EndPoint;
  // Saves the set of instructions that are used in the loop.
6547   SmallPtrSet<Instruction *, 8> Ends;
6548   // Saves the list of values that are used in the loop but are
6549   // defined outside the loop, such as arguments and constants.
6550   SmallPtrSet<Value *, 8> LoopInvariants;
6551 
6552   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
6553     for (Instruction &I : BB->instructionsWithoutDebug()) {
6554       IdxToInstr.push_back(&I);
6555 
6556       // Save the end location of each USE.
6557       for (Value *U : I.operands()) {
6558         auto *Instr = dyn_cast<Instruction>(U);
6559 
6560         // Ignore non-instruction values such as arguments, constants, etc.
6561         if (!Instr)
6562           continue;
6563 
6564         // If this instruction is outside the loop then record it and continue.
6565         if (!TheLoop->contains(Instr)) {
6566           LoopInvariants.insert(Instr);
6567           continue;
6568         }
6569 
6570         // Overwrite previous end points.
6571         EndPoint[Instr] = IdxToInstr.size();
6572         Ends.insert(Instr);
6573       }
6574     }
6575   }
6576 
6577   // Saves the list of intervals that end with the index in 'key'.
6578   using InstrList = SmallVector<Instruction *, 2>;
6579   DenseMap<unsigned, InstrList> TransposeEnds;
6580 
6581   // Transpose the EndPoints to a list of values that end at each index.
6582   for (auto &Interval : EndPoint)
6583     TransposeEnds[Interval.second].push_back(Interval.first);
6584 
6585   SmallPtrSet<Instruction *, 8> OpenIntervals;
6586   SmallVector<RegisterUsage, 8> RUs(VFs.size());
6587   SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());
6588 
6589   LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
6590 
6591   // A lambda that gets the register usage for the given type and VF.
6592   const auto &TTICapture = TTI;
6593   auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) {
6594     if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty))
6595       return 0;
6596     return *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue();
6597   };
6598 
6599   for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
6600     Instruction *I = IdxToInstr[i];
6601 
6602     // Remove all of the instructions that end at this location.
6603     InstrList &List = TransposeEnds[i];
6604     for (Instruction *ToRemove : List)
6605       OpenIntervals.erase(ToRemove);
6606 
6607     // Ignore instructions that are never used within the loop.
6608     if (!Ends.count(I))
6609       continue;
6610 
6611     // Skip ignored values.
6612     if (ValuesToIgnore.count(I))
6613       continue;
6614 
6615     // For each VF find the maximum usage of registers.
6616     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
6617       // Count the number of live intervals.
6618       SmallMapVector<unsigned, unsigned, 4> RegUsage;
6619 
6620       if (VFs[j].isScalar()) {
6621         for (auto Inst : OpenIntervals) {
          unsigned ClassID =
              TTI.getRegisterClassForType(false, Inst->getType());
          // operator[] default-constructs missing entries to zero, so a plain
          // increment handles both new and already-seen register classes.
          RegUsage[ClassID] += 1;
6627         }
6628       } else {
6629         collectUniformsAndScalars(VFs[j]);
6630         for (auto Inst : OpenIntervals) {
6631           // Skip ignored values for VF > 1.
6632           if (VecValuesToIgnore.count(Inst))
6633             continue;
          if (isScalarAfterVectorization(Inst, VFs[j])) {
            unsigned ClassID =
                TTI.getRegisterClassForType(false, Inst->getType());
            RegUsage[ClassID] += 1;
          } else {
            unsigned ClassID =
                TTI.getRegisterClassForType(true, Inst->getType());
            RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
          }
6647         }
6648       }
6649 
      for (auto &pair : RegUsage)
        MaxUsages[j][pair.first] =
            std::max(MaxUsages[j][pair.first], pair.second);
6656     }
6657 
6658     LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
6659                       << OpenIntervals.size() << '\n');
6660 
6661     // Add the current instruction to the list of open intervals.
6662     OpenIntervals.insert(I);
6663   }
6664 
6665   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
6666     SmallMapVector<unsigned, unsigned, 4> Invariant;
6667 
6668     for (auto Inst : LoopInvariants) {
6669       unsigned Usage =
6670           VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
6671       unsigned ClassID =
6672           TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
      Invariant[ClassID] += Usage;
6677     }
6678 
6679     LLVM_DEBUG({
6680       dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
6681       dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
6682              << " item\n";
6683       for (const auto &pair : MaxUsages[i]) {
6684         dbgs() << "LV(REG): RegisterClass: "
6685                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6686                << " registers\n";
6687       }
6688       dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
6689              << " item\n";
6690       for (const auto &pair : Invariant) {
6691         dbgs() << "LV(REG): RegisterClass: "
6692                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6693                << " registers\n";
6694       }
6695     });
6696 
6697     RU.LoopInvariantRegs = Invariant;
6698     RU.MaxLocalUsers = MaxUsages[i];
6699     RUs[i] = RU;
6700   }
6701 
6702   return RUs;
6703 }
6704 
bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I) {
6706   // TODO: Cost model for emulated masked load/store is completely
6707   // broken. This hack guides the cost model to use an artificially
6708   // high enough value to practically disable vectorization with such
6709   // operations, except where previously deployed legality hack allowed
6710   // using very low cost values. This is to avoid regressions coming simply
6711   // from moving "masked load/store" check from legality to cost model.
6712   // Masked Load/Gather emulation was previously never allowed.
6713   // Limited number of Masked Store/Scatter emulation was allowed.
6714   assert(isPredicatedInst(I) &&
6715          "Expecting a scalar emulated instruction");
6716   return isa<LoadInst>(I) ||
6717          (isa<StoreInst>(I) &&
6718           NumPredStores > NumberOfStoresToPredicate);
6719 }
6720 
6721 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
6722   // If we aren't vectorizing the loop, or if we've already collected the
6723   // instructions to scalarize, there's nothing to do. Collection may already
6724   // have occurred if we have a user-selected VF and are now computing the
6725   // expected cost for interleaving.
6726   if (VF.isScalar() || VF.isZero() ||
6727       InstsToScalarize.find(VF) != InstsToScalarize.end())
6728     return;
6729 
  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
  // not profitable to scalarize any instructions, the presence of VF in the
  // map will indicate that we've analyzed it already.
6733   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
6734 
  // Find all the instructions that are scalar with predication in the loop and
  // determine whether it would be better not to if-convert the blocks they are
  // in. If so, we also record the instructions to scalarize.
6738   for (BasicBlock *BB : TheLoop->blocks()) {
6739     if (!blockNeedsPredication(BB))
6740       continue;
6741     for (Instruction &I : *BB)
6742       if (isScalarWithPredication(&I)) {
6743         ScalarCostsTy ScalarCosts;
6744         // Do not apply discount logic if hacked cost is needed
6745         // for emulated masked memrefs.
6746         if (!useEmulatedMaskMemRefHack(&I) &&
6747             computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
6748           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
6749         // Remember that BB will remain after vectorization.
6750         PredicatedBBsAfterVectorization.insert(BB);
6751       }
6752   }
6753 }
6754 
6755 int LoopVectorizationCostModel::computePredInstDiscount(
6756     Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
6757   assert(!isUniformAfterVectorization(PredInst, VF) &&
6758          "Instruction marked uniform-after-vectorization will be predicated");
6759 
6760   // Initialize the discount to zero, meaning that the scalar version and the
6761   // vector version cost the same.
6762   InstructionCost Discount = 0;
6763 
6764   // Holds instructions to analyze. The instructions we visit are mapped in
6765   // ScalarCosts. Those instructions are the ones that would be scalarized if
6766   // we find that the scalar version costs less.
6767   SmallVector<Instruction *, 8> Worklist;
6768 
6769   // Returns true if the given instruction can be scalarized.
6770   auto canBeScalarized = [&](Instruction *I) -> bool {
6771     // We only attempt to scalarize instructions forming a single-use chain
6772     // from the original predicated block that would otherwise be vectorized.
6773     // Although not strictly necessary, we give up on instructions we know will
6774     // already be scalar to avoid traversing chains that are unlikely to be
6775     // beneficial.
6776     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
6777         isScalarAfterVectorization(I, VF))
6778       return false;
6779 
6780     // If the instruction is scalar with predication, it will be analyzed
6781     // separately. We ignore it within the context of PredInst.
6782     if (isScalarWithPredication(I))
6783       return false;
6784 
6785     // If any of the instruction's operands are uniform after vectorization,
6786     // the instruction cannot be scalarized. This prevents, for example, a
6787     // masked load from being scalarized.
6788     //
6789     // We assume we will only emit a value for lane zero of an instruction
6790     // marked uniform after vectorization, rather than VF identical values.
6791     // Thus, if we scalarize an instruction that uses a uniform, we would
6792     // create uses of values corresponding to the lanes we aren't emitting code
6793     // for. This behavior can be changed by allowing getScalarValue to clone
6794     // the lane zero values for uniforms rather than asserting.
6795     for (Use &U : I->operands())
6796       if (auto *J = dyn_cast<Instruction>(U.get()))
6797         if (isUniformAfterVectorization(J, VF))
6798           return false;
6799 
6800     // Otherwise, we can scalarize the instruction.
6801     return true;
6802   };
6803 
6804   // Compute the expected cost discount from scalarizing the entire expression
6805   // feeding the predicated instruction. We currently only consider expressions
6806   // that are single-use instruction chains.
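  // For example (a hypothetical case): if the predicated instruction is a
  // scalarized store whose stored value comes from a single-use add, which in
  // turn is fed by a single-use mul in the same block, then the mul and the
  // add form such a chain and are considered for scalarization together with
  // the store.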
6807   Worklist.push_back(PredInst);
6808   while (!Worklist.empty()) {
6809     Instruction *I = Worklist.pop_back_val();
6810 
6811     // If we've already analyzed the instruction, there's nothing to do.
6812     if (ScalarCosts.find(I) != ScalarCosts.end())
6813       continue;
6814 
6815     // Compute the cost of the vector instruction. Note that this cost already
6816     // includes the scalarization overhead of the predicated instruction.
6817     InstructionCost VectorCost = getInstructionCost(I, VF).first;
6818 
6819     // Compute the cost of the scalarized instruction. This cost is the cost of
6820     // the instruction as if it wasn't if-converted and instead remained in the
6821     // predicated block. We will scale this cost by block probability after
6822     // computing the scalarization overhead.
6823     assert(!VF.isScalable() && "scalable vectors not yet supported.");
6824     InstructionCost ScalarCost =
6825         VF.getKnownMinValue() *
6826         getInstructionCost(I, ElementCount::getFixed(1)).first;
6827 
6828     // Compute the scalarization overhead of needed insertelement instructions
6829     // and phi nodes.
6830     if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
6831       ScalarCost += TTI.getScalarizationOverhead(
6832           cast<VectorType>(ToVectorTy(I->getType(), VF)),
6833           APInt::getAllOnesValue(VF.getKnownMinValue()), true, false);
6834       assert(!VF.isScalable() && "scalable vectors not yet supported.");
6835       ScalarCost +=
6836           VF.getKnownMinValue() *
6837           TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput);
6838     }
6839 
6840     // Compute the scalarization overhead of needed extractelement
6841     // instructions. For each of the instruction's operands, if the operand can
6842     // be scalarized, add it to the worklist; otherwise, account for the
6843     // overhead.
6844     for (Use &U : I->operands())
6845       if (auto *J = dyn_cast<Instruction>(U.get())) {
6846         assert(VectorType::isValidElementType(J->getType()) &&
6847                "Instruction has non-scalar type");
6848         if (canBeScalarized(J))
6849           Worklist.push_back(J);
6850         else if (needsExtract(J, VF)) {
6851           assert(!VF.isScalable() && "scalable vectors not yet supported.");
6852           ScalarCost += TTI.getScalarizationOverhead(
6853               cast<VectorType>(ToVectorTy(J->getType(), VF)),
6854               APInt::getAllOnesValue(VF.getKnownMinValue()), false, true);
6855         }
6856       }
6857 
6858     // Scale the total scalar cost by block probability.
6859     ScalarCost /= getReciprocalPredBlockProb();
6860 
6861     // Compute the discount. A non-negative discount means the vector version
6862     // of the instruction costs more, and scalarizing would be beneficial.
6863     Discount += VectorCost - ScalarCost;
6864     ScalarCosts[I] = ScalarCost;
6865   }
6866 
6867   return *Discount.getValue();
6868 }
6869 
6870 LoopVectorizationCostModel::VectorizationCostTy
6871 LoopVectorizationCostModel::expectedCost(ElementCount VF) {
6872   VectorizationCostTy Cost;
6873 
6874   // For each block.
6875   for (BasicBlock *BB : TheLoop->blocks()) {
6876     VectorizationCostTy BlockCost;
6877 
6878     // For each instruction in the old loop.
6879     for (Instruction &I : BB->instructionsWithoutDebug()) {
6880       // Skip ignored values.
6881       if (ValuesToIgnore.count(&I) ||
6882           (VF.isVector() && VecValuesToIgnore.count(&I)))
6883         continue;
6884 
6885       VectorizationCostTy C = getInstructionCost(&I, VF);
6886 
6887       // Check if we should override the cost.
6888       if (ForceTargetInstructionCost.getNumOccurrences() > 0)
6889         C.first = InstructionCost(ForceTargetInstructionCost);
6890 
6891       BlockCost.first += C.first;
6892       BlockCost.second |= C.second;
6893       LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
6894                         << " for VF " << VF << " For instruction: " << I
6895                         << '\n');
6896     }
6897 
6898     // If we are vectorizing a predicated block, it will have been
6899     // if-converted. This means that the block's instructions (aside from
6900     // stores and instructions that may divide by zero) will now be
6901     // unconditionally executed. For the scalar case, we may not always execute
6902     // the predicated block, if it is an if-else block. Thus, scale the block's
6903     // cost by the probability of executing it. blockNeedsPredication from
6904     // Legal is used so as to not include all blocks in tail folded loops.
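    // For example, assuming a reciprocal block probability of 2 (i.e. the
    // predicated block is expected to execute on roughly half the iterations),
    // a block whose instructions cost 8 contributes 8 / 2 = 4 to the scalar
    // loop cost.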
6905     if (VF.isScalar() && Legal->blockNeedsPredication(BB))
6906       BlockCost.first /= getReciprocalPredBlockProb();
6907 
6908     Cost.first += BlockCost.first;
6909     Cost.second |= BlockCost.second;
6910   }
6911 
6912   return Cost;
6913 }
6914 
/// Gets the address access SCEV after verifying that the access pattern
/// is loop invariant except for the induction variable dependence.
6917 ///
6918 /// This SCEV can be sent to the Target in order to estimate the address
6919 /// calculation cost.
6920 static const SCEV *getAddressAccessSCEV(
6921               Value *Ptr,
6922               LoopVectorizationLegality *Legal,
6923               PredicatedScalarEvolution &PSE,
6924               const Loop *TheLoop) {
6925 
6926   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
6927   if (!Gep)
6928     return nullptr;
6929 
6930   // We are looking for a gep with all loop invariant indices except for one
6931   // which should be an induction variable.
6932   auto SE = PSE.getSE();
6933   unsigned NumOperands = Gep->getNumOperands();
6934   for (unsigned i = 1; i < NumOperands; ++i) {
6935     Value *Opd = Gep->getOperand(i);
6936     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
6937         !Legal->isInductionVariable(Opd))
6938       return nullptr;
6939   }
6940 
  // Now we know we have a GEP ptr, %inv, %ind, %inv. Return the Ptr SCEV.
6942   return PSE.getSCEV(Ptr);
6943 }
6944 
6945 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
6946   return Legal->hasStride(I->getOperand(0)) ||
6947          Legal->hasStride(I->getOperand(1));
6948 }
6949 
6950 InstructionCost
6951 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
6952                                                         ElementCount VF) {
6953   assert(VF.isVector() &&
6954          "Scalarization cost of instruction implies vectorization.");
6955   if (VF.isScalable())
6956     return InstructionCost::getInvalid();
6957 
6958   Type *ValTy = getLoadStoreType(I);
6959   auto SE = PSE.getSE();
6960 
6961   unsigned AS = getLoadStoreAddressSpace(I);
6962   Value *Ptr = getLoadStorePointerOperand(I);
6963   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
6964 
  // Figure out whether the access is strided and get the stride value
  // if it's known at compile time.
6967   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
6968 
6969   // Get the cost of the scalar memory instruction and address computation.
6970   InstructionCost Cost =
6971       VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
6972 
6973   // Don't pass *I here, since it is scalar but will actually be part of a
6974   // vectorized loop where the user of it is a vectorized instruction.
6975   const Align Alignment = getLoadStoreAlignment(I);
6976   Cost += VF.getKnownMinValue() *
6977           TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
6978                               AS, TTI::TCK_RecipThroughput);
6979 
6980   // Get the overhead of the extractelement and insertelement instructions
6981   // we might create due to scalarization.
6982   Cost += getScalarizationOverhead(I, VF);
6983 
6984   // If we have a predicated load/store, it will need extra i1 extracts and
6985   // conditional branches, but may not be executed for each vector lane. Scale
6986   // the cost by the probability of executing the predicated block.
6987   if (isPredicatedInst(I)) {
6988     Cost /= getReciprocalPredBlockProb();
6989 
6990     // Add the cost of an i1 extract and a branch
6991     auto *Vec_i1Ty =
6992         VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF);
6993     Cost += TTI.getScalarizationOverhead(
6994         Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()),
6995         /*Insert=*/false, /*Extract=*/true);
6996     Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput);
6997 
6998     if (useEmulatedMaskMemRefHack(I))
6999       // Artificially setting to a high enough value to practically disable
7000       // vectorization with such operations.
7001       Cost = 3000000;
7002   }
7003 
7004   return Cost;
7005 }
7006 
7007 InstructionCost
7008 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
7009                                                     ElementCount VF) {
7010   Type *ValTy = getLoadStoreType(I);
7011   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
7012   Value *Ptr = getLoadStorePointerOperand(I);
7013   unsigned AS = getLoadStoreAddressSpace(I);
7014   int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
7015   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
7016 
7017   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
7018          "Stride should be 1 or -1 for consecutive memory access");
7019   const Align Alignment = getLoadStoreAlignment(I);
7020   InstructionCost Cost = 0;
7021   if (Legal->isMaskRequired(I))
7022     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
7023                                       CostKind);
7024   else
7025     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
7026                                 CostKind, I);
7027 
7028   bool Reverse = ConsecutiveStride < 0;
7029   if (Reverse)
7030     Cost +=
7031         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
7032   return Cost;
7033 }
7034 
7035 InstructionCost
7036 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
7037                                                 ElementCount VF) {
7038   assert(Legal->isUniformMemOp(*I));
7039 
7040   Type *ValTy = getLoadStoreType(I);
7041   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
7042   const Align Alignment = getLoadStoreAlignment(I);
7043   unsigned AS = getLoadStoreAddressSpace(I);
7044   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
7045   if (isa<LoadInst>(I)) {
7046     return TTI.getAddressComputationCost(ValTy) +
7047            TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
7048                                CostKind) +
7049            TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
7050   }
7051   StoreInst *SI = cast<StoreInst>(I);
7052 
7053   bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
7054   return TTI.getAddressComputationCost(ValTy) +
7055          TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
7056                              CostKind) +
7057          (isLoopInvariantStoreValue
7058               ? 0
7059               : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
7060                                        VF.getKnownMinValue() - 1));
7061 }
7062 
7063 InstructionCost
7064 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
7065                                                  ElementCount VF) {
7066   Type *ValTy = getLoadStoreType(I);
7067   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
7068   const Align Alignment = getLoadStoreAlignment(I);
7069   const Value *Ptr = getLoadStorePointerOperand(I);
7070 
7071   return TTI.getAddressComputationCost(VectorTy) +
7072          TTI.getGatherScatterOpCost(
7073              I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
7074              TargetTransformInfo::TCK_RecipThroughput, I);
7075 }
7076 
7077 InstructionCost
7078 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
7079                                                    ElementCount VF) {
7080   // TODO: Once we have support for interleaving with scalable vectors
7081   // we can calculate the cost properly here.
7082   if (VF.isScalable())
7083     return InstructionCost::getInvalid();
7084 
7085   Type *ValTy = getLoadStoreType(I);
7086   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
7087   unsigned AS = getLoadStoreAddressSpace(I);
7088 
7089   auto Group = getInterleavedAccessGroup(I);
  assert(Group && "Failed to get an interleaved access group.");
7091 
7092   unsigned InterleaveFactor = Group->getFactor();
7093   auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
7094 
7095   // Holds the indices of existing members in an interleaved load group.
7096   // An interleaved store group doesn't need this as it doesn't allow gaps.
7097   SmallVector<unsigned, 4> Indices;
7098   if (isa<LoadInst>(I)) {
7099     for (unsigned i = 0; i < InterleaveFactor; i++)
7100       if (Group->getMember(i))
7101         Indices.push_back(i);
7102   }
7103 
7104   // Calculate the cost of the whole interleaved group.
7105   bool UseMaskForGaps =
7106       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
7107   InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
7108       I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
7109       AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);
7110 
7111   if (Group->isReverse()) {
7112     // TODO: Add support for reversed masked interleaved access.
7113     assert(!Legal->isMaskRequired(I) &&
7114            "Reverse masked interleaved access not supported.");
7115     Cost +=
7116         Group->getNumMembers() *
7117         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
7118   }
7119   return Cost;
7120 }
7121 
7122 InstructionCost LoopVectorizationCostModel::getReductionPatternCost(
7123     Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
  // Early exit if there are no in-loop reductions.
7125   if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
7126     return InstructionCost::getInvalid();
7127   auto *VectorTy = cast<VectorType>(Ty);
7128 
  // We are looking for one of the following patterns, finding the minimal
  // acceptable cost:
  //  reduce(mul(ext(A), ext(B))) or
  //  reduce(mul(A, B)) or
  //  reduce(ext(A)) or
  //  reduce(A).
  // The basic idea is that we walk down the tree to do that, finding the root
  // reduction instruction in InLoopReductionImmediateChains. From there we find
  // the pattern of mul/ext and test the cost of the entire pattern vs the cost
  // of the components. If the reduction cost is lower then we return it for the
  // reduction instruction and 0 for the other instructions in the pattern. If
  // it is not, we return an invalid cost specifying that the original cost
  // method should be used.
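  // For example (hypothetical IR): for
  //   %ext = sext i16 %a to i32
  //   %mul = mul i32 %ext, %ext.b
  //   %sum = add i32 %mul, %phi
  // starting from %ext or %mul we walk up to the add %sum; if %sum is a
  // recorded in-loop reduction, the whole reduce(mul(ext, ext)) pattern is
  // costed as one extended multiply-add reduction and compared against the
  // cost of its components.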
7141   Instruction *RetI = I;
7142   if ((RetI->getOpcode() == Instruction::SExt ||
7143        RetI->getOpcode() == Instruction::ZExt)) {
7144     if (!RetI->hasOneUser())
7145       return InstructionCost::getInvalid();
7146     RetI = RetI->user_back();
7147   }
7148   if (RetI->getOpcode() == Instruction::Mul &&
7149       RetI->user_back()->getOpcode() == Instruction::Add) {
7150     if (!RetI->hasOneUser())
7151       return InstructionCost::getInvalid();
7152     RetI = RetI->user_back();
7153   }
7154 
7155   // Test if the found instruction is a reduction, and if not return an invalid
7156   // cost specifying the parent to use the original cost modelling.
7157   if (!InLoopReductionImmediateChains.count(RetI))
7158     return InstructionCost::getInvalid();
7159 
7160   // Find the reduction this chain is a part of and calculate the basic cost of
7161   // the reduction on its own.
7162   Instruction *LastChain = InLoopReductionImmediateChains[RetI];
7163   Instruction *ReductionPhi = LastChain;
7164   while (!isa<PHINode>(ReductionPhi))
7165     ReductionPhi = InLoopReductionImmediateChains[ReductionPhi];
7166 
7167   const RecurrenceDescriptor &RdxDesc =
7168       Legal->getReductionVars()[cast<PHINode>(ReductionPhi)];
7169   InstructionCost BaseCost = TTI.getArithmeticReductionCost(
7170       RdxDesc.getOpcode(), VectorTy, false, CostKind);
7171 
7172   // Get the operand that was not the reduction chain and match it to one of the
7173   // patterns, returning the better cost if it is found.
7174   Instruction *RedOp = RetI->getOperand(1) == LastChain
7175                            ? dyn_cast<Instruction>(RetI->getOperand(0))
7176                            : dyn_cast<Instruction>(RetI->getOperand(1));
7177 
7178   VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
7179 
7180   if (RedOp && (isa<SExtInst>(RedOp) || isa<ZExtInst>(RedOp)) &&
7181       !TheLoop->isLoopInvariant(RedOp)) {
7182     bool IsUnsigned = isa<ZExtInst>(RedOp);
7183     auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
7184     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
7185         /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
7186         CostKind);
7187 
7188     InstructionCost ExtCost =
7189         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
7190                              TTI::CastContextHint::None, CostKind, RedOp);
7191     if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
7192       return I == RetI ? *RedCost.getValue() : 0;
7193   } else if (RedOp && RedOp->getOpcode() == Instruction::Mul) {
7194     Instruction *Mul = RedOp;
7195     Instruction *Op0 = dyn_cast<Instruction>(Mul->getOperand(0));
7196     Instruction *Op1 = dyn_cast<Instruction>(Mul->getOperand(1));
7197     if (Op0 && Op1 && (isa<SExtInst>(Op0) || isa<ZExtInst>(Op0)) &&
7198         Op0->getOpcode() == Op1->getOpcode() &&
7199         Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
7200         !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
7201       bool IsUnsigned = isa<ZExtInst>(Op0);
7202       auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
7203       // reduce(mul(ext, ext))
7204       InstructionCost ExtCost =
7205           TTI.getCastInstrCost(Op0->getOpcode(), VectorTy, ExtType,
7206                                TTI::CastContextHint::None, CostKind, Op0);
7207       InstructionCost MulCost =
7208           TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind);
7209 
7210       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
7211           /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
7212           CostKind);
7213 
7214       if (RedCost.isValid() && RedCost < ExtCost * 2 + MulCost + BaseCost)
7215         return I == RetI ? *RedCost.getValue() : 0;
7216     } else {
7217       InstructionCost MulCost =
7218           TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind);
7219 
7220       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
7221           /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy,
7222           CostKind);
7223 
7224       if (RedCost.isValid() && RedCost < MulCost + BaseCost)
7225         return I == RetI ? *RedCost.getValue() : 0;
7226     }
7227   }
7228 
7229   return I == RetI ? BaseCost : InstructionCost::getInvalid();
7230 }
7231 
7232 InstructionCost
7233 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
7234                                                      ElementCount VF) {
7235   // Calculate scalar cost only. Vectorization cost should be ready at this
7236   // moment.
7237   if (VF.isScalar()) {
7238     Type *ValTy = getLoadStoreType(I);
7239     const Align Alignment = getLoadStoreAlignment(I);
7240     unsigned AS = getLoadStoreAddressSpace(I);
7241 
7242     return TTI.getAddressComputationCost(ValTy) +
7243            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
7244                                TTI::TCK_RecipThroughput, I);
7245   }
7246   return getWideningCost(I, VF);
7247 }
7248 
7249 LoopVectorizationCostModel::VectorizationCostTy
7250 LoopVectorizationCostModel::getInstructionCost(Instruction *I,
7251                                                ElementCount VF) {
7252   // If we know that this instruction will remain uniform, check the cost of
7253   // the scalar version.
7254   if (isUniformAfterVectorization(I, VF))
7255     VF = ElementCount::getFixed(1);
7256 
7257   if (VF.isVector() && isProfitableToScalarize(I, VF))
7258     return VectorizationCostTy(InstsToScalarize[VF][I], false);
7259 
7260   // Forced scalars do not have any scalarization overhead.
7261   auto ForcedScalar = ForcedScalars.find(VF);
7262   if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
7263     auto InstSet = ForcedScalar->second;
7264     if (InstSet.count(I))
7265       return VectorizationCostTy(
7266           (getInstructionCost(I, ElementCount::getFixed(1)).first *
7267            VF.getKnownMinValue()),
7268           false);
7269   }
7270 
7271   Type *VectorTy;
7272   InstructionCost C = getInstructionCost(I, VF, VectorTy);
7273 
7274   bool TypeNotScalarized =
7275       VF.isVector() && VectorTy->isVectorTy() &&
7276       TTI.getNumberOfParts(VectorTy) < VF.getKnownMinValue();
7277   return VectorizationCostTy(C, TypeNotScalarized);
7278 }
7279 
7280 InstructionCost
7281 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
7282                                                      ElementCount VF) const {
7283 
7284   if (VF.isScalable())
7285     return InstructionCost::getInvalid();
7286 
7287   if (VF.isScalar())
7288     return 0;
7289 
7290   InstructionCost Cost = 0;
7291   Type *RetTy = ToVectorTy(I->getType(), VF);
7292   if (!RetTy->isVoidTy() &&
7293       (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
7294     Cost += TTI.getScalarizationOverhead(
7295         cast<VectorType>(RetTy), APInt::getAllOnesValue(VF.getKnownMinValue()),
7296         true, false);
7297 
7298   // Some targets keep addresses scalar.
7299   if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
7300     return Cost;
7301 
7302   // Some targets support efficient element stores.
7303   if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
7304     return Cost;
7305 
7306   // Collect operands to consider.
7307   CallInst *CI = dyn_cast<CallInst>(I);
7308   Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands();
7309 
7310   // Skip operands that do not require extraction/scalarization and do not incur
7311   // any overhead.
7312   SmallVector<Type *> Tys;
7313   for (auto *V : filterExtractingOperands(Ops, VF))
7314     Tys.push_back(MaybeVectorizeType(V->getType(), VF));
7315   return Cost + TTI.getOperandsScalarizationOverhead(
7316                     filterExtractingOperands(Ops, VF), Tys);
7317 }
7318 
7319 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
7320   if (VF.isScalar())
7321     return;
7322   NumPredStores = 0;
7323   for (BasicBlock *BB : TheLoop->blocks()) {
7324     // For each instruction in the old loop.
7325     for (Instruction &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
7327       if (!Ptr)
7328         continue;
7329 
7330       // TODO: We should generate better code and update the cost model for
7331       // predicated uniform stores. Today they are treated as any other
7332       // predicated store (see added test cases in
7333       // invariant-store-vectorization.ll).
7334       if (isa<StoreInst>(&I) && isScalarWithPredication(&I))
7335         NumPredStores++;
7336 
7337       if (Legal->isUniformMemOp(I)) {
7338         // TODO: Avoid replicating loads and stores instead of
7339         // relying on instcombine to remove them.
7340         // Load: Scalar load + broadcast
7341         // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
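        // For illustration only: a loop-invariant access such as "Sum += *P;"
        // or "*P = X;" inside the loop is a uniform memory op; the load case
        // is costed as one scalar load plus a broadcast rather than VF
        // independent loads.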
7342         InstructionCost Cost;
7343         if (isa<StoreInst>(&I) && VF.isScalable() &&
7344             isLegalGatherOrScatter(&I)) {
7345           Cost = getGatherScatterCost(&I, VF);
7346           setWideningDecision(&I, VF, CM_GatherScatter, Cost);
7347         } else {
7348           assert((isa<LoadInst>(&I) || !VF.isScalable()) &&
7349                  "Cannot yet scalarize uniform stores");
7350           Cost = getUniformMemOpCost(&I, VF);
7351           setWideningDecision(&I, VF, CM_Scalarize, Cost);
7352         }
7353         continue;
7354       }
7355 
7356       // We assume that widening is the best solution when possible.
7357       if (memoryInstructionCanBeWidened(&I, VF)) {
7358         InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
7359         int ConsecutiveStride =
7360                Legal->isConsecutivePtr(getLoadStorePointerOperand(&I));
7361         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
7362                "Expected consecutive stride.");
7363         InstWidening Decision =
7364             ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
7365         setWideningDecision(&I, VF, Decision, Cost);
7366         continue;
7367       }
7368 
7369       // Choose between Interleaving, Gather/Scatter or Scalarization.
7370       InstructionCost InterleaveCost = InstructionCost::getInvalid();
7371       unsigned NumAccesses = 1;
7372       if (isAccessInterleaved(&I)) {
7373         auto Group = getInterleavedAccessGroup(&I);
7374         assert(Group && "Fail to get an interleaved access group.");
7375 
7376         // Make one decision for the whole group.
7377         if (getWideningDecision(&I, VF) != CM_Unknown)
7378           continue;
7379 
7380         NumAccesses = Group->getNumMembers();
7381         if (interleavedAccessCanBeWidened(&I, VF))
7382           InterleaveCost = getInterleaveGroupCost(&I, VF);
7383       }
7384 
7385       InstructionCost GatherScatterCost =
7386           isLegalGatherOrScatter(&I)
7387               ? getGatherScatterCost(&I, VF) * NumAccesses
7388               : InstructionCost::getInvalid();
7389 
7390       InstructionCost ScalarizationCost =
7391           getMemInstScalarizationCost(&I, VF) * NumAccesses;
7392 
      // Choose the better solution for the current VF, record this decision,
      // and use it during vectorization.
7395       InstructionCost Cost;
7396       InstWidening Decision;
7397       if (InterleaveCost <= GatherScatterCost &&
7398           InterleaveCost < ScalarizationCost) {
7399         Decision = CM_Interleave;
7400         Cost = InterleaveCost;
7401       } else if (GatherScatterCost < ScalarizationCost) {
7402         Decision = CM_GatherScatter;
7403         Cost = GatherScatterCost;
7404       } else {
        assert(!VF.isScalable() &&
               "We cannot yet scalarize for scalable vectors");
7407         Decision = CM_Scalarize;
7408         Cost = ScalarizationCost;
7409       }
      // If the instruction belongs to an interleave group, the whole group
7411       // receives the same decision. The whole group receives the cost, but
7412       // the cost will actually be assigned to one instruction.
7413       if (auto Group = getInterleavedAccessGroup(&I))
7414         setWideningDecision(Group, VF, Decision, Cost);
7415       else
7416         setWideningDecision(&I, VF, Decision, Cost);
7417     }
7418   }
7419 
7420   // Make sure that any load of address and any other address computation
7421   // remains scalar unless there is gather/scatter support. This avoids
7422   // inevitable extracts into address registers, and also has the benefit of
7423   // activating LSR more, since that pass can't optimize vectorized
7424   // addresses.
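  //
  // A sketch of the kind of code this targets (hypothetical source):
  //   for (int i = 0; i < n; ++i)
  //     Sum += *Ptrs[i];
  // The load of Ptrs[i] only feeds an address; widening it would force an
  // extract per lane to materialize each pointer, so it is scalarized below
  // when the target does not prefer vectorized addressing.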
7425   if (TTI.prefersVectorizedAddressing())
7426     return;
7427 
7428   // Start with all scalar pointer uses.
7429   SmallPtrSet<Instruction *, 8> AddrDefs;
7430   for (BasicBlock *BB : TheLoop->blocks())
7431     for (Instruction &I : *BB) {
7432       Instruction *PtrDef =
7433         dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
7434       if (PtrDef && TheLoop->contains(PtrDef) &&
7435           getWideningDecision(&I, VF) != CM_GatherScatter)
7436         AddrDefs.insert(PtrDef);
7437     }
7438 
7439   // Add all instructions used to generate the addresses.
7440   SmallVector<Instruction *, 4> Worklist;
7441   append_range(Worklist, AddrDefs);
7442   while (!Worklist.empty()) {
7443     Instruction *I = Worklist.pop_back_val();
7444     for (auto &Op : I->operands())
7445       if (auto *InstOp = dyn_cast<Instruction>(Op))
7446         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
7447             AddrDefs.insert(InstOp).second)
7448           Worklist.push_back(InstOp);
7449   }
7450 
7451   for (auto *I : AddrDefs) {
7452     if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by
      // the cost functions, but since this involves finding out whether the
      // loaded register is involved in an address computation, it is instead
      // changed here when we know this is the case.
7457       InstWidening Decision = getWideningDecision(I, VF);
7458       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
7459         // Scalarize a widened load of address.
7460         setWideningDecision(
7461             I, VF, CM_Scalarize,
7462             (VF.getKnownMinValue() *
7463              getMemoryInstructionCost(I, ElementCount::getFixed(1))));
7464       else if (auto Group = getInterleavedAccessGroup(I)) {
7465         // Scalarize an interleave group of address loads.
7466         for (unsigned I = 0; I < Group->getFactor(); ++I) {
7467           if (Instruction *Member = Group->getMember(I))
7468             setWideningDecision(
7469                 Member, VF, CM_Scalarize,
7470                 (VF.getKnownMinValue() *
7471                  getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
7472         }
7473       }
7474     } else
7475       // Make sure I gets scalarized and a cost estimate without
7476       // scalarization overhead.
7477       ForcedScalars[VF].insert(I);
7478   }
7479 }
7480 
7481 InstructionCost
7482 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
7483                                                Type *&VectorTy) {
7484   Type *RetTy = I->getType();
7485   if (canTruncateToMinimalBitwidth(I, VF))
7486     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
7487   auto SE = PSE.getSE();
7488   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
7489 
7490   auto hasSingleCopyAfterVectorization = [this](Instruction *I,
7491                                                 ElementCount VF) -> bool {
7492     if (VF.isScalar())
7493       return true;
7494 
7495     auto Scalarized = InstsToScalarize.find(VF);
7496     assert(Scalarized != InstsToScalarize.end() &&
7497            "VF not yet analyzed for scalarization profitability");
7498     return !Scalarized->second.count(I) &&
7499            llvm::all_of(I->users(), [&](User *U) {
7500              auto *UI = cast<Instruction>(U);
7501              return !Scalarized->second.count(UI);
7502            });
7503   };
7504   (void) hasSingleCopyAfterVectorization;
7505 
7506   if (isScalarAfterVectorization(I, VF)) {
7507     // With the exception of GEPs and PHIs, after scalarization there should
7508     // only be one copy of the instruction generated in the loop. This is
7509     // because the VF is either 1, or any instructions that need scalarizing
    // have already been dealt with by the time we get here. As a result,
7511     // it means we don't have to multiply the instruction cost by VF.
7512     assert(I->getOpcode() == Instruction::GetElementPtr ||
7513            I->getOpcode() == Instruction::PHI ||
7514            (I->getOpcode() == Instruction::BitCast &&
7515             I->getType()->isPointerTy()) ||
7516            hasSingleCopyAfterVectorization(I, VF));
7517     VectorTy = RetTy;
7518   } else
7519     VectorTy = ToVectorTy(RetTy, VF);
7520 
7521   // TODO: We need to estimate the cost of intrinsic calls.
7522   switch (I->getOpcode()) {
7523   case Instruction::GetElementPtr:
7524     // We mark this instruction as zero-cost because the cost of GEPs in
7525     // vectorized code depends on whether the corresponding memory instruction
7526     // is scalarized or not. Therefore, we handle GEPs with the memory
7527     // instruction cost.
7528     return 0;
7529   case Instruction::Br: {
7530     // In cases of scalarized and predicated instructions, there will be VF
    // predicated blocks in the vectorized loop. Each branch around these
    // blocks also requires an extract of its vector compare i1 element.
7533     bool ScalarPredicatedBB = false;
7534     BranchInst *BI = cast<BranchInst>(I);
7535     if (VF.isVector() && BI->isConditional() &&
7536         (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
7537          PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
7538       ScalarPredicatedBB = true;
7539 
7540     if (ScalarPredicatedBB) {
7541       // Return cost for branches around scalarized and predicated blocks.
7542       assert(!VF.isScalable() && "scalable vectors not yet supported.");
7543       auto *Vec_i1Ty =
7544           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
7545       return (TTI.getScalarizationOverhead(
7546                   Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()),
7547                   false, true) +
7548               (TTI.getCFInstrCost(Instruction::Br, CostKind) *
7549                VF.getKnownMinValue()));
7550     } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
7551       // The back-edge branch will remain, as will all scalar branches.
7552       return TTI.getCFInstrCost(Instruction::Br, CostKind);
7553     else
7554       // This branch will be eliminated by if-conversion.
7555       return 0;
7556     // Note: We currently assume zero cost for an unconditional branch inside
7557     // a predicated block since it will become a fall-through, although we
7558     // may decide in the future to call TTI for all branches.
7559   }
7560   case Instruction::PHI: {
7561     auto *Phi = cast<PHINode>(I);
7562 
7563     // First-order recurrences are replaced by vector shuffles inside the loop.
7564     // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
7565     if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi))
7566       return TTI.getShuffleCost(
7567           TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy),
7568           None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1));
7569 
7570     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
7571     // converted into select instructions. We require N - 1 selects per phi
7572     // node, where N is the number of incoming values.
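    // For example (illustrative), an if-converted merge such as
    //   %r = phi [ %a, %then ], [ %b, %else ]
    // becomes a single vector select of %a and %b; with N incoming values the
    // chain needs N - 1 selects.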
7573     if (VF.isVector() && Phi->getParent() != TheLoop->getHeader())
7574       return (Phi->getNumIncomingValues() - 1) *
7575              TTI.getCmpSelInstrCost(
7576                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
7577                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
7578                  CmpInst::BAD_ICMP_PREDICATE, CostKind);
7579 
7580     return TTI.getCFInstrCost(Instruction::PHI, CostKind);
7581   }
7582   case Instruction::UDiv:
7583   case Instruction::SDiv:
7584   case Instruction::URem:
7585   case Instruction::SRem:
7586     // If we have a predicated instruction, it may not be executed for each
7587     // vector lane. Get the scalarization cost and scale this amount by the
7588     // probability of executing the predicated block. If the instruction is not
7589     // predicated, we fall through to the next case.
7590     if (VF.isVector() && isScalarWithPredication(I)) {
7591       InstructionCost Cost = 0;
7592 
7593       // These instructions have a non-void type, so account for the phi nodes
7594       // that we will create. This cost is likely to be zero. The phi node
7595       // cost, if any, should be scaled by the block probability because it
7596       // models a copy at the end of each predicated block.
7597       Cost += VF.getKnownMinValue() *
7598               TTI.getCFInstrCost(Instruction::PHI, CostKind);
7599 
7600       // The cost of the non-predicated instruction.
7601       Cost += VF.getKnownMinValue() *
7602               TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);
7603 
7604       // The cost of insertelement and extractelement instructions needed for
7605       // scalarization.
7606       Cost += getScalarizationOverhead(I, VF);
7607 
7608       // Scale the cost by the probability of executing the predicated blocks.
7609       // This assumes the predicated block for each vector lane is equally
7610       // likely.
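      // Illustrative arithmetic: if the costs accumulated above sum to C and
      // the reciprocal block probability is, say, 2 (the predicated block is
      // assumed to execute about half the time), the returned cost is C / 2.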
7611       return Cost / getReciprocalPredBlockProb();
7612     }
7613     LLVM_FALLTHROUGH;
7614   case Instruction::Add:
7615   case Instruction::FAdd:
7616   case Instruction::Sub:
7617   case Instruction::FSub:
7618   case Instruction::Mul:
7619   case Instruction::FMul:
7620   case Instruction::FDiv:
7621   case Instruction::FRem:
7622   case Instruction::Shl:
7623   case Instruction::LShr:
7624   case Instruction::AShr:
7625   case Instruction::And:
7626   case Instruction::Or:
7627   case Instruction::Xor: {
7628     // Since we will replace the stride by 1 the multiplication should go away.
7629     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
7630       return 0;
7631 
7632     // Detect reduction patterns
7633     InstructionCost RedCost;
7634     if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7635             .isValid())
7636       return RedCost;
7637 
7638     // Certain instructions can be cheaper to vectorize if they have a constant
7639     // second vector operand. One example of this are shifts on x86.
7640     Value *Op2 = I->getOperand(1);
7641     TargetTransformInfo::OperandValueProperties Op2VP;
7642     TargetTransformInfo::OperandValueKind Op2VK =
7643         TTI.getOperandInfo(Op2, Op2VP);
7644     if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
7645       Op2VK = TargetTransformInfo::OK_UniformValue;
7646 
7647     SmallVector<const Value *, 4> Operands(I->operand_values());
7648     return TTI.getArithmeticInstrCost(
7649         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7650         Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
7651   }
7652   case Instruction::FNeg: {
7653     return TTI.getArithmeticInstrCost(
7654         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7655         TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None,
7656         TargetTransformInfo::OP_None, I->getOperand(0), I);
7657   }
7658   case Instruction::Select: {
7659     SelectInst *SI = cast<SelectInst>(I);
7660     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
7661     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
7662 
7663     const Value *Op0, *Op1;
7664     using namespace llvm::PatternMatch;
7665     if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
7666                         match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
7667       // select x, y, false --> x & y
7668       // select x, true, y --> x | y
7669       TTI::OperandValueProperties Op1VP = TTI::OP_None;
7670       TTI::OperandValueProperties Op2VP = TTI::OP_None;
7671       TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP);
7672       TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP);
7673       assert(Op0->getType()->getScalarSizeInBits() == 1 &&
7674               Op1->getType()->getScalarSizeInBits() == 1);
7675 
7676       SmallVector<const Value *, 2> Operands{Op0, Op1};
7677       return TTI.getArithmeticInstrCost(
7678           match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And, VectorTy,
7679           CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I);
7680     }
7681 
7682     Type *CondTy = SI->getCondition()->getType();
7683     if (!ScalarCond)
7684       CondTy = VectorType::get(CondTy, VF);
7685     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy,
7686                                   CmpInst::BAD_ICMP_PREDICATE, CostKind, I);
7687   }
7688   case Instruction::ICmp:
7689   case Instruction::FCmp: {
7690     Type *ValTy = I->getOperand(0)->getType();
7691     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
7692     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
7693       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
7694     VectorTy = ToVectorTy(ValTy, VF);
7695     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr,
7696                                   CmpInst::BAD_ICMP_PREDICATE, CostKind, I);
7697   }
7698   case Instruction::Store:
7699   case Instruction::Load: {
7700     ElementCount Width = VF;
7701     if (Width.isVector()) {
7702       InstWidening Decision = getWideningDecision(I, Width);
7703       assert(Decision != CM_Unknown &&
7704              "CM decision should be taken at this point");
7705       if (Decision == CM_Scalarize)
7706         Width = ElementCount::getFixed(1);
7707     }
7708     VectorTy = ToVectorTy(getLoadStoreType(I), Width);
7709     return getMemoryInstructionCost(I, VF);
7710   }
7711   case Instruction::BitCast:
7712     if (I->getType()->isPointerTy())
7713       return 0;
7714     LLVM_FALLTHROUGH;
7715   case Instruction::ZExt:
7716   case Instruction::SExt:
7717   case Instruction::FPToUI:
7718   case Instruction::FPToSI:
7719   case Instruction::FPExt:
7720   case Instruction::PtrToInt:
7721   case Instruction::IntToPtr:
7722   case Instruction::SIToFP:
7723   case Instruction::UIToFP:
7724   case Instruction::Trunc:
7725   case Instruction::FPTrunc: {
7726     // Computes the CastContextHint from a Load/Store instruction.
7727     auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
7728       assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
7729              "Expected a load or a store!");
7730 
7731       if (VF.isScalar() || !TheLoop->contains(I))
7732         return TTI::CastContextHint::Normal;
7733 
7734       switch (getWideningDecision(I, VF)) {
7735       case LoopVectorizationCostModel::CM_GatherScatter:
7736         return TTI::CastContextHint::GatherScatter;
7737       case LoopVectorizationCostModel::CM_Interleave:
7738         return TTI::CastContextHint::Interleave;
7739       case LoopVectorizationCostModel::CM_Scalarize:
7740       case LoopVectorizationCostModel::CM_Widen:
7741         return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked
7742                                         : TTI::CastContextHint::Normal;
7743       case LoopVectorizationCostModel::CM_Widen_Reverse:
7744         return TTI::CastContextHint::Reversed;
7745       case LoopVectorizationCostModel::CM_Unknown:
7746         llvm_unreachable("Instr did not go through cost modelling?");
7747       }
7748 
7749       llvm_unreachable("Unhandled case!");
7750     };
7751 
7752     unsigned Opcode = I->getOpcode();
7753     TTI::CastContextHint CCH = TTI::CastContextHint::None;
7754     // For Trunc, the context is the only user, which must be a StoreInst.
7755     if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
7756       if (I->hasOneUse())
7757         if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
7758           CCH = ComputeCCH(Store);
7759     }
7760     // For Z/Sext, the context is the operand, which must be a LoadInst.
7761     else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
7762              Opcode == Instruction::FPExt) {
7763       if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
7764         CCH = ComputeCCH(Load);
7765     }
7766 
7767     // We optimize the truncation of induction variables having constant
7768     // integer steps. The cost of these truncations is the same as the scalar
7769     // operation.
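    // For instance (illustrative), "trunc i64 %iv to i32" where %iv has a
    // constant integer step can be generated as a truncated induction, so it
    // is costed as a scalar trunc rather than as a vector cast.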
7770     if (isOptimizableIVTruncate(I, VF)) {
7771       auto *Trunc = cast<TruncInst>(I);
7772       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
7773                                   Trunc->getSrcTy(), CCH, CostKind, Trunc);
7774     }
7775 
7776     // Detect reduction patterns
7777     InstructionCost RedCost;
7778     if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7779             .isValid())
7780       return RedCost;
7781 
7782     Type *SrcScalarTy = I->getOperand(0)->getType();
7783     Type *SrcVecTy =
7784         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
7785     if (canTruncateToMinimalBitwidth(I, VF)) {
7786       // This cast is going to be shrunk. This may remove the cast or it might
7787       // turn it into slightly different cast. For example, if MinBW == 16,
7788       // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
7789       //
7790       // Calculate the modified src and dest types.
7791       Type *MinVecTy = VectorTy;
7792       if (Opcode == Instruction::Trunc) {
7793         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
7794         VectorTy =
7795             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7796       } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
7797         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
7798         VectorTy =
7799             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7800       }
7801     }
7802 
7803     return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
7804   }
7805   case Instruction::Call: {
7806     bool NeedToScalarize;
7807     CallInst *CI = cast<CallInst>(I);
7808     InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
7809     if (getVectorIntrinsicIDForCall(CI, TLI)) {
7810       InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF);
7811       return std::min(CallCost, IntrinsicCost);
7812     }
7813     return CallCost;
7814   }
7815   case Instruction::ExtractValue:
7816     return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput);
7817   default:
7818     // This opcode is unknown. Assume that it is the same as 'mul'.
7819     return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
7820   } // end of switch.
7821 }
7822 
7823 char LoopVectorize::ID = 0;
7824 
7825 static const char lv_name[] = "Loop Vectorization";
7826 
7827 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
7828 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
7829 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
7830 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
7831 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
7832 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
7833 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
7834 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
7835 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
7836 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
7837 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
7838 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
7839 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
7840 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
7841 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
7842 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
7843 
7844 namespace llvm {
7845 
7846 Pass *createLoopVectorizePass() { return new LoopVectorize(); }
7847 
7848 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
7849                               bool VectorizeOnlyWhenForced) {
7850   return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
7851 }
7852 
7853 } // end namespace llvm
7854 
7855 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
7856   // Check if the pointer operand of a load or store instruction is
7857   // consecutive.
7858   if (auto *Ptr = getLoadStorePointerOperand(Inst))
7859     return Legal->isConsecutivePtr(Ptr);
7860   return false;
7861 }
7862 
7863 void LoopVectorizationCostModel::collectValuesToIgnore() {
7864   // Ignore ephemeral values.
7865   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
7866 
7867   // Ignore type-promoting instructions we identified during reduction
7868   // detection.
7869   for (auto &Reduction : Legal->getReductionVars()) {
7870     RecurrenceDescriptor &RedDes = Reduction.second;
7871     const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
7872     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7873   }
7874   // Ignore type-casting instructions we identified during induction
7875   // detection.
7876   for (auto &Induction : Legal->getInductionVars()) {
7877     InductionDescriptor &IndDes = Induction.second;
7878     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
7879     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7880   }
7881 }
7882 
7883 void LoopVectorizationCostModel::collectInLoopReductions() {
7884   for (auto &Reduction : Legal->getReductionVars()) {
7885     PHINode *Phi = Reduction.first;
7886     RecurrenceDescriptor &RdxDesc = Reduction.second;
7887 
7888     // We don't collect reductions that are type promoted (yet).
7889     if (RdxDesc.getRecurrenceType() != Phi->getType())
7890       continue;
7891 
7892     // If the target would prefer this reduction to happen "in-loop", then we
7893     // want to record it as such.
7894     unsigned Opcode = RdxDesc.getOpcode();
7895     if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) &&
7896         !TTI.preferInLoopReduction(Opcode, Phi->getType(),
7897                                    TargetTransformInfo::ReductionFlags()))
7898       continue;
7899 
7900     // Check that we can correctly put the reductions into the loop, by
7901     // finding the chain of operations that leads from the phi to the loop
7902     // exit value.
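    // E.g. (illustrative) for "%sum = phi ...; %sum.next = add %sum, %x" the
    // chain is simply {add}; if no such chain from the phi to its loop-carried
    // update can be found, the reduction stays out-of-loop.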
7903     SmallVector<Instruction *, 4> ReductionOperations =
7904         RdxDesc.getReductionOpChain(Phi, TheLoop);
7905     bool InLoop = !ReductionOperations.empty();
7906     if (InLoop) {
7907       InLoopReductionChains[Phi] = ReductionOperations;
7908       // Add the elements to InLoopReductionImmediateChains for cost modelling.
7909       Instruction *LastChain = Phi;
7910       for (auto *I : ReductionOperations) {
7911         InLoopReductionImmediateChains[I] = LastChain;
7912         LastChain = I;
7913       }
7914     }
7915     LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
7916                       << " reduction for phi: " << *Phi << "\n");
7917   }
7918 }
7919 
7920 // TODO: we could return a pair of values that specify the max VF and
7921 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
// `buildVPlans(VF, VF)`. We cannot do that yet because VPlan at the moment
// doesn't have a cost model that can choose which plan to execute if
// more than one is generated.
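//
// For example (illustrative numbers only), with 256-bit fixed-width vector
// registers and a widest loop type of 32 bits this returns a VF of 8.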
7925 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
7926                                  LoopVectorizationCostModel &CM) {
7927   unsigned WidestType;
7928   std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
7929   return WidestVectorRegBits / WidestType;
7930 }
7931 
7932 VectorizationFactor
7933 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
7934   assert(!UserVF.isScalable() && "scalable vectors not yet supported");
7935   ElementCount VF = UserVF;
7936   // Outer loop handling: They may require CFG and instruction level
7937   // transformations before even evaluating whether vectorization is profitable.
7938   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
7939   // the vectorization pipeline.
7940   if (!OrigLoop->isInnermost()) {
7941     // If the user doesn't provide a vectorization factor, determine a
7942     // reasonable one.
7943     if (UserVF.isZero()) {
7944       VF = ElementCount::getFixed(determineVPlanVF(
7945           TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
7946               .getFixedSize(),
7947           CM));
7948       LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
7949 
7950       // Make sure we have a VF > 1 for stress testing.
7951       if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
7952         LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
7953                           << "overriding computed VF.\n");
7954         VF = ElementCount::getFixed(4);
7955       }
7956     }
7957     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7958     assert(isPowerOf2_32(VF.getKnownMinValue()) &&
7959            "VF needs to be a power of two");
7960     LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
7961                       << "VF " << VF << " to build VPlans.\n");
7962     buildVPlans(VF, VF);
7963 
7964     // For VPlan build stress testing, we bail out after VPlan construction.
7965     if (VPlanBuildStressTest)
7966       return VectorizationFactor::Disabled();
7967 
7968     return {VF, 0 /*Cost*/};
7969   }
7970 
7971   LLVM_DEBUG(
7972       dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
7973                 "VPlan-native path.\n");
7974   return VectorizationFactor::Disabled();
7975 }
7976 
7977 Optional<VectorizationFactor>
7978 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
7979   assert(OrigLoop->isInnermost() && "Inner loop expected.");
7980   FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
  if (!MaxFactors) // Cases that should not be vectorized or interleaved.
7982     return None;
7983 
7984   // Invalidate interleave groups if all blocks of loop will be predicated.
7985   if (CM.blockNeedsPredication(OrigLoop->getHeader()) &&
7986       !useMaskedInterleavedAccesses(*TTI)) {
7987     LLVM_DEBUG(
7988         dbgs()
7989         << "LV: Invalidate all interleaved groups due to fold-tail by masking "
7990            "which requires masked-interleaved support.\n");
7991     if (CM.InterleaveInfo.invalidateGroups())
7992       // Invalidating interleave groups also requires invalidating all decisions
7993       // based on them, which includes widening decisions and uniform and scalar
7994       // values.
7995       CM.invalidateCostModelingDecisions();
7996   }
7997 
7998   ElementCount MaxUserVF =
7999       UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF;
8000   bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF);
8001   if (!UserVF.isZero() && UserVFIsLegal) {
8002     LLVM_DEBUG(dbgs() << "LV: Using " << (UserVFIsLegal ? "user" : "max")
8003                       << " VF " << UserVF << ".\n");
8004     assert(isPowerOf2_32(UserVF.getKnownMinValue()) &&
8005            "VF needs to be a power of two");
8006     // Collect the instructions (and their associated costs) that will be more
8007     // profitable to scalarize.
8008     CM.selectUserVectorizationFactor(UserVF);
8009     CM.collectInLoopReductions();
8010     buildVPlansWithVPRecipes(UserVF, UserVF);
8011     LLVM_DEBUG(printPlans(dbgs()));
8012     return {{UserVF, 0}};
8013   }
8014 
8015   // Populate the set of Vectorization Factor Candidates.
8016   ElementCountSet VFCandidates;
8017   for (auto VF = ElementCount::getFixed(1);
8018        ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2)
8019     VFCandidates.insert(VF);
8020   for (auto VF = ElementCount::getScalable(1);
8021        ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2)
8022     VFCandidates.insert(VF);
8023 
8024   for (const auto &VF : VFCandidates) {
8025     // Collect Uniform and Scalar instructions after vectorization with VF.
8026     CM.collectUniformsAndScalars(VF);
8027 
8028     // Collect the instructions (and their associated costs) that will be more
8029     // profitable to scalarize.
8030     if (VF.isVector())
8031       CM.collectInstsToScalarize(VF);
8032   }
8033 
8034   CM.collectInLoopReductions();
8035   buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF);
8036   buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF);
8037 
8038   LLVM_DEBUG(printPlans(dbgs()));
8039   if (!MaxFactors.hasVector())
8040     return VectorizationFactor::Disabled();
8041 
8042   // Select the optimal vectorization factor.
8043   auto SelectedVF = CM.selectVectorizationFactor(VFCandidates);
8044 
8045   // Check if it is profitable to vectorize with runtime checks.
8046   unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks();
8047   if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) {
8048     bool PragmaThresholdReached =
8049         NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
8050     bool ThresholdReached =
8051         NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
8052     if ((ThresholdReached && !Hints.allowReordering()) ||
8053         PragmaThresholdReached) {
8054       ORE->emit([&]() {
8055         return OptimizationRemarkAnalysisAliasing(
8056                    DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(),
8057                    OrigLoop->getHeader())
8058                << "loop not vectorized: cannot prove it is safe to reorder "
8059                   "memory operations";
8060       });
8061       LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
8062       Hints.emitRemarkWithHints();
8063       return VectorizationFactor::Disabled();
8064     }
8065   }
8066   return SelectedVF;
8067 }
8068 
8069 void LoopVectorizationPlanner::setBestPlan(ElementCount VF, unsigned UF) {
8070   LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF
8071                     << '\n');
8072   BestVF = VF;
8073   BestUF = UF;
8074 
8075   erase_if(VPlans, [VF](const VPlanPtr &Plan) {
8076     return !Plan->hasVF(VF);
8077   });
  assert(VPlans.size() == 1 && "Best VF does not have a single VPlan.");
8079 }
8080 
8081 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV,
8082                                            DominatorTree *DT) {
8083   // Perform the actual loop transformation.
8084 
8085   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
8086   assert(BestVF.hasValue() && "Vectorization Factor is missing");
8087   assert(VPlans.size() == 1 && "Not a single VPlan to execute.");
8088 
8089   VPTransformState State{
8090       *BestVF, BestUF, LI, DT, ILV.Builder, &ILV, VPlans.front().get()};
8091   State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
8092   State.TripCount = ILV.getOrCreateTripCount(nullptr);
8093   State.CanonicalIV = ILV.Induction;
8094 
8095   ILV.printDebugTracesAtStart();
8096 
8097   //===------------------------------------------------===//
8098   //
  // Notice: any optimization or new instruction that goes
8100   // into the code below should also be implemented in
8101   // the cost-model.
8102   //
8103   //===------------------------------------------------===//
8104 
8105   // 2. Copy and widen instructions from the old loop into the new loop.
8106   VPlans.front()->execute(&State);
8107 
8108   // 3. Fix the vectorized code: take care of header phi's, live-outs,
8109   //    predication, updating analyses.
8110   ILV.fixVectorizedLoop(State);
8111 
8112   ILV.printDebugTracesAtEnd();
8113 }
8114 
8115 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
8116 void LoopVectorizationPlanner::printPlans(raw_ostream &O) {
8117   for (const auto &Plan : VPlans)
8118     if (PrintVPlansInDotFormat)
8119       Plan->printDOT(O);
8120     else
8121       Plan->print(O);
8122 }
8123 #endif
8124 
8125 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
8126     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
8127 
  // We create new control-flow for the vectorized loop, so the original exit
  // conditions will be dead after vectorization if they are only used by the
  // terminator.
8131   SmallVector<BasicBlock*> ExitingBlocks;
8132   OrigLoop->getExitingBlocks(ExitingBlocks);
8133   for (auto *BB : ExitingBlocks) {
8134     auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
8135     if (!Cmp || !Cmp->hasOneUse())
8136       continue;
8137 
8138     // TODO: we should introduce a getUniqueExitingBlocks on Loop
8139     if (!DeadInstructions.insert(Cmp).second)
8140       continue;
8141 
    // An operand of the icmp is often a dead trunc, used by IndUpdate.
8143     // TODO: can recurse through operands in general
8144     for (Value *Op : Cmp->operands()) {
8145       if (isa<TruncInst>(Op) && Op->hasOneUse())
        DeadInstructions.insert(cast<Instruction>(Op));
8147     }
8148   }
8149 
8150   // We create new "steps" for induction variable updates to which the original
8151   // induction variables map. An original update instruction will be dead if
8152   // all its users except the induction variable are dead.
8153   auto *Latch = OrigLoop->getLoopLatch();
8154   for (auto &Induction : Legal->getInductionVars()) {
8155     PHINode *Ind = Induction.first;
8156     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
8157 
    // If the tail is to be folded by masking, the primary induction variable,
    // if it exists, isn't dead: it will be used for masking. Don't kill it.
8160     if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction())
8161       continue;
8162 
8163     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
8164           return U == Ind || DeadInstructions.count(cast<Instruction>(U));
8165         }))
8166       DeadInstructions.insert(IndUpdate);
8167 
8168     // We record as "Dead" also the type-casting instructions we had identified
8169     // during induction analysis. We don't need any handling for them in the
8170     // vectorized loop because we have proven that, under a proper runtime
8171     // test guarding the vectorized loop, the value of the phi, and the casted
8172     // value of the phi, are the same. The last instruction in this casting chain
8173     // will get its scalar/vector/widened def from the scalar/vector/widened def
8174     // of the respective phi node. Any other casts in the induction def-use chain
8175     // have no other uses outside the phi update chain, and will be ignored.
8176     InductionDescriptor &IndDes = Induction.second;
8177     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
8178     DeadInstructions.insert(Casts.begin(), Casts.end());
8179   }
8180 }
8181 
8182 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }
8183 
8184 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
8185 
8186 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
8187                                         Instruction::BinaryOps BinOp) {
8188   // When unrolling and the VF is 1, we only need to add a simple scalar.
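  // For instance (illustrative), with Val = %iv, StartIdx = 2 and Step = 1
  // this emits "%iv + 2", i.e. the scalar value for unroll part 2.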
8189   Type *Ty = Val->getType();
8190   assert(!Ty->isVectorTy() && "Val must be a scalar");
8191 
8192   if (Ty->isFloatingPointTy()) {
8193     Constant *C = ConstantFP::get(Ty, (double)StartIdx);
8194 
8195     // Floating-point operations inherit FMF via the builder's flags.
8196     Value *MulOp = Builder.CreateFMul(C, Step);
8197     return Builder.CreateBinOp(BinOp, Val, MulOp);
8198   }
8199   Constant *C = ConstantInt::get(Ty, StartIdx);
8200   return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
8201 }
8202 
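// After the function below runs, the loop's !llvm.loop ID carries an operand
// such as (illustrative):
//   !{!"llvm.loop.unroll.runtime.disable"}
// telling the unroller not to runtime-unroll the vectorized loop.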
8203 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
8204   SmallVector<Metadata *, 4> MDs;
8205   // Reserve first location for self reference to the LoopID metadata node.
8206   MDs.push_back(nullptr);
8207   bool IsUnrollMetadata = false;
8208   MDNode *LoopID = L->getLoopID();
8209   if (LoopID) {
8210     // First find existing loop unrolling disable metadata.
8211     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
8212       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
8213       if (MD) {
8214         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
8215         IsUnrollMetadata =
8216             S && S->getString().startswith("llvm.loop.unroll.disable");
8217       }
8218       MDs.push_back(LoopID->getOperand(i));
8219     }
8220   }
8221 
8222   if (!IsUnrollMetadata) {
8223     // Add runtime unroll disable metadata.
8224     LLVMContext &Context = L->getHeader()->getContext();
8225     SmallVector<Metadata *, 1> DisableOperands;
8226     DisableOperands.push_back(
8227         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
8228     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
8229     MDs.push_back(DisableNode);
8230     MDNode *NewLoopID = MDNode::get(Context, MDs);
8231     // Set operand 0 to refer to the loop id itself.
8232     NewLoopID->replaceOperandWith(0, NewLoopID);
8233     L->setLoopID(NewLoopID);
8234   }
8235 }
8236 
8237 //===--------------------------------------------------------------------===//
8238 // EpilogueVectorizerMainLoop
8239 //===--------------------------------------------------------------------===//
8240 
8241 /// This function is partially responsible for generating the control flow
8242 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
8243 BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
8244   MDNode *OrigLoopID = OrigLoop->getLoopID();
8245   Loop *Lp = createVectorLoopSkeleton("");
8246 
8247   // Generate the code to check the minimum iteration count of the vector
8248   // epilogue (see below).
8249   EPI.EpilogueIterationCountCheck =
8250       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true);
8251   EPI.EpilogueIterationCountCheck->setName("iter.check");
8252 
8253   // Generate the code to check any assumptions that we've made for SCEV
8254   // expressions.
8255   EPI.SCEVSafetyCheck = emitSCEVChecks(Lp, LoopScalarPreHeader);
8256 
8257   // Generate the code that checks at runtime if arrays overlap. We put the
8258   // checks into a separate block to make the more common case of few elements
8259   // faster.
8260   EPI.MemSafetyCheck = emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
8261 
8262   // Generate the iteration count check for the main loop, *after* the check
8263   // for the epilogue loop, so that the path-length is shorter for the case
8264   // that goes directly through the vector epilogue. The longer-path length for
8265   // the main loop is compensated for, by the gain from vectorizing the larger
8266   // trip count. Note: the branch will get updated later on when we vectorize
8267   // the epilogue.
8268   EPI.MainLoopIterationCountCheck =
8269       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false);
8270 
8271   // Generate the induction variable.
8272   OldInduction = Legal->getPrimaryInduction();
8273   Type *IdxTy = Legal->getWidestInductionType();
8274   Value *StartIdx = ConstantInt::get(IdxTy, 0);
8275   Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
8276   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
8277   EPI.VectorTripCount = CountRoundDown;
8278   Induction =
8279       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
8280                               getDebugLocFromInstOrOperands(OldInduction));
8281 
  // Skip creating induction resume values here because they will be created
  // in the second pass. If we created them here, they wouldn't be used anyway,
  // because the VPlan in the second pass still contains the inductions from
  // the original loop.
8286 
8287   return completeLoopSkeleton(Lp, OrigLoopID);
8288 }
8289 
8290 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
8291   LLVM_DEBUG({
8292     dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
8293            << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue()
8294            << ", Main Loop UF:" << EPI.MainLoopUF
8295            << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue()
8296            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8297   });
8298 }
8299 
8300 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
8301   DEBUG_WITH_TYPE(VerboseDebug, {
8302     dbgs() << "intermediate fn:\n" << *Induction->getFunction() << "\n";
8303   });
8304 }
8305 
8306 BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(
8307     Loop *L, BasicBlock *Bypass, bool ForEpilogue) {
8308   assert(L && "Expected valid Loop.");
8309   assert(Bypass && "Expected valid bypass basic block.");
8310   unsigned VFactor =
8311       ForEpilogue ? EPI.EpilogueVF.getKnownMinValue() : VF.getKnownMinValue();
8312   unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF;
8313   Value *Count = getOrCreateTripCount(L);
8314   // Reuse existing vector loop preheader for TC checks.
8315   // Note that new preheader block is generated for vector loop.
8316   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
8317   IRBuilder<> Builder(TCCheckBlock->getTerminator());
8318 
8319   // Generate code to check if the loop's trip count is less than VF * UF of the
8320   // main vector loop.
8321   auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF) ?
8322       ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
8323 
8324   Value *CheckMinIters = Builder.CreateICmp(
8325       P, Count, ConstantInt::get(Count->getType(), VFactor * UFactor),
8326       "min.iters.check");
8327 
8328   if (!ForEpilogue)
8329     TCCheckBlock->setName("vector.main.loop.iter.check");
8330 
8331   // Create new preheader for vector loop.
8332   LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
8333                                    DT, LI, nullptr, "vector.ph");
8334 
8335   if (ForEpilogue) {
8336     assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
8337                                  DT->getNode(Bypass)->getIDom()) &&
8338            "TC check is expected to dominate Bypass");
8339 
8340     // Update dominator for Bypass & LoopExit.
8341     DT->changeImmediateDominator(Bypass, TCCheckBlock);
8342     DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
8343 
8344     LoopBypassBlocks.push_back(TCCheckBlock);
8345 
8346     // Save the trip count so we don't have to regenerate it in the
8347     // vec.epilog.iter.check. This is safe to do because the trip count
8348     // generated here dominates the vector epilog iter check.
8349     EPI.TripCount = Count;
8350   }
8351 
8352   ReplaceInstWithInst(
8353       TCCheckBlock->getTerminator(),
8354       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8355 
8356   return TCCheckBlock;
8357 }
8358 
8359 //===--------------------------------------------------------------------===//
8360 // EpilogueVectorizerEpilogueLoop
8361 //===--------------------------------------------------------------------===//
8362 
8363 /// This function is partially responsible for generating the control flow
8364 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
8365 BasicBlock *
8366 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
8367   MDNode *OrigLoopID = OrigLoop->getLoopID();
8368   Loop *Lp = createVectorLoopSkeleton("vec.epilog.");
8369 
  // Now, compare the remaining count, and if there aren't enough iterations
  // to execute the vectorized epilogue, skip to the scalar part.
8372   BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader;
8373   VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check");
8374   LoopVectorPreHeader =
8375       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
8376                  LI, nullptr, "vec.epilog.ph");
8377   emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader,
8378                                           VecEpilogueIterationCountCheck);
8379 
8380   // Adjust the control flow taking the state info from the main loop
8381   // vectorization into account.
8382   assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck &&
8383          "expected this to be saved from the previous pass.");
8384   EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith(
8385       VecEpilogueIterationCountCheck, LoopVectorPreHeader);
8386 
8387   DT->changeImmediateDominator(LoopVectorPreHeader,
8388                                EPI.MainLoopIterationCountCheck);
8389 
8390   EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
8391       VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8392 
8393   if (EPI.SCEVSafetyCheck)
8394     EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith(
8395         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8396   if (EPI.MemSafetyCheck)
8397     EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith(
8398         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8399 
8400   DT->changeImmediateDominator(
8401       VecEpilogueIterationCountCheck,
8402       VecEpilogueIterationCountCheck->getSinglePredecessor());
8403 
8404   DT->changeImmediateDominator(LoopScalarPreHeader,
8405                                EPI.EpilogueIterationCountCheck);
8406   DT->changeImmediateDominator(LoopExitBlock, EPI.EpilogueIterationCountCheck);
8407 
8408   // Keep track of bypass blocks, as they feed start values to the induction
8409   // phis in the scalar loop preheader.
8410   if (EPI.SCEVSafetyCheck)
8411     LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck);
8412   if (EPI.MemSafetyCheck)
8413     LoopBypassBlocks.push_back(EPI.MemSafetyCheck);
8414   LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck);
8415 
8416   // Generate a resume induction for the vector epilogue and put it in the
8417   // vector epilogue preheader.
8418   Type *IdxTy = Legal->getWidestInductionType();
8419   PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
8420                                          LoopVectorPreHeader->getFirstNonPHI());
8421   EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
8422   EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
8423                            EPI.MainLoopIterationCountCheck);
8424 
8425   // Generate the induction variable.
8426   OldInduction = Legal->getPrimaryInduction();
8427   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
8428   Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
8429   Value *StartIdx = EPResumeVal;
8430   Induction =
8431       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
8432                               getDebugLocFromInstOrOperands(OldInduction));
8433 
8434   // Generate induction resume values. These variables save the new starting
8435   // indexes for the scalar loop. They are used to test if there are any tail
8436   // iterations left once the vector loop has completed.
8437   // Note that when the vectorized epilogue is skipped due to the iteration
8438   // count check, the resume value for the induction variable comes from the
8439   // trip count of the main vector loop, hence passing the AdditionalBypass
8440   // argument.
8441   createInductionResumeValues(Lp, CountRoundDown,
8442                               {VecEpilogueIterationCountCheck,
8443                                EPI.VectorTripCount} /* AdditionalBypass */);
8444 
8445   AddRuntimeUnrollDisableMetaData(Lp);
8446   return completeLoopSkeleton(Lp, OrigLoopID);
8447 }
8448 
8449 BasicBlock *
8450 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
8451     Loop *L, BasicBlock *Bypass, BasicBlock *Insert) {
8452 
8453   assert(EPI.TripCount &&
8454          "Expected trip count to have been saved in the first pass.");
8455   assert(
8456       (!isa<Instruction>(EPI.TripCount) ||
8457        DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
8458       "saved trip count does not dominate insertion point.");
8459   Value *TC = EPI.TripCount;
8460   IRBuilder<> Builder(Insert->getTerminator());
8461   Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");
8462 
8463   // Generate code to check if the remaining iteration count is less than
8464   // VF * UF of the vector epilogue loop.
8465   auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ?
8466       ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
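  // With a required scalar epilogue, ULE makes this check also bypass the
  // epilogue vector loop when the remaining count is exactly
  // EpilogueVF * EpilogueUF, so at least one iteration is left for the scalar
  // remainder loop.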
8467 
8468   Value *CheckMinIters = Builder.CreateICmp(
8469       P, Count,
8470       ConstantInt::get(Count->getType(),
8471                        EPI.EpilogueVF.getKnownMinValue() * EPI.EpilogueUF),
8472       "min.epilog.iters.check");
8473 
8474   ReplaceInstWithInst(
8475       Insert->getTerminator(),
8476       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8477 
8478   LoopBypassBlocks.push_back(Insert);
8479   return Insert;
8480 }
8481 
8482 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
8483   LLVM_DEBUG({
8484     dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
8485            << "Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue()
8486            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8487   });
8488 }
8489 
8490 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
8491   DEBUG_WITH_TYPE(VerboseDebug, {
8492     dbgs() << "final fn:\n" << *Induction->getFunction() << "\n";
8493   });
8494 }
8495 
8496 bool LoopVectorizationPlanner::getDecisionAndClampRange(
8497     const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
8498   assert(!Range.isEmpty() && "Trying to test an empty VF range.");
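  // For example, with Range = [4, 16) the candidate VFs are 4 and 8. If the
  // predicate gives the same answer for both, the range is left untouched; if
  // the answer flips at VF = 8, Range.End is clamped to 8 and the caller
  // handles [8, 16) with a separate plan.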
8499   bool PredicateAtRangeStart = Predicate(Range.Start);
8500 
8501   for (ElementCount TmpVF = Range.Start * 2;
8502        ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
8503     if (Predicate(TmpVF) != PredicateAtRangeStart) {
8504       Range.End = TmpVF;
8505       break;
8506     }
8507 
8508   return PredicateAtRangeStart;
8509 }
8510 
8511 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
8512 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
8513 /// of VF's starting at a given VF and extending it as much as possible. Each
8514 /// vectorization decision can potentially shorten this sub-range during
8515 /// buildVPlan().
8516 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
8517                                            ElementCount MaxVF) {
8518   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8519   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8520     VFRange SubRange = {VF, MaxVFPlusOne};
8521     VPlans.push_back(buildVPlan(SubRange));
8522     VF = SubRange.End;
8523   }
8524 }
8525 
8526 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
8527                                          VPlanPtr &Plan) {
8528   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
8529 
8530   // Look for cached value.
8531   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
8532   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
8533   if (ECEntryIt != EdgeMaskCache.end())
8534     return ECEntryIt->second;
8535 
8536   VPValue *SrcMask = createBlockInMask(Src, Plan);
8537 
8538   // The terminator has to be a branch inst!
8539   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
8540   assert(BI && "Unexpected terminator found");
8541 
8542   if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
8543     return EdgeMaskCache[Edge] = SrcMask;
8544 
8545   // If source is an exiting block, we know the exit edge is dynamically dead
8546   // in the vector loop, and thus we don't need to restrict the mask.  Avoid
8547   // adding uses of an otherwise potentially dead instruction.
8548   if (OrigLoop->isLoopExiting(Src))
8549     return EdgeMaskCache[Edge] = SrcMask;
8550 
8551   VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition());
8552   assert(EdgeMask && "No Edge Mask found for condition");
8553 
8554   if (BI->getSuccessor(0) != Dst)
8555     EdgeMask = Builder.createNot(EdgeMask);
8556 
8557   if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND.
8558     // The condition is 'SrcMask && EdgeMask', which is equivalent to
8559     // 'select i1 SrcMask, i1 EdgeMask, i1 false'.
8560     // The select version does not introduce new UB if SrcMask is false and
8561     // EdgeMask is poison. Using 'and' here introduces undefined behavior.
8562     VPValue *False = Plan->getOrAddVPValue(
8563         ConstantInt::getFalse(BI->getCondition()->getType()));
8564     EdgeMask = Builder.createSelect(SrcMask, EdgeMask, False);
8565   }
8566 
8567   return EdgeMaskCache[Edge] = EdgeMask;
8568 }
8569 
8570 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
8571   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
8572 
8573   // Look for cached value.
8574   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
8575   if (BCEntryIt != BlockMaskCache.end())
8576     return BCEntryIt->second;
8577 
8578   // All-one mask is modelled as no-mask following the convention for masked
8579   // load/store/gather/scatter. Initialize BlockMask to no-mask.
8580   VPValue *BlockMask = nullptr;
8581 
8582   if (OrigLoop->getHeader() == BB) {
8583     if (!CM.blockNeedsPredication(BB))
8584       return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
8585 
8586     // Create the block in mask as the first non-phi instruction in the block.
8587     VPBuilder::InsertPointGuard Guard(Builder);
8588     auto NewInsertionPoint = Builder.getInsertBlock()->getFirstNonPhi();
8589     Builder.setInsertPoint(Builder.getInsertBlock(), NewInsertionPoint);
8590 
8591     // Introduce the early-exit compare IV <= BTC to form header block mask.
8592     // This is used instead of IV < TC because TC may wrap, unlike BTC.
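    // For example, for an i8 induction covering 256 iterations, TC wraps to 0
    // while BTC is 255, so comparing the IV against BTC stays correct.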
8593     // Start by constructing the desired canonical IV.
8594     VPValue *IV = nullptr;
8595     if (Legal->getPrimaryInduction())
8596       IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction());
8597     else {
8598       auto IVRecipe = new VPWidenCanonicalIVRecipe();
8599       Builder.getInsertBlock()->insert(IVRecipe, NewInsertionPoint);
8600       IV = IVRecipe->getVPSingleValue();
8601     }
8602     VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
8603     bool TailFolded = !CM.isScalarEpilogueAllowed();
8604 
8605     if (TailFolded && CM.TTI.emitGetActiveLaneMask()) {
8606       // While ActiveLaneMask is a binary op that consumes the loop trip count
8607       // as its second argument, we only pass the IV here and extract the trip
8608       // count from the transform state, where codegen of the VP instructions
8609       // happens.
8610       BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV});
8611     } else {
8612       BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
8613     }
8614     return BlockMaskCache[BB] = BlockMask;
8615   }
8616 
8617   // This is the block mask. We OR all incoming edges.
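  // For example, a block with two predecessors gets the mask
  // EdgeMask(Pred0, BB) | EdgeMask(Pred1, BB); a nullptr edge mask stands for
  // all-one, which makes the block mask all-one as well.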
8618   for (auto *Predecessor : predecessors(BB)) {
8619     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
8620     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
8621       return BlockMaskCache[BB] = EdgeMask;
8622 
8623     if (!BlockMask) { // BlockMask has its initialized nullptr value.
8624       BlockMask = EdgeMask;
8625       continue;
8626     }
8627 
8628     BlockMask = Builder.createOr(BlockMask, EdgeMask);
8629   }
8630 
8631   return BlockMaskCache[BB] = BlockMask;
8632 }
8633 
8634 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I,
8635                                                 ArrayRef<VPValue *> Operands,
8636                                                 VFRange &Range,
8637                                                 VPlanPtr &Plan) {
8638   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
8639          "Must be called with either a load or store");
8640 
8641   auto willWiden = [&](ElementCount VF) -> bool {
8642     if (VF.isScalar())
8643       return false;
8644     LoopVectorizationCostModel::InstWidening Decision =
8645         CM.getWideningDecision(I, VF);
8646     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
8647            "CM decision should be taken at this point.");
8648     if (Decision == LoopVectorizationCostModel::CM_Interleave)
8649       return true;
8650     if (CM.isScalarAfterVectorization(I, VF) ||
8651         CM.isProfitableToScalarize(I, VF))
8652       return false;
8653     return Decision != LoopVectorizationCostModel::CM_Scalarize;
8654   };
8655 
8656   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8657     return nullptr;
8658 
8659   VPValue *Mask = nullptr;
8660   if (Legal->isMaskRequired(I))
8661     Mask = createBlockInMask(I->getParent(), Plan);
8662 
8663   if (LoadInst *Load = dyn_cast<LoadInst>(I))
8664     return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask);
8665 
8666   StoreInst *Store = cast<StoreInst>(I);
8667   return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0],
8668                                             Mask);
8669 }
8670 
8671 VPWidenIntOrFpInductionRecipe *
8672 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi,
8673                                            ArrayRef<VPValue *> Operands) const {
8674   // Check if this is an integer or fp induction. If so, build the recipe that
8675   // produces its scalar and vector values.
8676   InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
8677   if (II.getKind() == InductionDescriptor::IK_IntInduction ||
8678       II.getKind() == InductionDescriptor::IK_FpInduction) {
8679     assert(II.getStartValue() ==
8680            Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8681     const SmallVectorImpl<Instruction *> &Casts = II.getCastInsts();
8682     return new VPWidenIntOrFpInductionRecipe(
8683         Phi, Operands[0], Casts.empty() ? nullptr : Casts.front());
8684   }
8685 
8686   return nullptr;
8687 }
8688 
8689 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate(
8690     TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range,
8691     VPlan &Plan) const {
8692   // Optimize the special case where the source is a constant integer
8693   // induction variable. Notice that we can only optimize the 'trunc' case
8694   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
8695   // (c) other casts depend on pointer size.
8696 
8697   // Determine whether \p K is a truncation based on an induction variable that
8698   // can be optimized.
8699   auto isOptimizableIVTruncate =
8700       [&](Instruction *K) -> std::function<bool(ElementCount)> {
8701     return [=](ElementCount VF) -> bool {
8702       return CM.isOptimizableIVTruncate(K, VF);
8703     };
8704   };
8705 
8706   if (LoopVectorizationPlanner::getDecisionAndClampRange(
8707           isOptimizableIVTruncate(I), Range)) {
8708 
8709     InductionDescriptor II =
8710         Legal->getInductionVars().lookup(cast<PHINode>(I->getOperand(0)));
8711     VPValue *Start = Plan.getOrAddVPValue(II.getStartValue());
8712     return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)),
8713                                              Start, nullptr, I);
8714   }
8715   return nullptr;
8716 }
8717 
8718 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi,
8719                                                 ArrayRef<VPValue *> Operands,
8720                                                 VPlanPtr &Plan) {
8721   // If all incoming values are equal, the incoming VPValue can be used directly
8722   // instead of creating a new VPBlendRecipe.
8723   VPValue *FirstIncoming = Operands[0];
8724   if (all_of(Operands, [FirstIncoming](const VPValue *Inc) {
8725         return FirstIncoming == Inc;
8726       })) {
8727     return Operands[0];
8728   }
8729 
8730   // We know that all PHIs in non-header blocks are converted into selects, so
8731   // we don't have to worry about the insertion order and we can just use the
8732   // builder. At this point we generate the predication tree. There may be
8733   // duplications since this is a simple recursive scan, but future
8734   // optimizations will clean it up.
8735   SmallVector<VPValue *, 2> OperandsWithMask;
8736   unsigned NumIncoming = Phi->getNumIncomingValues();
8737 
8738   for (unsigned In = 0; In < NumIncoming; In++) {
8739     VPValue *EdgeMask =
8740       createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
8741     assert((EdgeMask || NumIncoming == 1) &&
8742            "Multiple predecessors with one having a full mask");
8743     OperandsWithMask.push_back(Operands[In]);
8744     if (EdgeMask)
8745       OperandsWithMask.push_back(EdgeMask);
8746   }
8747   return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask));
8748 }
8749 
8750 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
8751                                                    ArrayRef<VPValue *> Operands,
8752                                                    VFRange &Range) const {
8753 
8754   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8755       [this, CI](ElementCount VF) { return CM.isScalarWithPredication(CI); },
8756       Range);
8757 
8758   if (IsPredicated)
8759     return nullptr;
8760 
8761   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8762   if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
8763              ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
8764              ID == Intrinsic::pseudoprobe ||
8765              ID == Intrinsic::experimental_noalias_scope_decl))
8766     return nullptr;
8767 
8768   auto willWiden = [&](ElementCount VF) -> bool {
8769     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8770     // The following case may be scalarized depending on the VF.
8771     // The flag indicates whether to use an intrinsic or a plain call for the
8772     // vectorized version of the instruction, i.e., whether the intrinsic call
8773     // is at least as cheap as the library call.
8774     bool NeedToScalarize = false;
8775     InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
8776     InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0;
8777     bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
8778     assert((IntrinsicCost.isValid() || CallCost.isValid()) &&
8779            "Either the intrinsic cost or vector call cost must be valid");
8780     return UseVectorIntrinsic || !NeedToScalarize;
8781   };
8782 
8783   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8784     return nullptr;
8785 
8786   ArrayRef<VPValue *> Ops = Operands.take_front(CI->getNumArgOperands());
8787   return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end()));
8788 }
8789 
8790 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
8791   assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
8792          !isa<StoreInst>(I) && "Instruction should have been handled earlier");
8793   // Instruction should be widened, unless it is scalar after vectorization,
8794   // scalarization is profitable or it is predicated.
8795   auto WillScalarize = [this, I](ElementCount VF) -> bool {
8796     return CM.isScalarAfterVectorization(I, VF) ||
8797            CM.isProfitableToScalarize(I, VF) || CM.isScalarWithPredication(I);
8798   };
8799   return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
8800                                                              Range);
8801 }
8802 
8803 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
8804                                            ArrayRef<VPValue *> Operands) const {
8805   auto IsVectorizableOpcode = [](unsigned Opcode) {
8806     switch (Opcode) {
8807     case Instruction::Add:
8808     case Instruction::And:
8809     case Instruction::AShr:
8810     case Instruction::BitCast:
8811     case Instruction::FAdd:
8812     case Instruction::FCmp:
8813     case Instruction::FDiv:
8814     case Instruction::FMul:
8815     case Instruction::FNeg:
8816     case Instruction::FPExt:
8817     case Instruction::FPToSI:
8818     case Instruction::FPToUI:
8819     case Instruction::FPTrunc:
8820     case Instruction::FRem:
8821     case Instruction::FSub:
8822     case Instruction::ICmp:
8823     case Instruction::IntToPtr:
8824     case Instruction::LShr:
8825     case Instruction::Mul:
8826     case Instruction::Or:
8827     case Instruction::PtrToInt:
8828     case Instruction::SDiv:
8829     case Instruction::Select:
8830     case Instruction::SExt:
8831     case Instruction::Shl:
8832     case Instruction::SIToFP:
8833     case Instruction::SRem:
8834     case Instruction::Sub:
8835     case Instruction::Trunc:
8836     case Instruction::UDiv:
8837     case Instruction::UIToFP:
8838     case Instruction::URem:
8839     case Instruction::Xor:
8840     case Instruction::ZExt:
8841       return true;
8842     }
8843     return false;
8844   };
8845 
8846   if (!IsVectorizableOpcode(I->getOpcode()))
8847     return nullptr;
8848 
8849   // Success: widen this instruction.
8850   return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end()));
8851 }
8852 
8853 void VPRecipeBuilder::fixHeaderPhis() {
8854   BasicBlock *OrigLatch = OrigLoop->getLoopLatch();
8855   for (VPWidenPHIRecipe *R : PhisToFix) {
8856     auto *PN = cast<PHINode>(R->getUnderlyingValue());
8857     VPRecipeBase *IncR =
8858         getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch)));
8859     R->addOperand(IncR->getVPSingleValue());
8860   }
8861 }
8862 
8863 VPBasicBlock *VPRecipeBuilder::handleReplication(
8864     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
8865     VPlanPtr &Plan) {
8866   bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
8867       [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
8868       Range);
8869 
8870   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8871       [&](ElementCount VF) { return CM.isPredicatedInst(I); }, Range);
8872 
8873   auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
8874                                        IsUniform, IsPredicated);
8875   setRecipe(I, Recipe);
8876   Plan->addVPValue(I, Recipe);
8877 
8878   // Find if I uses a predicated instruction. If so, it will use its scalar
8879   // value. Avoid hoisting the insert-element which packs the scalar value into
8880   // a vector value, as that happens iff all users use the vector value.
8881   for (VPValue *Op : Recipe->operands()) {
8882     auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef());
8883     if (!PredR)
8884       continue;
8885     auto *RepR =
8886         cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef());
8887     assert(RepR->isPredicated() &&
8888            "expected Replicate recipe to be predicated");
8889     RepR->setAlsoPack(false);
8890   }
8891 
8892   // Finalize the recipe for Instr, handling the non-predicated case first.
8893   if (!IsPredicated) {
8894     LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
8895     VPBB->appendRecipe(Recipe);
8896     return VPBB;
8897   }
8898   LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
8899   assert(VPBB->getSuccessors().empty() &&
8900          "VPBB has successors when handling predicated replication.");
8901   // Record predicated instructions for above packing optimizations.
8902   VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
8903   VPBlockUtils::insertBlockAfter(Region, VPBB);
8904   auto *RegSucc = new VPBasicBlock();
8905   VPBlockUtils::insertBlockAfter(RegSucc, Region);
8906   return RegSucc;
8907 }
8908 
8909 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
8910                                                       VPRecipeBase *PredRecipe,
8911                                                       VPlanPtr &Plan) {
8912   // Instructions marked for predication are replicated and placed under an
8913   // if-then construct to prevent side-effects.
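  // The region built below has the shape:
  //   pred.<opcode>.entry:    VPBranchOnMaskRecipe on BlockInMask
  //   pred.<opcode>.if:       the predicated recipe (PredRecipe)
  //   pred.<opcode>.continue: optional VPPredInstPHIRecipe merging the result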
8914 
8915   // Generate recipes to compute the block mask for this region.
8916   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
8917 
8918   // Build the triangular if-then region.
8919   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
8920   assert(Instr->getParent() && "Predicated instruction not in any basic block");
8921   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
8922   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
8923   auto *PHIRecipe = Instr->getType()->isVoidTy()
8924                         ? nullptr
8925                         : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr));
8926   if (PHIRecipe) {
8927     Plan->removeVPValueFor(Instr);
8928     Plan->addVPValue(Instr, PHIRecipe);
8929   }
8930   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
8931   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
8932   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
8933 
8934   // Note: first set Entry as region entry and then connect successors starting
8935   // from it in order, to propagate the "parent" of each VPBasicBlock.
8936   VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
8937   VPBlockUtils::connectBlocks(Pred, Exit);
8938 
8939   return Region;
8940 }
8941 
8942 VPRecipeOrVPValueTy
8943 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
8944                                         ArrayRef<VPValue *> Operands,
8945                                         VFRange &Range, VPlanPtr &Plan) {
8946   // First, check for specific widening recipes that deal with calls, memory
8947   // operations, inductions and Phi nodes.
8948   if (auto *CI = dyn_cast<CallInst>(Instr))
8949     return toVPRecipeResult(tryToWidenCall(CI, Operands, Range));
8950 
8951   if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8952     return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan));
8953 
8954   VPRecipeBase *Recipe;
8955   if (auto Phi = dyn_cast<PHINode>(Instr)) {
8956     if (Phi->getParent() != OrigLoop->getHeader())
8957       return tryToBlend(Phi, Operands, Plan);
8958     if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands)))
8959       return toVPRecipeResult(Recipe);
8960 
8961     VPWidenPHIRecipe *PhiRecipe = nullptr;
8962     if (Legal->isReductionVariable(Phi) || Legal->isFirstOrderRecurrence(Phi)) {
8963       VPValue *StartV = Operands[0];
8964       if (Legal->isReductionVariable(Phi)) {
8965         RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
8966         assert(RdxDesc.getRecurrenceStartValue() ==
8967                Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8968         PhiRecipe = new VPWidenPHIRecipe(Phi, RdxDesc, *StartV);
8969       } else {
8970         PhiRecipe = new VPWidenPHIRecipe(Phi, *StartV);
8971       }
8972 
8973       // Record the incoming value from the backedge, so we can add the incoming
8974       // value from the backedge after all recipes have been created.
8975       recordRecipeOf(cast<Instruction>(
8976           Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch())));
8977       PhisToFix.push_back(PhiRecipe);
8978     } else {
8979       // TODO: record start and backedge value for remaining pointer induction
8980       // phis.
8981       assert(Phi->getType()->isPointerTy() &&
8982              "only pointer phis should be handled here");
8983       PhiRecipe = new VPWidenPHIRecipe(Phi);
8984     }
8985 
8986     return toVPRecipeResult(PhiRecipe);
8987   }
8988 
8989   if (isa<TruncInst>(Instr) &&
8990       (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands,
8991                                                Range, *Plan)))
8992     return toVPRecipeResult(Recipe);
8993 
8994   if (!shouldWiden(Instr, Range))
8995     return nullptr;
8996 
8997   if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
8998     return toVPRecipeResult(new VPWidenGEPRecipe(
8999         GEP, make_range(Operands.begin(), Operands.end()), OrigLoop));
9000 
9001   if (auto *SI = dyn_cast<SelectInst>(Instr)) {
9002     bool InvariantCond =
9003         PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
9004     return toVPRecipeResult(new VPWidenSelectRecipe(
9005         *SI, make_range(Operands.begin(), Operands.end()), InvariantCond));
9006   }
9007 
9008   return toVPRecipeResult(tryToWiden(Instr, Operands));
9009 }
9010 
9011 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
9012                                                         ElementCount MaxVF) {
9013   assert(OrigLoop->isInnermost() && "Inner loop expected.");
9014 
9015   // Collect instructions from the original loop that will become trivially dead
9016   // in the vectorized loop. We don't need to vectorize these instructions. For
9017   // example, original induction update instructions can become dead because we
9018   // separately emit induction "steps" when generating code for the new loop.
9019   // Similarly, we create a new latch condition when setting up the structure
9020   // of the new loop, so the old one can become dead.
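  // For example, an IV update such as '%iv.next = add i64 %iv, 1' and the old
  // latch compare feeding the backedge branch typically end up in this set.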
9021   SmallPtrSet<Instruction *, 4> DeadInstructions;
9022   collectTriviallyDeadInstructions(DeadInstructions);
9023 
9024   // Add assume instructions we need to drop to DeadInstructions, to prevent
9025   // them from being added to the VPlan.
9026   // TODO: We only need to drop assumes in blocks that get flattened. If the
9027   // control flow is preserved, we should keep them.
9028   auto &ConditionalAssumes = Legal->getConditionalAssumes();
9029   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
9030 
9031   MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
9032   // Dead instructions do not need sinking. Remove them from SinkAfter.
9033   for (Instruction *I : DeadInstructions)
9034     SinkAfter.erase(I);
9035 
9036   // Cannot sink instructions after dead instructions (there won't be any
9037   // recipes for them). Instead, find the first non-dead previous instruction.
9038   for (auto &P : Legal->getSinkAfter()) {
9039     Instruction *SinkTarget = P.second;
9040     Instruction *FirstInst = &*SinkTarget->getParent()->begin();
9041     (void)FirstInst;
9042     while (DeadInstructions.contains(SinkTarget)) {
9043       assert(
9044           SinkTarget != FirstInst &&
9045           "Must find a live instruction (at least the one feeding the "
9046           "first-order recurrence PHI) before reaching beginning of the block");
9047       SinkTarget = SinkTarget->getPrevNode();
9048       assert(SinkTarget != P.first &&
9049              "sink source equals target, no sinking required");
9050     }
9051     P.second = SinkTarget;
9052   }
9053 
9054   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
9055   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
9056     VFRange SubRange = {VF, MaxVFPlusOne};
9057     VPlans.push_back(
9058         buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
9059     VF = SubRange.End;
9060   }
9061 }
9062 
9063 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
9064     VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
9065     const MapVector<Instruction *, Instruction *> &SinkAfter) {
9066 
9067   SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
9068 
9069   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
9070 
9071   // ---------------------------------------------------------------------------
9072   // Pre-construction: record ingredients whose recipes we'll need to further
9073   // process after constructing the initial VPlan.
9074   // ---------------------------------------------------------------------------
9075 
9076   // Mark instructions we'll need to sink later and their targets as
9077   // ingredients whose recipe we'll need to record.
9078   for (auto &Entry : SinkAfter) {
9079     RecipeBuilder.recordRecipeOf(Entry.first);
9080     RecipeBuilder.recordRecipeOf(Entry.second);
9081   }
9082   for (auto &Reduction : CM.getInLoopReductionChains()) {
9083     PHINode *Phi = Reduction.first;
9084     RecurKind Kind = Legal->getReductionVars()[Phi].getRecurrenceKind();
9085     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9086 
9087     RecipeBuilder.recordRecipeOf(Phi);
9088     for (auto &R : ReductionOperations) {
9089       RecipeBuilder.recordRecipeOf(R);
9090       // For min/max reductions, where we have a pair of icmp/select, we also
9091       // need to record the ICmp recipe, so it can be removed later.
9092       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
9093         RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
9094     }
9095   }
9096 
9097   // For each interleave group which is relevant for this (possibly trimmed)
9098   // Range, add it to the set of groups to be later applied to the VPlan and add
9099   // placeholders for its members' Recipes which we'll be replacing with a
9100   // single VPInterleaveRecipe.
9101   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
9102     auto applyIG = [IG, this](ElementCount VF) -> bool {
9103       return (VF.isVector() && // Query is illegal for VF == 1
9104               CM.getWideningDecision(IG->getInsertPos(), VF) ==
9105                   LoopVectorizationCostModel::CM_Interleave);
9106     };
9107     if (!getDecisionAndClampRange(applyIG, Range))
9108       continue;
9109     InterleaveGroups.insert(IG);
9110     for (unsigned i = 0; i < IG->getFactor(); i++)
9111       if (Instruction *Member = IG->getMember(i))
9112         RecipeBuilder.recordRecipeOf(Member);
9113   }
9114 
9115   // ---------------------------------------------------------------------------
9116   // Build initial VPlan: Scan the body of the loop in a topological order to
9117   // visit each basic block after having visited its predecessor basic blocks.
9118   // ---------------------------------------------------------------------------
9119 
9120   // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
9121   auto Plan = std::make_unique<VPlan>();
9122   VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
9123   Plan->setEntry(VPBB);
9124 
9125   // Scan the body of the loop in a topological order to visit each basic block
9126   // after having visited its predecessor basic blocks.
9127   LoopBlocksDFS DFS(OrigLoop);
9128   DFS.perform(LI);
9129 
9130   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
9131     // Relevant instructions from basic block BB will be grouped into VPRecipe
9132     // ingredients and fill a new VPBasicBlock.
9133     unsigned VPBBsForBB = 0;
9134     auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
9135     VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
9136     VPBB = FirstVPBBForBB;
9137     Builder.setInsertPoint(VPBB);
9138 
9139     // Introduce each ingredient into VPlan.
9140     // TODO: Model and preserve debug intrinsics in VPlan.
9141     for (Instruction &I : BB->instructionsWithoutDebug()) {
9142       Instruction *Instr = &I;
9143 
9144       // First filter out irrelevant instructions, to ensure no recipes are
9145       // built for them.
9146       if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
9147         continue;
9148 
9149       SmallVector<VPValue *, 4> Operands;
9150       auto *Phi = dyn_cast<PHINode>(Instr);
9151       if (Phi && Phi->getParent() == OrigLoop->getHeader()) {
9152         Operands.push_back(Plan->getOrAddVPValue(
9153             Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())));
9154       } else {
9155         auto OpRange = Plan->mapToVPValues(Instr->operands());
9156         Operands = {OpRange.begin(), OpRange.end()};
9157       }
9158       if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe(
9159               Instr, Operands, Range, Plan)) {
9160         // If Instr can be simplified to an existing VPValue, use it.
9161         if (RecipeOrValue.is<VPValue *>()) {
9162           auto *VPV = RecipeOrValue.get<VPValue *>();
9163           Plan->addVPValue(Instr, VPV);
9164           // If the re-used value is a recipe, register the recipe for the
9165           // instruction, in case the recipe for Instr needs to be recorded.
9166           if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef()))
9167             RecipeBuilder.setRecipe(Instr, R);
9168           continue;
9169         }
9170         // Otherwise, add the new recipe.
9171         VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>();
9172         for (auto *Def : Recipe->definedValues()) {
9173           auto *UV = Def->getUnderlyingValue();
9174           Plan->addVPValue(UV, Def);
9175         }
9176 
9177         RecipeBuilder.setRecipe(Instr, Recipe);
9178         VPBB->appendRecipe(Recipe);
9179         continue;
9180       }
9181 
9182       // Otherwise, if all widening options failed, Instruction is to be
9183       // replicated. This may create a successor for VPBB.
9184       VPBasicBlock *NextVPBB =
9185           RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan);
9186       if (NextVPBB != VPBB) {
9187         VPBB = NextVPBB;
9188         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
9189                                     : "");
9190       }
9191     }
9192   }
9193 
9194   RecipeBuilder.fixHeaderPhis();
9195 
9196   // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks
9197   // may also be empty, such as the last one (VPBB), reflecting original
9198   // basic blocks with no recipes.
9199   VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
9200   assert(PreEntry->empty() && "Expecting empty pre-entry block.");
9201   VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
9202   VPBlockUtils::disconnectBlocks(PreEntry, Entry);
9203   delete PreEntry;
9204 
9205   // ---------------------------------------------------------------------------
9206   // Transform initial VPlan: Apply previously taken decisions, in order, to
9207   // bring the VPlan to its final state.
9208   // ---------------------------------------------------------------------------
9209 
9210   // Apply Sink-After legal constraints.
9211   for (auto &Entry : SinkAfter) {
9212     VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
9213     VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
9214 
9215     auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * {
9216       auto *Region =
9217           dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent());
9218       if (Region && Region->isReplicator()) {
9219         assert(Region->getNumSuccessors() == 1 &&
9220                Region->getNumPredecessors() == 1 && "Expected SESE region!");
9221         assert(R->getParent()->size() == 1 &&
9222                "A recipe in an original replicator region must be the only "
9223                "recipe in its block");
9224         return Region;
9225       }
9226       return nullptr;
9227     };
9228     auto *TargetRegion = GetReplicateRegion(Target);
9229     auto *SinkRegion = GetReplicateRegion(Sink);
9230     if (!SinkRegion) {
9231       // If the sink source is not a replicate region, sink the recipe directly.
9232       if (TargetRegion) {
9233         // The target is in a replication region, make sure to move Sink to
9234         // the block after it, not into the replication region itself.
9235         VPBasicBlock *NextBlock =
9236             cast<VPBasicBlock>(TargetRegion->getSuccessors().front());
9237         Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi());
9238       } else
9239         Sink->moveAfter(Target);
9240       continue;
9241     }
9242 
9243     // The sink source is in a replicate region. Unhook the region from the CFG.
9244     auto *SinkPred = SinkRegion->getSinglePredecessor();
9245     auto *SinkSucc = SinkRegion->getSingleSuccessor();
9246     VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion);
9247     VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc);
9248     VPBlockUtils::connectBlocks(SinkPred, SinkSucc);
9249 
9250     if (TargetRegion) {
9251       // The target recipe is also in a replicate region, move the sink region
9252       // after the target region.
9253       auto *TargetSucc = TargetRegion->getSingleSuccessor();
9254       VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc);
9255       VPBlockUtils::connectBlocks(TargetRegion, SinkRegion);
9256       VPBlockUtils::connectBlocks(SinkRegion, TargetSucc);
9257     } else {
9258       // The sink source is in a replicate region, we need to move the whole
9259       // replicate region, which should only contain a single recipe in the main
9260       // block.
9261       auto *SplitBlock =
9262           Target->getParent()->splitAt(std::next(Target->getIterator()));
9263 
9264       auto *SplitPred = SplitBlock->getSinglePredecessor();
9265 
9266       VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock);
9267       VPBlockUtils::connectBlocks(SplitPred, SinkRegion);
9268       VPBlockUtils::connectBlocks(SinkRegion, SplitBlock);
9269       if (VPBB == SplitPred)
9270         VPBB = SplitBlock;
9271     }
9272   }
9273 
9274   // Interleave memory: for each Interleave Group we marked earlier as relevant
9275   // for this VPlan, replace the Recipes widening its memory instructions with a
9276   // single VPInterleaveRecipe at its insertion point.
9277   for (auto IG : InterleaveGroups) {
9278     auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
9279         RecipeBuilder.getRecipe(IG->getInsertPos()));
9280     SmallVector<VPValue *, 4> StoredValues;
9281     for (unsigned i = 0; i < IG->getFactor(); ++i)
9282       if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i)))
9283         StoredValues.push_back(Plan->getOrAddVPValue(SI->getOperand(0)));
9284 
9285     auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues,
9286                                         Recipe->getMask());
9287     VPIG->insertBefore(Recipe);
9288     unsigned J = 0;
9289     for (unsigned i = 0; i < IG->getFactor(); ++i)
9290       if (Instruction *Member = IG->getMember(i)) {
9291         if (!Member->getType()->isVoidTy()) {
9292           VPValue *OriginalV = Plan->getVPValue(Member);
9293           Plan->removeVPValueFor(Member);
9294           Plan->addVPValue(Member, VPIG->getVPValue(J));
9295           OriginalV->replaceAllUsesWith(VPIG->getVPValue(J));
9296           J++;
9297         }
9298         RecipeBuilder.getRecipe(Member)->eraseFromParent();
9299       }
9300   }
9301 
9302   // Adjust the recipes for any in-loop reductions.
9303   adjustRecipesForInLoopReductions(Plan, RecipeBuilder, Range.Start);
9304 
9305   // Finally, if tail is folded by masking, introduce selects between the phi
9306   // and the live-out instruction of each reduction, at the end of the latch.
9307   if (CM.foldTailByMasking() && !Legal->getReductionVars().empty()) {
9308     Builder.setInsertPoint(VPBB);
9309     auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
9310     for (auto &Reduction : Legal->getReductionVars()) {
9311       if (CM.isInLoopReduction(Reduction.first))
9312         continue;
9313       VPValue *Phi = Plan->getOrAddVPValue(Reduction.first);
9314       VPValue *Red = Plan->getOrAddVPValue(Reduction.second.getLoopExitInstr());
9315       Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi});
9316     }
9317   }
9318 
9319   VPlanTransforms::sinkScalarOperands(*Plan);
9320   VPlanTransforms::mergeReplicateRegions(*Plan);
9321 
9322   std::string PlanName;
9323   raw_string_ostream RSO(PlanName);
9324   ElementCount VF = Range.Start;
9325   Plan->addVF(VF);
9326   RSO << "Initial VPlan for VF={" << VF;
9327   for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) {
9328     Plan->addVF(VF);
9329     RSO << "," << VF;
9330   }
9331   RSO << "},UF>=1";
9332   RSO.flush();
9333   Plan->setName(PlanName);
9334 
9335   return Plan;
9336 }
9337 
9338 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
9339   // Outer loop handling: outer loops may require CFG and instruction level
9340   // transformations before even evaluating whether vectorization is profitable.
9341   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
9342   // the vectorization pipeline.
9343   assert(!OrigLoop->isInnermost());
9344   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
9345 
9346   // Create new empty VPlan
9347   auto Plan = std::make_unique<VPlan>();
9348 
9349   // Build hierarchical CFG
9350   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
9351   HCFGBuilder.buildHierarchicalCFG();
9352 
9353   for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
9354        VF *= 2)
9355     Plan->addVF(VF);
9356 
9357   if (EnableVPlanPredication) {
9358     VPlanPredicator VPP(*Plan);
9359     VPP.predicate();
9360 
9361     // Avoid running transformation to recipes until masked code generation in
9362     // VPlan-native path is in place.
9363     return Plan;
9364   }
9365 
9366   SmallPtrSet<Instruction *, 1> DeadInstructions;
9367   VPlanTransforms::VPInstructionsToVPRecipes(OrigLoop, Plan,
9368                                              Legal->getInductionVars(),
9369                                              DeadInstructions, *PSE.getSE());
9370   return Plan;
9371 }
9372 
9373 // Adjust the recipes for any in-loop reductions. The chain of instructions
9374 // leading from the loop exit instr to the phi needs to be converted to
9375 // reductions, with one operand being vector and the other being the scalar
9376 // reduction chain.
9377 void LoopVectorizationPlanner::adjustRecipesForInLoopReductions(
9378     VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder, ElementCount MinVF) {
9379   for (auto &Reduction : CM.getInLoopReductionChains()) {
9380     PHINode *Phi = Reduction.first;
9381     RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
9382     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9383 
9384     if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc))
9385       continue;
9386 
9387     // ReductionOperations are ordered top-down from the phi's use to the
9388     // LoopExitValue. We keep track of the previous item (the Chain) to tell
9389     // which of the two operands will remain scalar and which will be reduced.
9390     // For minmax the chain will be the select instructions.
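    // For example (an illustrative sketch), given the chain
    //   %red = phi ... ; %s1 = add %red, %a ; %s2 = add %s1, %b
    // both adds become VPReductionRecipes that reduce their vector operand
    // (%a resp. %b) into the scalar chain value (%red resp. %s1).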
9391     Instruction *Chain = Phi;
9392     for (Instruction *R : ReductionOperations) {
9393       VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
9394       RecurKind Kind = RdxDesc.getRecurrenceKind();
9395 
9396       VPValue *ChainOp = Plan->getVPValue(Chain);
9397       unsigned FirstOpId;
9398       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9399         assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
9400                "Expected to replace a VPWidenSelectSC");
9401         FirstOpId = 1;
9402       } else {
9403         assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe)) &&
9404                "Expected to replace a VPWidenSC");
9405         FirstOpId = 0;
9406       }
9407       unsigned VecOpId =
9408           R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
9409       VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));
9410 
9411       auto *CondOp = CM.foldTailByMasking()
9412                          ? RecipeBuilder.createBlockInMask(R->getParent(), Plan)
9413                          : nullptr;
9414       VPReductionRecipe *RedRecipe = new VPReductionRecipe(
9415           &RdxDesc, R, ChainOp, VecOp, CondOp, TTI);
9416       WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
9417       Plan->removeVPValueFor(R);
9418       Plan->addVPValue(R, RedRecipe);
9419       WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator());
9420       WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
9421       WidenRecipe->eraseFromParent();
9422 
9423       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9424         VPRecipeBase *CompareRecipe =
9425             RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
9426         assert(isa<VPWidenRecipe>(CompareRecipe) &&
9427                "Expected to replace a VPWidenSC");
9428         assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 &&
9429                "Expected no remaining users");
9430         CompareRecipe->eraseFromParent();
9431       }
9432       Chain = R;
9433     }
9434   }
9435 }
9436 
9437 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
9438 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
9439                                VPSlotTracker &SlotTracker) const {
9440   O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
9441   IG->getInsertPos()->printAsOperand(O, false);
9442   O << ", ";
9443   getAddr()->printAsOperand(O, SlotTracker);
9444   VPValue *Mask = getMask();
9445   if (Mask) {
9446     O << ", ";
9447     Mask->printAsOperand(O, SlotTracker);
9448   }
9449   for (unsigned i = 0; i < IG->getFactor(); ++i)
9450     if (Instruction *I = IG->getMember(i))
9451       O << "\n" << Indent << "  " << VPlanIngredient(I) << " " << i;
9452 }
9453 #endif
9454 
9455 void VPWidenCallRecipe::execute(VPTransformState &State) {
9456   State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this,
9457                                   *this, State);
9458 }
9459 
9460 void VPWidenSelectRecipe::execute(VPTransformState &State) {
9461   State.ILV->widenSelectInstruction(*cast<SelectInst>(getUnderlyingInstr()),
9462                                     this, *this, InvariantCond, State);
9463 }
9464 
9465 void VPWidenRecipe::execute(VPTransformState &State) {
9466   State.ILV->widenInstruction(*getUnderlyingInstr(), this, *this, State);
9467 }
9468 
9469 void VPWidenGEPRecipe::execute(VPTransformState &State) {
9470   State.ILV->widenGEP(cast<GetElementPtrInst>(getUnderlyingInstr()), this,
9471                       *this, State.UF, State.VF, IsPtrLoopInvariant,
9472                       IsIndexLoopInvariant, State);
9473 }
9474 
9475 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
9476   assert(!State.Instance && "Int or FP induction being replicated.");
9477   State.ILV->widenIntOrFpInduction(IV, getStartValue()->getLiveInIRValue(),
9478                                    getTruncInst(), getVPValue(0),
9479                                    getCastValue(), State);
9480 }
9481 
9482 void VPWidenPHIRecipe::execute(VPTransformState &State) {
9483   State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), RdxDesc,
9484                                  this, State);
9485 }
9486 
9487 void VPBlendRecipe::execute(VPTransformState &State) {
9488   State.ILV->setDebugLocFromInst(State.Builder, Phi);
9489   // We know that all PHIs in non-header blocks are converted into
9490   // selects, so we don't have to worry about the insertion order and we
9491   // can just use the builder.
9492   // At this point we generate the predication tree. There may be
9493   // duplications since this is a simple recursive scan, but future
9494   // optimizations will clean it up.
9495 
9496   unsigned NumIncoming = getNumIncomingValues();
9497 
9498   // Generate a sequence of selects of the form:
9499   // SELECT(Mask3, In3,
9500   //        SELECT(Mask2, In2,
9501   //               SELECT(Mask1, In1,
9502   //                      In0)))
9503   // Note that Mask0 is never used: lanes for which no path reaches this phi,
9504   // and which are essentially undef, are taken from In0.
9505   InnerLoopVectorizer::VectorParts Entry(State.UF);
9506   for (unsigned In = 0; In < NumIncoming; ++In) {
9507     for (unsigned Part = 0; Part < State.UF; ++Part) {
9508       // We might have single edge PHIs (blocks) - use an identity
9509       // 'select' for the first PHI operand.
9510       Value *In0 = State.get(getIncomingValue(In), Part);
9511       if (In == 0)
9512         Entry[Part] = In0; // Initialize with the first incoming value.
9513       else {
9514         // Select between the current value and the previous incoming edge
9515         // based on the incoming mask.
9516         Value *Cond = State.get(getMask(In), Part);
9517         Entry[Part] =
9518             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
9519       }
9520     }
9521   }
9522   for (unsigned Part = 0; Part < State.UF; ++Part)
9523     State.set(this, Entry[Part], Part);
9524 }
9525 
9526 void VPInterleaveRecipe::execute(VPTransformState &State) {
9527   assert(!State.Instance && "Interleave group being replicated.");
9528   State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(),
9529                                       getStoredValues(), getMask());
9530 }
9531 
9532 void VPReductionRecipe::execute(VPTransformState &State) {
9533   assert(!State.Instance && "Reduction being replicated.");
9534   Value *PrevInChain = State.get(getChainOp(), 0);
9535   for (unsigned Part = 0; Part < State.UF; ++Part) {
9536     RecurKind Kind = RdxDesc->getRecurrenceKind();
9537     bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc);
9538     Value *NewVecOp = State.get(getVecOp(), Part);
9539     if (VPValue *Cond = getCondOp()) {
9540       Value *NewCond = State.get(Cond, Part);
9541       VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
9542       Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
9543           Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags());
9544       Constant *IdenVec =
9545           ConstantVector::getSplat(VecTy->getElementCount(), Iden);
9546       Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
9547       NewVecOp = Select;
9548     }
9549     Value *NewRed;
9550     Value *NextInChain;
9551     if (IsOrdered) {
9552       if (State.VF.isVector())
9553         NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp,
9554                                         PrevInChain);
9555       else
9556         NewRed = State.Builder.CreateBinOp(
9557             (Instruction::BinaryOps)getUnderlyingInstr()->getOpcode(),
9558             PrevInChain, NewVecOp);
9559       PrevInChain = NewRed;
9560     } else {
9561       PrevInChain = State.get(getChainOp(), Part);
9562       NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
9563     }
9564     if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9565       NextInChain =
9566           createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
9567                          NewRed, PrevInChain);
9568     } else if (IsOrdered)
9569       NextInChain = NewRed;
9570     else {
9571       NextInChain = State.Builder.CreateBinOp(
9572           (Instruction::BinaryOps)getUnderlyingInstr()->getOpcode(), NewRed,
9573           PrevInChain);
9574     }
9575     State.set(this, NextInChain, Part);
9576   }
9577 }
9578 
9579 void VPReplicateRecipe::execute(VPTransformState &State) {
9580   if (State.Instance) { // Generate a single instance.
9581     assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
9582     State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this,
9583                                     *State.Instance, IsPredicated, State);
9584     // Insert scalar instance packing it into a vector.
9585     if (AlsoPack && State.VF.isVector()) {
9586       // If we're constructing lane 0, initialize to start from poison.
9587       if (State.Instance->Lane.isFirstLane()) {
9588         assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
9589         Value *Poison = PoisonValue::get(
9590             VectorType::get(getUnderlyingValue()->getType(), State.VF));
9591         State.set(this, Poison, State.Instance->Part);
9592       }
9593       State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
9594     }
9595     return;
9596   }
9597 
9598   // Generate scalar instances for all VF lanes of all UF parts, unless the
9599   // instruction is uniform, in which case generate only the first lane for each
9600   // of the UF parts.
9601   unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
9602   assert((!State.VF.isScalable() || IsUniform) &&
9603          "Can't scalarize a scalable vector");
9604   for (unsigned Part = 0; Part < State.UF; ++Part)
9605     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
9606       State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this,
9607                                       VPIteration(Part, Lane), IsPredicated,
9608                                       State);
9609 }
9610 
9611 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
9612   assert(State.Instance && "Branch on Mask works only on single instance.");
9613 
9614   unsigned Part = State.Instance->Part;
9615   unsigned Lane = State.Instance->Lane.getKnownLane();
9616 
9617   Value *ConditionBit = nullptr;
9618   VPValue *BlockInMask = getMask();
9619   if (BlockInMask) {
9620     ConditionBit = State.get(BlockInMask, Part);
9621     if (ConditionBit->getType()->isVectorTy())
9622       ConditionBit = State.Builder.CreateExtractElement(
9623           ConditionBit, State.Builder.getInt32(Lane));
9624   } else // Block in mask is all-one.
9625     ConditionBit = State.Builder.getTrue();
9626 
9627   // Replace the temporary unreachable terminator with a new conditional branch,
9628   // whose two destinations will be set later when they are created.
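  // For illustration, the generated terminator looks roughly like
  //   br i1 %ConditionBit, label <predicated block>, label <continue block>
  // with both labels filled in later (names here are placeholders).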
9629   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
9630   assert(isa<UnreachableInst>(CurrentTerminator) &&
9631          "Expected to replace unreachable terminator with conditional branch.");
9632   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
9633   CondBr->setSuccessor(0, nullptr);
9634   ReplaceInstWithInst(CurrentTerminator, CondBr);
9635 }
9636 
9637 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
9638   assert(State.Instance && "Predicated instruction PHI works per instance.");
9639   Instruction *ScalarPredInst =
9640       cast<Instruction>(State.get(getOperand(0), *State.Instance));
9641   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
9642   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
9643   assert(PredicatingBB && "Predicated block has no single predecessor.");
9644   assert(isa<VPReplicateRecipe>(getOperand(0)) &&
9645          "operand must be VPReplicateRecipe");
9646 
9647   // By current pack/unpack logic we need to generate only a single phi node: if
9648   // a vector value for the predicated instruction exists at this point it means
9649   // the instruction has vector users only, and a phi for the vector value is
9650   // needed. In this case the recipe of the predicated instruction is marked to
9651   // also do that packing, thereby "hoisting" the insert-element sequence.
9652   // Otherwise, a phi node for the scalar value is needed.
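  // For illustration, the vector-value case below creates roughly
  //   %vphi = phi <VF x ty> [ <unmodified vector>, %PredicatingBB ],
  //                         [ <vector with inserted element>, %PredicatedBB ]
  // (placeholder names), matching the incoming values added below.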
9653   unsigned Part = State.Instance->Part;
9654   if (State.hasVectorValue(getOperand(0), Part)) {
9655     Value *VectorValue = State.get(getOperand(0), Part);
9656     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
9657     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
9658     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
9659     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
9660     if (State.hasVectorValue(this, Part))
9661       State.reset(this, VPhi, Part);
9662     else
9663       State.set(this, VPhi, Part);
9664     // NOTE: Currently we need to update the value of the operand, so the next
9665     // predicated iteration inserts its generated value in the correct vector.
9666     State.reset(getOperand(0), VPhi, Part);
9667   } else {
9668     Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType();
9669     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
9670     Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
9671                      PredicatingBB);
9672     Phi->addIncoming(ScalarPredInst, PredicatedBB);
9673     if (State.hasScalarValue(this, *State.Instance))
9674       State.reset(this, Phi, *State.Instance);
9675     else
9676       State.set(this, Phi, *State.Instance);
9677     // NOTE: Currently we need to update the value of the operand, so the next
9678     // predicated iteration inserts its generated value in the correct vector.
9679     State.reset(getOperand(0), Phi, *State.Instance);
9680   }
9681 }
9682 
9683 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
9684   VPValue *StoredValue = isStore() ? getStoredValue() : nullptr;
9685   State.ILV->vectorizeMemoryInstruction(
9686       &Ingredient, State, StoredValue ? nullptr : getVPSingleValue(), getAddr(),
9687       StoredValue, getMask());
9688 }
9689 
// Determine how to lower the scalar epilogue, which depends on 1) optimizing
// for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
// predication, and 4) a TTI hook that analyzes whether the loop is suitable
// for predication.
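// For example, predication can be requested per loop with a hint such as
// "#pragma clang loop vectorize_predicate(enable)", which reaches this
// function through Hints.getPredicate(), or globally via the
// PreferPredicateOverEpilogue command-line option queried below.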
9694 static ScalarEpilogueLowering getScalarEpilogueLowering(
9695     Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
9696     BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
9697     AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
9698     LoopVectorizationLegality &LVL) {
9699   // 1) OptSize takes precedence over all other options, i.e. if this is set,
9700   // don't look at hints or options, and don't request a scalar epilogue.
9701   // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
9702   // LoopAccessInfo (due to code dependency and not being able to reliably get
9703   // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
9704   // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
9705   // versioning when the vectorization is forced, unlike hasOptSize. So revert
9706   // back to the old way and vectorize with versioning when forced. See D81345.)
9707   if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
9708                                                       PGSOQueryType::IRPass) &&
9709                           Hints.getForce() != LoopVectorizeHints::FK_Enabled))
9710     return CM_ScalarEpilogueNotAllowedOptSize;
9711 
9712   // 2) If set, obey the directives
9713   if (PreferPredicateOverEpilogue.getNumOccurrences()) {
9714     switch (PreferPredicateOverEpilogue) {
9715     case PreferPredicateTy::ScalarEpilogue:
9716       return CM_ScalarEpilogueAllowed;
9717     case PreferPredicateTy::PredicateElseScalarEpilogue:
9718       return CM_ScalarEpilogueNotNeededUsePredicate;
9719     case PreferPredicateTy::PredicateOrDontVectorize:
9720       return CM_ScalarEpilogueNotAllowedUsePredicate;
9721     };
9722   }
9723 
9724   // 3) If set, obey the hints
9725   switch (Hints.getPredicate()) {
9726   case LoopVectorizeHints::FK_Enabled:
9727     return CM_ScalarEpilogueNotNeededUsePredicate;
9728   case LoopVectorizeHints::FK_Disabled:
9729     return CM_ScalarEpilogueAllowed;
9730   };
9731 
  // 4) If the TTI hook indicates this is profitable, request predication.
9733   if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
9734                                        LVL.getLAI()))
9735     return CM_ScalarEpilogueNotNeededUsePredicate;
9736 
9737   return CM_ScalarEpilogueAllowed;
9738 }
9739 
9740 Value *VPTransformState::get(VPValue *Def, unsigned Part) {
  // If Values have been set for this Def, return the one relevant for \p Part.
9742   if (hasVectorValue(Def, Part))
9743     return Data.PerPartOutput[Def][Part];
9744 
9745   if (!hasScalarValue(Def, {Part, 0})) {
9746     Value *IRV = Def->getLiveInIRValue();
9747     Value *B = ILV->getBroadcastInstrs(IRV);
9748     set(Def, B, Part);
9749     return B;
9750   }
9751 
9752   Value *ScalarValue = get(Def, {Part, 0});
9753   // If we aren't vectorizing, we can just copy the scalar map values over
9754   // to the vector map.
9755   if (VF.isScalar()) {
9756     set(Def, ScalarValue, Part);
9757     return ScalarValue;
9758   }
9759 
9760   auto *RepR = dyn_cast<VPReplicateRecipe>(Def);
9761   bool IsUniform = RepR && RepR->isUniform();
9762 
9763   unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
9764   // Check if there is a scalar value for the selected lane.
9765   if (!hasScalarValue(Def, {Part, LastLane})) {
9766     // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform.
9767     assert(isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) &&
9768            "unexpected recipe found to be invariant");
9769     IsUniform = true;
9770     LastLane = 0;
9771   }
9772 
9773   auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
9774   // Set the insert point after the last scalarized instruction or after the
9775   // last PHI, if LastInst is a PHI. This ensures the insertelement sequence
9776   // will directly follow the scalar definitions.
9777   auto OldIP = Builder.saveIP();
9778   auto NewIP =
9779       isa<PHINode>(LastInst)
9780           ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI())
9781           : std::next(BasicBlock::iterator(LastInst));
9782   Builder.SetInsertPoint(&*NewIP);
9783 
9784   // However, if we are vectorizing, we need to construct the vector values.
9785   // If the value is known to be uniform after vectorization, we can just
9786   // broadcast the scalar value corresponding to lane zero for each unroll
9787   // iteration. Otherwise, we construct the vector values using
9788   // insertelement instructions. Since the resulting vectors are stored in
9789   // State, we will only generate the insertelements once.
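  // For illustration, the non-uniform case below builds each part roughly as
  //   %v0 = insertelement <VF x ty> poison, ty %lane0, i32 0
  //   %v1 = insertelement <VF x ty> %v0, ty %lane1, i32 1
  //   ... up to lane VF-1 (placeholder names).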
9790   Value *VectorValue = nullptr;
9791   if (IsUniform) {
9792     VectorValue = ILV->getBroadcastInstrs(ScalarValue);
9793     set(Def, VectorValue, Part);
9794   } else {
    // Initialize packing with insertelements to start from poison.
    assert(!VF.isScalable() && "VF is assumed to be non scalable.");
    Value *Poison = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
    set(Def, Poison, Part);
9799     for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
9800       ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
9801     VectorValue = get(Def, Part);
9802   }
9803   Builder.restoreIP(OldIP);
9804   return VectorValue;
9805 }
9806 
// Process the loop in the VPlan-native vectorization path. This path builds
// VPlan upfront in the vectorization pipeline, which allows applying
// VPlan-to-VPlan transformations from the very beginning without modifying the
// input LLVM IR.
9811 static bool processLoopInVPlanNativePath(
9812     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
9813     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
9814     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
9815     OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
9816     ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints,
9817     LoopVectorizationRequirements &Requirements) {
9818 
9819   if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
9820     LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
9821     return false;
9822   }
9823   assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
9824   Function *F = L->getHeader()->getParent();
9825   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
9826 
9827   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
9828       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
9829 
9830   LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
9831                                 &Hints, IAI);
9832   // Use the planner for outer loop vectorization.
9833   // TODO: CM is not used at this point inside the planner. Turn CM into an
9834   // optional argument if we don't need it in the future.
9835   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints,
9836                                Requirements, ORE);
9837 
9838   // Get user vectorization factor.
9839   ElementCount UserVF = Hints.getWidth();
9840 
9841   // Plan how to best vectorize, return the best VF and its cost.
9842   const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
9843 
9844   // If we are stress testing VPlan builds, do not attempt to generate vector
9845   // code. Masked vector code generation support will follow soon.
9846   // Also, do not attempt to vectorize if no vector code will be produced.
9847   if (VPlanBuildStressTest || EnableVPlanPredication ||
9848       VectorizationFactor::Disabled() == VF)
9849     return false;
9850 
9851   LVP.setBestPlan(VF.Width, 1);
9852 
9853   {
9854     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
9855                              F->getParent()->getDataLayout());
9856     InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
9857                            &CM, BFI, PSI, Checks);
9858     LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
9859                       << L->getHeader()->getParent()->getName() << "\"\n");
9860     LVP.executePlan(LB, DT);
9861   }
9862 
9863   // Mark the loop as already vectorized to avoid vectorizing again.
9864   Hints.setAlreadyVectorized();
9865   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
9866   return true;
9867 }
9868 
// Emit a remark if there are stores to floats that required a floating point
// extension. If the vectorized loop was generated in double precision, there
// will be a performance penalty from the conversion overhead and the change in
// the vector width.
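// For example (illustrative source only), a loop body such as
//   float *A; double D;
//   A[i] += D;
// extends A[i] to double, truncates the sum back to float and stores it,
// leaving an fpext in the chain that the walk below will flag.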
9873 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
9874   SmallVector<Instruction *, 4> Worklist;
9875   for (BasicBlock *BB : L->getBlocks()) {
9876     for (Instruction &Inst : *BB) {
9877       if (auto *S = dyn_cast<StoreInst>(&Inst)) {
9878         if (S->getValueOperand()->getType()->isFloatTy())
9879           Worklist.push_back(S);
9880       }
9881     }
9882   }
9883 
  // Traverse the floating point stores upwards, searching for floating point
  // conversions.
9886   SmallPtrSet<const Instruction *, 4> Visited;
9887   SmallPtrSet<const Instruction *, 4> EmittedRemark;
9888   while (!Worklist.empty()) {
9889     auto *I = Worklist.pop_back_val();
9890     if (!L->contains(I))
9891       continue;
9892     if (!Visited.insert(I).second)
9893       continue;
9894 
9895     // Emit a remark if the floating point store required a floating
9896     // point conversion.
9897     // TODO: More work could be done to identify the root cause such as a
9898     // constant or a function return type and point the user to it.
9899     if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
9900       ORE->emit([&]() {
9901         return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
9902                                           I->getDebugLoc(), L->getHeader())
9903                << "floating point conversion changes vector width. "
9904                << "Mixed floating point precision requires an up/down "
9905                << "cast that will negatively impact performance.";
9906       });
9907 
9908     for (Use &Op : I->operands())
9909       if (auto *OpI = dyn_cast<Instruction>(Op))
9910         Worklist.push_back(OpI);
9911   }
9912 }
9913 
9914 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
9915     : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
9916                                !EnableLoopInterleaving),
9917       VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
9918                               !EnableLoopVectorization) {}
9919 
9920 bool LoopVectorizePass::processLoop(Loop *L) {
9921   assert((EnableVPlanNativePath || L->isInnermost()) &&
9922          "VPlan-native path is not enabled. Only process inner loops.");
9923 
9924 #ifndef NDEBUG
9925   const std::string DebugLocStr = getDebugLocString(L);
9926 #endif /* NDEBUG */
9927 
9928   LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
9929                     << L->getHeader()->getParent()->getName() << "\" from "
9930                     << DebugLocStr << "\n");
9931 
9932   LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE);
9933 
9934   LLVM_DEBUG(
9935       dbgs() << "LV: Loop hints:"
9936              << " force="
9937              << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
9938                      ? "disabled"
9939                      : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
9940                             ? "enabled"
9941                             : "?"))
9942              << " width=" << Hints.getWidth()
9943              << " interleave=" << Hints.getInterleave() << "\n");
9944 
9945   // Function containing loop
9946   Function *F = L->getHeader()->getParent();
9947 
  // Looking at the diagnostic output is the only way to determine if a loop
  // was vectorized (other than looking at the IR or machine code), so it
  // is important to generate an optimization remark for each loop. Most of
  // these messages are generated as OptimizationRemarkAnalysis. Remarks
  // generated as OptimizationRemark and OptimizationRemarkMissed are less
  // verbose, reporting vectorized loops and unvectorized loops that may
  // benefit from vectorization, respectively.
9955 
9956   if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
9957     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
9958     return false;
9959   }
9960 
9961   PredicatedScalarEvolution PSE(*SE, *L);
9962 
9963   // Check if it is legal to vectorize the loop.
9964   LoopVectorizationRequirements Requirements;
9965   LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
9966                                 &Requirements, &Hints, DB, AC, BFI, PSI);
9967   if (!LVL.canVectorize(EnableVPlanNativePath)) {
9968     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
9969     Hints.emitRemarkWithHints();
9970     return false;
9971   }
9972 
9973   // Check the function attributes and profiles to find out if this function
9974   // should be optimized for size.
9975   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
9976       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);
9977 
9978   // Entrance to the VPlan-native vectorization path. Outer loops are processed
9979   // here. They may require CFG and instruction level transformations before
9980   // even evaluating whether vectorization is profitable. Since we cannot modify
9981   // the incoming IR, we need to build VPlan upfront in the vectorization
9982   // pipeline.
9983   if (!L->isInnermost())
9984     return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
9985                                         ORE, BFI, PSI, Hints, Requirements);
9986 
9987   assert(L->isInnermost() && "Inner loop expected.");
9988 
9989   // Check the loop for a trip count threshold: vectorize loops with a tiny trip
9990   // count by optimizing for size, to minimize overheads.
9991   auto ExpectedTC = getSmallBestKnownTC(*SE, L);
9992   if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
9993     LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
9994                       << "This loop is worth vectorizing only if no scalar "
9995                       << "iteration overheads are incurred.");
9996     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
9997       LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
9998     else {
9999       LLVM_DEBUG(dbgs() << "\n");
10000       SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
10001     }
10002   }
10003 
10004   // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem correct -- what if the loop is an integer
  // loop and the vector instructions selected are purely integer vector
  // instructions?
10008   if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
10009     reportVectorizationFailure(
10010         "Can't vectorize when the NoImplicitFloat attribute is used",
10011         "loop not vectorized due to NoImplicitFloat attribute",
10012         "NoImplicitFloat", ORE, L);
10013     Hints.emitRemarkWithHints();
10014     return false;
10015   }
10016 
10017   // Check if the target supports potentially unsafe FP vectorization.
10018   // FIXME: Add a check for the type of safety issue (denormal, signaling)
10019   // for the target we're vectorizing for, to make sure none of the
10020   // additional fp-math flags can help.
10021   if (Hints.isPotentiallyUnsafe() &&
10022       TTI->isFPVectorizationPotentiallyUnsafe()) {
10023     reportVectorizationFailure(
10024         "Potentially unsafe FP op prevents vectorization",
10025         "loop not vectorized due to unsafe FP support.",
10026         "UnsafeFP", ORE, L);
10027     Hints.emitRemarkWithHints();
10028     return false;
10029   }
10030 
10031   if (!LVL.canVectorizeFPMath(EnableStrictReductions)) {
10032     ORE->emit([&]() {
10033       auto *ExactFPMathInst = Requirements.getExactFPInst();
10034       return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
10035                                                  ExactFPMathInst->getDebugLoc(),
10036                                                  ExactFPMathInst->getParent())
10037              << "loop not vectorized: cannot prove it is safe to reorder "
10038                 "floating-point operations";
10039     });
10040     LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
10041                          "reorder floating-point operations\n");
10042     Hints.emitRemarkWithHints();
10043     return false;
10044   }
10045 
10046   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
10047   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
10048 
10049   // If an override option has been passed in for interleaved accesses, use it.
10050   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
10051     UseInterleaved = EnableInterleavedMemAccesses;
10052 
10053   // Analyze interleaved memory accesses.
10054   if (UseInterleaved) {
10055     IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
10056   }
10057 
10058   // Use the cost model.
10059   LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
10060                                 F, &Hints, IAI);
10061   CM.collectValuesToIgnore();
10062 
10063   // Use the planner for vectorization.
10064   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints,
10065                                Requirements, ORE);
10066 
10067   // Get user vectorization factor and interleave count.
10068   ElementCount UserVF = Hints.getWidth();
10069   unsigned UserIC = Hints.getInterleave();
10070 
10071   // Plan how to best vectorize, return the best VF and its cost.
10072   Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
10073 
10074   VectorizationFactor VF = VectorizationFactor::Disabled();
10075   unsigned IC = 1;
10076 
10077   if (MaybeVF) {
10078     VF = *MaybeVF;
10079     // Select the interleave count.
10080     IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue());
10081   }
10082 
10083   // Identify the diagnostic messages that should be produced.
10084   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
10085   bool VectorizeLoop = true, InterleaveLoop = true;
10086   if (VF.Width.isScalar()) {
10087     LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
10088     VecDiagMsg = std::make_pair(
10089         "VectorizationNotBeneficial",
10090         "the cost-model indicates that vectorization is not beneficial");
10091     VectorizeLoop = false;
10092   }
10093 
10094   if (!MaybeVF && UserIC > 1) {
10095     // Tell the user interleaving was avoided up-front, despite being explicitly
10096     // requested.
10097     LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
10098                          "interleaving should be avoided up front\n");
10099     IntDiagMsg = std::make_pair(
10100         "InterleavingAvoided",
10101         "Ignoring UserIC, because interleaving was avoided up front");
10102     InterleaveLoop = false;
10103   } else if (IC == 1 && UserIC <= 1) {
10104     // Tell the user interleaving is not beneficial.
10105     LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
10106     IntDiagMsg = std::make_pair(
10107         "InterleavingNotBeneficial",
10108         "the cost-model indicates that interleaving is not beneficial");
10109     InterleaveLoop = false;
10110     if (UserIC == 1) {
10111       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
10112       IntDiagMsg.second +=
10113           " and is explicitly disabled or interleave count is set to 1";
10114     }
10115   } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
10117     LLVM_DEBUG(
10118         dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
10119     IntDiagMsg = std::make_pair(
10120         "InterleavingBeneficialButDisabled",
10121         "the cost-model indicates that interleaving is beneficial "
10122         "but is explicitly disabled or interleave count is set to 1");
10123     InterleaveLoop = false;
10124   }
10125 
10126   // Override IC if user provided an interleave count.
10127   IC = UserIC > 0 ? UserIC : IC;
10128 
10129   // Emit diagnostic messages, if any.
10130   const char *VAPassName = Hints.vectorizeAnalysisPassName();
10131   if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
10133     ORE->emit([&]() {
10134       return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
10135                                       L->getStartLoc(), L->getHeader())
10136              << VecDiagMsg.second;
10137     });
10138     ORE->emit([&]() {
10139       return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
10140                                       L->getStartLoc(), L->getHeader())
10141              << IntDiagMsg.second;
10142     });
10143     return false;
10144   } else if (!VectorizeLoop && InterleaveLoop) {
10145     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10146     ORE->emit([&]() {
10147       return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
10148                                         L->getStartLoc(), L->getHeader())
10149              << VecDiagMsg.second;
10150     });
10151   } else if (VectorizeLoop && !InterleaveLoop) {
10152     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10153                       << ") in " << DebugLocStr << '\n');
10154     ORE->emit([&]() {
10155       return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
10156                                         L->getStartLoc(), L->getHeader())
10157              << IntDiagMsg.second;
10158     });
10159   } else if (VectorizeLoop && InterleaveLoop) {
10160     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10161                       << ") in " << DebugLocStr << '\n');
10162     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10163   }
10164 
10165   bool DisableRuntimeUnroll = false;
10166   MDNode *OrigLoopID = L->getLoopID();
10167   {
    // Optimistically generate runtime checks. Drop them if they turn out not
    // to be profitable. Limit the scope of Checks, so the cleanup happens
    // immediately after vector code generation is done.
10171     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10172                              F->getParent()->getDataLayout());
10173     if (!VF.Width.isScalar() || IC > 1)
10174       Checks.Create(L, *LVL.getLAI(), PSE.getUnionPredicate());
10175     LVP.setBestPlan(VF.Width, IC);
10176 
10177     using namespace ore;
10178     if (!VectorizeLoop) {
10179       assert(IC > 1 && "interleave count should not be 1 or 0");
10180       // If we decided that it is not legal to vectorize the loop, then
10181       // interleave it.
10182       InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
10183                                  &CM, BFI, PSI, Checks);
10184       LVP.executePlan(Unroller, DT);
10185 
10186       ORE->emit([&]() {
10187         return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
10188                                   L->getHeader())
10189                << "interleaved loop (interleaved count: "
10190                << NV("InterleaveCount", IC) << ")";
10191       });
10192     } else {
10193       // If we decided that it is *legal* to vectorize the loop, then do it.
10194 
10195       // Consider vectorizing the epilogue too if it's profitable.
10196       VectorizationFactor EpilogueVF =
10197           CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
10198       if (EpilogueVF.Width.isVector()) {
10199 
10200         // The first pass vectorizes the main loop and creates a scalar epilogue
10201         // to be vectorized by executing the plan (potentially with a different
10202         // factor) again shortly afterwards.
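        // Conceptually, execution then proceeds through the main vector loop
        // (width VF.Width, unrolled IC times), the epilogue vector loop (width
        // EpilogueVF, UF = 1), and finally a scalar remainder loop for any
        // leftover iterations.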
10203         EpilogueLoopVectorizationInfo EPI(VF.Width.getKnownMinValue(), IC,
10204                                           EpilogueVF.Width.getKnownMinValue(),
10205                                           1);
10206         EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE,
10207                                            EPI, &LVL, &CM, BFI, PSI, Checks);
10208 
10209         LVP.setBestPlan(EPI.MainLoopVF, EPI.MainLoopUF);
10210         LVP.executePlan(MainILV, DT);
10211         ++LoopsVectorized;
10212 
10213         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10214         formLCSSARecursively(*L, *DT, LI, SE);
10215 
10216         // Second pass vectorizes the epilogue and adjusts the control flow
10217         // edges from the first pass.
10218         LVP.setBestPlan(EPI.EpilogueVF, EPI.EpilogueUF);
10219         EPI.MainLoopVF = EPI.EpilogueVF;
10220         EPI.MainLoopUF = EPI.EpilogueUF;
10221         EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
10222                                                  ORE, EPI, &LVL, &CM, BFI, PSI,
10223                                                  Checks);
10224         LVP.executePlan(EpilogILV, DT);
10225         ++LoopsEpilogueVectorized;
10226 
10227         if (!MainILV.areSafetyChecksAdded())
10228           DisableRuntimeUnroll = true;
10229       } else {
10230         InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
10231                                &LVL, &CM, BFI, PSI, Checks);
10232         LVP.executePlan(LB, DT);
10233         ++LoopsVectorized;
10234 
        // Add metadata to disable runtime unrolling of the scalar loop when
        // there are no runtime checks about strides and memory. A scalar loop
        // that is rarely used is not worth unrolling.
10238         if (!LB.areSafetyChecksAdded())
10239           DisableRuntimeUnroll = true;
10240       }
10241       // Report the vectorization decision.
10242       ORE->emit([&]() {
10243         return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
10244                                   L->getHeader())
10245                << "vectorized loop (vectorization width: "
10246                << NV("VectorizationFactor", VF.Width)
10247                << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
10248       });
10249     }
10250 
10251     if (ORE->allowExtraAnalysis(LV_NAME))
10252       checkMixedPrecision(L, ORE);
10253   }
10254 
10255   Optional<MDNode *> RemainderLoopID =
10256       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
10257                                       LLVMLoopVectorizeFollowupEpilogue});
10258   if (RemainderLoopID.hasValue()) {
10259     L->setLoopID(RemainderLoopID.getValue());
10260   } else {
10261     if (DisableRuntimeUnroll)
10262       AddRuntimeUnrollDisableMetaData(L);
10263 
10264     // Mark the loop as already vectorized to avoid vectorizing again.
10265     Hints.setAlreadyVectorized();
10266   }
10267 
10268   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10269   return true;
10270 }
10271 
10272 LoopVectorizeResult LoopVectorizePass::runImpl(
10273     Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
10274     DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
10275     DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
10276     std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
10277     OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
10278   SE = &SE_;
10279   LI = &LI_;
10280   TTI = &TTI_;
10281   DT = &DT_;
10282   BFI = &BFI_;
10283   TLI = TLI_;
10284   AA = &AA_;
10285   AC = &AC_;
10286   GetLAA = &GetLAA_;
10287   DB = &DB_;
10288   ORE = &ORE_;
10289   PSI = PSI_;
10290 
10291   // Don't attempt if
10292   // 1. the target claims to have no vector registers, and
10293   // 2. interleaving won't help ILP.
10294   //
10295   // The second condition is necessary because, even if the target has no
10296   // vector registers, loop vectorization may still enable scalar
10297   // interleaving.
10298   if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
10299       TTI->getMaxInterleaveFactor(1) < 2)
10300     return LoopVectorizeResult(false, false);
10301 
10302   bool Changed = false, CFGChanged = false;
10303 
10304   // The vectorizer requires loops to be in simplified form.
10305   // Since simplification may add new inner loops, it has to run before the
10306   // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
10309   for (auto &L : *LI)
10310     Changed |= CFGChanged |=
10311         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10312 
10313   // Build up a worklist of inner-loops to vectorize. This is necessary as
10314   // the act of vectorizing or partially unrolling a loop creates new loops
10315   // and can invalidate iterators across the loops.
10316   SmallVector<Loop *, 8> Worklist;
10317 
10318   for (Loop *L : *LI)
10319     collectSupportedLoops(*L, LI, ORE, Worklist);
10320 
10321   LoopsAnalyzed += Worklist.size();
10322 
10323   // Now walk the identified inner loops.
10324   while (!Worklist.empty()) {
10325     Loop *L = Worklist.pop_back_val();
10326 
10327     // For the inner loops we actually process, form LCSSA to simplify the
10328     // transform.
10329     Changed |= formLCSSARecursively(*L, *DT, LI, SE);
10330 
10331     Changed |= CFGChanged |= processLoop(L);
10332   }
10333 
10334   // Process each loop nest in the function.
10335   return LoopVectorizeResult(Changed, CFGChanged);
10336 }
10337 
10338 PreservedAnalyses LoopVectorizePass::run(Function &F,
10339                                          FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  MemorySSA *MSSA = EnableMSSALoopDependency
                        ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA()
                        : nullptr;

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,  SE,
                                      TLI, TTI, nullptr, MSSA};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve loopinfo/dominator analyses with outer loop
  // vectorization. Until this is addressed, mark these analyses as preserved
  // only for non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }
  if (!Result.MadeCFGChange)
    PA.preserveSet<CFGAnalyses>();
  return PA;
10381 }
10382