1 //===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
10 // and generates target-independent LLVM-IR.
11 // The vectorizer uses the TargetTransformInfo analysis to estimate the costs
12 // of instructions in order to estimate the profitability of vectorization.
13 //
14 // The loop vectorizer combines consecutive loop iterations into a single
15 // 'wide' iteration. After this transformation the index is incremented
16 // by the SIMD vector width, and not by one.
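// For example, with a vectorization factor (VF) of 4, a loop such as
//   for (i = 0; i < n; ++i)
//     A[i] = B[i] + 1;
// is conceptually rewritten so that each new iteration computes four
// consecutive elements of A at once and 'i' is incremented by 4, with any
// remaining iterations handled by a scalar epilogue (remainder) loop.
// (Illustrative sketch only; the exact shape depends on the target and the
// chosen VF.)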
17 //
18 // This pass has four parts:
19 // 1. The main loop pass that drives the different parts.
20 // 2. LoopVectorizationLegality - A unit that checks for the legality
21 //    of the vectorization.
22 // 3. InnerLoopVectorizer - A unit that performs the actual
23 //    widening of instructions.
24 // 4. LoopVectorizationCostModel - A unit that checks for the profitability
25 //    of vectorization. It decides on the optimal vector width, which
26 //    can be one, if vectorization is not profitable.
27 //
28 // There is an ongoing development effort to migrate the loop vectorizer to the
29 // VPlan infrastructure and to introduce outer loop vectorization support (see
30 // docs/Proposal/VectorizationPlan.rst and
31 // http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
32 // purpose, we temporarily introduced the VPlan-native vectorization path: an
33 // alternative vectorization path that is natively implemented on top of the
34 // VPlan infrastructure. See EnableVPlanNativePath for enabling.
35 //
36 //===----------------------------------------------------------------------===//
37 //
38 // The reduction-variable vectorization is based on the paper:
39 //  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
40 //
41 // Variable uniformity checks are inspired by:
42 //  Karrenberg, R. and Hack, S. Whole Function Vectorization.
43 //
44 // The interleaved access vectorization is based on the paper:
45 //  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
46 //  Data for SIMD
47 //
48 // Other ideas/concepts are from:
49 //  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
50 //
51 //  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
52 //  Vectorizing Compilers.
53 //
54 //===----------------------------------------------------------------------===//
55 
56 #include "llvm/Transforms/Vectorize/LoopVectorize.h"
57 #include "LoopVectorizationPlanner.h"
58 #include "VPRecipeBuilder.h"
59 #include "VPlan.h"
60 #include "VPlanHCFGBuilder.h"
61 #include "VPlanPredicator.h"
62 #include "VPlanTransforms.h"
63 #include "llvm/ADT/APInt.h"
64 #include "llvm/ADT/ArrayRef.h"
65 #include "llvm/ADT/DenseMap.h"
66 #include "llvm/ADT/DenseMapInfo.h"
67 #include "llvm/ADT/Hashing.h"
68 #include "llvm/ADT/MapVector.h"
69 #include "llvm/ADT/None.h"
70 #include "llvm/ADT/Optional.h"
71 #include "llvm/ADT/STLExtras.h"
72 #include "llvm/ADT/SmallPtrSet.h"
73 #include "llvm/ADT/SmallSet.h"
74 #include "llvm/ADT/SmallVector.h"
75 #include "llvm/ADT/Statistic.h"
76 #include "llvm/ADT/StringRef.h"
77 #include "llvm/ADT/Twine.h"
78 #include "llvm/ADT/iterator_range.h"
79 #include "llvm/Analysis/AssumptionCache.h"
80 #include "llvm/Analysis/BasicAliasAnalysis.h"
81 #include "llvm/Analysis/BlockFrequencyInfo.h"
82 #include "llvm/Analysis/CFG.h"
83 #include "llvm/Analysis/CodeMetrics.h"
84 #include "llvm/Analysis/DemandedBits.h"
85 #include "llvm/Analysis/GlobalsModRef.h"
86 #include "llvm/Analysis/LoopAccessAnalysis.h"
87 #include "llvm/Analysis/LoopAnalysisManager.h"
88 #include "llvm/Analysis/LoopInfo.h"
89 #include "llvm/Analysis/LoopIterator.h"
90 #include "llvm/Analysis/MemorySSA.h"
91 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
92 #include "llvm/Analysis/ProfileSummaryInfo.h"
93 #include "llvm/Analysis/ScalarEvolution.h"
94 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
95 #include "llvm/Analysis/TargetLibraryInfo.h"
96 #include "llvm/Analysis/TargetTransformInfo.h"
97 #include "llvm/Analysis/VectorUtils.h"
98 #include "llvm/IR/Attributes.h"
99 #include "llvm/IR/BasicBlock.h"
100 #include "llvm/IR/CFG.h"
101 #include "llvm/IR/Constant.h"
102 #include "llvm/IR/Constants.h"
103 #include "llvm/IR/DataLayout.h"
104 #include "llvm/IR/DebugInfoMetadata.h"
105 #include "llvm/IR/DebugLoc.h"
106 #include "llvm/IR/DerivedTypes.h"
107 #include "llvm/IR/DiagnosticInfo.h"
108 #include "llvm/IR/Dominators.h"
109 #include "llvm/IR/Function.h"
110 #include "llvm/IR/IRBuilder.h"
111 #include "llvm/IR/InstrTypes.h"
112 #include "llvm/IR/Instruction.h"
113 #include "llvm/IR/Instructions.h"
114 #include "llvm/IR/IntrinsicInst.h"
115 #include "llvm/IR/Intrinsics.h"
116 #include "llvm/IR/LLVMContext.h"
117 #include "llvm/IR/Metadata.h"
118 #include "llvm/IR/Module.h"
119 #include "llvm/IR/Operator.h"
120 #include "llvm/IR/PatternMatch.h"
121 #include "llvm/IR/Type.h"
122 #include "llvm/IR/Use.h"
123 #include "llvm/IR/User.h"
124 #include "llvm/IR/Value.h"
125 #include "llvm/IR/ValueHandle.h"
126 #include "llvm/IR/Verifier.h"
127 #include "llvm/InitializePasses.h"
128 #include "llvm/Pass.h"
129 #include "llvm/Support/Casting.h"
130 #include "llvm/Support/CommandLine.h"
131 #include "llvm/Support/Compiler.h"
132 #include "llvm/Support/Debug.h"
133 #include "llvm/Support/ErrorHandling.h"
134 #include "llvm/Support/InstructionCost.h"
135 #include "llvm/Support/MathExtras.h"
136 #include "llvm/Support/raw_ostream.h"
137 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
138 #include "llvm/Transforms/Utils/InjectTLIMappings.h"
139 #include "llvm/Transforms/Utils/LoopSimplify.h"
140 #include "llvm/Transforms/Utils/LoopUtils.h"
141 #include "llvm/Transforms/Utils/LoopVersioning.h"
142 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
143 #include "llvm/Transforms/Utils/SizeOpts.h"
144 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
145 #include <algorithm>
146 #include <cassert>
147 #include <cstdint>
148 #include <cstdlib>
149 #include <functional>
150 #include <iterator>
151 #include <limits>
152 #include <memory>
153 #include <string>
154 #include <tuple>
155 #include <utility>
156 
157 using namespace llvm;
158 
159 #define LV_NAME "loop-vectorize"
160 #define DEBUG_TYPE LV_NAME
161 
162 #ifndef NDEBUG
163 const char VerboseDebug[] = DEBUG_TYPE "-verbose";
164 #endif
165 
166 /// @{
167 /// Metadata attribute names
168 const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
169 const char LLVMLoopVectorizeFollowupVectorized[] =
170     "llvm.loop.vectorize.followup_vectorized";
171 const char LLVMLoopVectorizeFollowupEpilogue[] =
172     "llvm.loop.vectorize.followup_epilogue";
173 /// @}
174 
175 STATISTIC(LoopsVectorized, "Number of loops vectorized");
176 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
177 STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");
178 
179 static cl::opt<bool> EnableEpilogueVectorization(
180     "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
181     cl::desc("Enable vectorization of epilogue loops."));
182 
183 static cl::opt<unsigned> EpilogueVectorizationForceVF(
184     "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
185     cl::desc("When epilogue vectorization is enabled, and a value greater than "
186              "1 is specified, forces the given VF for all applicable epilogue "
187              "loops."));
188 
189 static cl::opt<unsigned> EpilogueVectorizationMinVF(
190     "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
191     cl::desc("Only loops with vectorization factor equal to or larger than "
192              "the specified value are considered for epilogue vectorization."));
193 
194 /// Loops with a known constant trip count below this number are vectorized only
195 /// if no scalar iteration overheads are incurred.
196 static cl::opt<unsigned> TinyTripCountVectorThreshold(
197     "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
198     cl::desc("Loops with a constant trip count that is smaller than this "
199              "value are vectorized only if no scalar iteration overheads "
200              "are incurred."));
201 
202 static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
203     "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
204     cl::desc("The maximum allowed number of runtime memory checks with a "
205              "vectorize(enable) pragma."));
206 
207 // Option prefer-predicate-over-epilogue indicates that an epilogue is undesired
208 // and that predication is preferred; this enum lists the options. I.e., the
209 // vectorizer will try to fold the tail loop (epilogue) into the vector body
210 // and predicate the instructions accordingly. If tail-folding fails, the
211 // fallback strategy depends on these values:
212 namespace PreferPredicateTy {
213   enum Option {
214     ScalarEpilogue = 0,
215     PredicateElseScalarEpilogue,
216     PredicateOrDontVectorize
217   };
218 } // namespace PreferPredicateTy
219 
220 static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
221     "prefer-predicate-over-epilogue",
222     cl::init(PreferPredicateTy::ScalarEpilogue),
223     cl::Hidden,
224     cl::desc("Tail-folding and predication preferences over creating a scalar "
225              "epilogue loop."),
226     cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
227                          "scalar-epilogue",
228                          "Don't tail-predicate loops, create scalar epilogue"),
229               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
230                          "predicate-else-scalar-epilogue",
231                          "prefer tail-folding, create scalar epilogue if tail "
232                          "folding fails."),
233               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
234                          "predicate-dont-vectorize",
235                          "prefer tail-folding, don't attempt vectorization if "
236                          "tail-folding fails.")));
237 
238 static cl::opt<bool> MaximizeBandwidth(
239     "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
240     cl::desc("Maximize bandwidth when selecting the vectorization factor, "
241              "which will be determined by the smallest type in the loop."));
242 
243 static cl::opt<bool> EnableInterleavedMemAccesses(
244     "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
245     cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
246 
247 /// An interleave-group may need masking if it resides in a block that needs
248 /// predication, or in order to mask away gaps.
249 static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
250     "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
251     cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));
252 
253 static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
254     "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
255     cl::desc("We don't interleave loops with an estimated constant trip count "
256              "below this number"));
257 
258 static cl::opt<unsigned> ForceTargetNumScalarRegs(
259     "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
260     cl::desc("A flag that overrides the target's number of scalar registers."));
261 
262 static cl::opt<unsigned> ForceTargetNumVectorRegs(
263     "force-target-num-vector-regs", cl::init(0), cl::Hidden,
264     cl::desc("A flag that overrides the target's number of vector registers."));
265 
266 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
267     "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
268     cl::desc("A flag that overrides the target's max interleave factor for "
269              "scalar loops."));
270 
271 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
272     "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
273     cl::desc("A flag that overrides the target's max interleave factor for "
274              "vectorized loops."));
275 
276 static cl::opt<unsigned> ForceTargetInstructionCost(
277     "force-target-instruction-cost", cl::init(0), cl::Hidden,
278     cl::desc("A flag that overrides the target's expected cost for "
279              "an instruction to a single constant value. Mostly "
280              "useful for getting consistent testing."));
281 
282 static cl::opt<bool> ForceTargetSupportsScalableVectors(
283     "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
284     cl::desc(
285         "Pretend that scalable vectors are supported, even if the target does "
286         "not support them. This flag should only be used for testing."));
287 
288 static cl::opt<unsigned> SmallLoopCost(
289     "small-loop-cost", cl::init(20), cl::Hidden,
290     cl::desc(
291         "The cost of a loop that is considered 'small' by the interleaver."));
292 
293 static cl::opt<bool> LoopVectorizeWithBlockFrequency(
294     "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
295     cl::desc("Enable the use of the block frequency analysis to access PGO "
296              "heuristics minimizing code growth in cold regions and being more "
297              "aggressive in hot regions."));
298 
299 // Runtime interleave loops for load/store throughput.
300 static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
301     "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
302     cl::desc(
303         "Enable runtime interleaving until load/store ports are saturated"));
304 
305 /// Interleave small loops with scalar reductions.
306 static cl::opt<bool> InterleaveSmallLoopScalarReduction(
307     "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
308     cl::desc("Enable interleaving for loops with small iteration counts that "
309              "contain scalar reductions to expose ILP."));
310 
311 /// The number of stores in a loop that are allowed to need predication.
312 static cl::opt<unsigned> NumberOfStoresToPredicate(
313     "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
314     cl::desc("Max number of stores to be predicated behind an if."));
315 
316 static cl::opt<bool> EnableIndVarRegisterHeur(
317     "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
318     cl::desc("Count the induction variable only once when interleaving"));
319 
320 static cl::opt<bool> EnableCondStoresVectorization(
321     "enable-cond-stores-vec", cl::init(true), cl::Hidden,
322     cl::desc("Enable if-predication of stores during vectorization."));
323 
324 static cl::opt<unsigned> MaxNestedScalarReductionIC(
325     "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
326     cl::desc("The maximum interleave count to use when interleaving a scalar "
327              "reduction in a nested loop."));
328 
329 static cl::opt<bool>
330     PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
331                            cl::Hidden,
332                            cl::desc("Prefer in-loop vector reductions, "
333                                     "overriding the target's preference."));
334 
335 cl::opt<bool> EnableStrictReductions(
336     "enable-strict-reductions", cl::init(false), cl::Hidden,
337     cl::desc("Enable the vectorization of loops with in-order (strict) "
338              "FP reductions"));
339 
340 static cl::opt<bool> PreferPredicatedReductionSelect(
341     "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
342     cl::desc(
343         "Prefer predicating a reduction operation over an after loop select."));
344 
345 cl::opt<bool> EnableVPlanNativePath(
346     "enable-vplan-native-path", cl::init(false), cl::Hidden,
347     cl::desc("Enable VPlan-native vectorization path with "
348              "support for outer loop vectorization."));
349 
350 // FIXME: Remove this switch once we have divergence analysis. Currently we
351 // assume divergent non-backedge branches when this switch is true.
352 cl::opt<bool> EnableVPlanPredication(
353     "enable-vplan-predication", cl::init(false), cl::Hidden,
354     cl::desc("Enable VPlan-native vectorization path predicator with "
355              "support for outer loop vectorization."));
356 
357 // This flag enables the stress testing of the VPlan H-CFG construction in the
358 // VPlan-native vectorization path. It must be used in conjunction with
359 // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
360 // verification of the H-CFGs built.
361 static cl::opt<bool> VPlanBuildStressTest(
362     "vplan-build-stress-test", cl::init(false), cl::Hidden,
363     cl::desc(
364         "Build VPlan for every supported loop nest in the function and bail "
365         "out right after the build (stress test the VPlan H-CFG construction "
366         "in the VPlan-native vectorization path)."));
367 
368 cl::opt<bool> llvm::EnableLoopInterleaving(
369     "interleave-loops", cl::init(true), cl::Hidden,
370     cl::desc("Enable loop interleaving in Loop vectorization passes"));
371 cl::opt<bool> llvm::EnableLoopVectorization(
372     "vectorize-loops", cl::init(true), cl::Hidden,
373     cl::desc("Run the Loop vectorization passes"));
374 
375 cl::opt<bool> PrintVPlansInDotFormat(
376     "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
377     cl::desc("Use dot format instead of plain text when dumping VPlans"));
378 
379 /// A helper function that returns true if the given type is irregular. The
380 /// type is irregular if its allocated size doesn't equal the store size of an
381 /// element of the corresponding vector type.
382 static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
383   // Determine if an array of N elements of type Ty is "bitcast compatible"
384   // with a <N x Ty> vector.
385   // This is only true if there is no padding between the array elements.
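  // For example, with a typical x86 data layout x86_fp80 has a type size of
  // 80 bits but an alloc size of 96 or 128 bits, so an array of x86_fp80
  // elements is not bitcast compatible with a vector of x86_fp80
  // (illustrative example; i8, i32, float and double are "regular").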
386   return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
387 }
388 
389 /// A helper function that returns the reciprocal of the block probability of
390 /// predicated blocks. If we return X, we are assuming the predicated block
391 /// will execute once for every X iterations of the loop header.
392 ///
393 /// TODO: We should use actual block probability here, if available. Currently,
394 ///       we always assume predicated blocks have a 50% chance of executing.
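///
/// For example, the current return value of 2 means the cost model assumes a
/// predicated block runs on roughly every other iteration, so the cost
/// attributed to such a block is scaled down by this factor where it is used.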
395 static unsigned getReciprocalPredBlockProb() { return 2; }
396 
397 /// A helper function that returns an integer or floating-point constant with
398 /// value C.
399 static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
400   return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
401                            : ConstantFP::get(Ty, C);
402 }
403 
404 /// Returns "best known" trip count for the specified loop \p L as defined by
405 /// the following procedure:
406 ///   1) Returns exact trip count if it is known.
407 ///   2) Returns expected trip count according to profile data if any.
408 ///   3) Returns upper bound estimate if it is known.
409 ///   4) Returns None if all of the above failed.
410 static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
411   // Check if exact trip count is known.
412   if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
413     return ExpectedTC;
414 
415   // Check if there is an expected trip count available from profile data.
416   if (LoopVectorizeWithBlockFrequency)
417     if (auto EstimatedTC = getLoopEstimatedTripCount(L))
418       return EstimatedTC;
419 
420   // Check if upper bound estimate is known.
421   if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
422     return ExpectedTC;
423 
424   return None;
425 }
426 
427 // Forward declare GeneratedRTChecks.
428 class GeneratedRTChecks;
429 
430 namespace llvm {
431 
432 /// InnerLoopVectorizer vectorizes loops which contain only one basic
433 /// block to a specified vectorization factor (VF).
434 /// This class performs the widening of scalars into vectors, or multiple
435 /// scalars. This class also implements the following features:
436 /// * It inserts an epilogue loop for handling loops that don't have iteration
437 ///   counts that are known to be a multiple of the vectorization factor.
438 /// * It handles the code generation for reduction variables.
439 /// * Scalarization (implementation using scalars) of un-vectorizable
440 ///   instructions.
441 /// InnerLoopVectorizer does not perform any vectorization-legality
442 /// checks, and relies on the caller to check for the different legality
443 /// aspects. The InnerLoopVectorizer relies on the
444 /// LoopVectorizationLegality class to provide information about the induction
445 /// and reduction variables that were found to a given vectorization factor.
446 class InnerLoopVectorizer {
447 public:
448   InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
449                       LoopInfo *LI, DominatorTree *DT,
450                       const TargetLibraryInfo *TLI,
451                       const TargetTransformInfo *TTI, AssumptionCache *AC,
452                       OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
453                       unsigned UnrollFactor, LoopVectorizationLegality *LVL,
454                       LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
455                       ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
456       : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
457         AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
458         Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
459         PSI(PSI), RTChecks(RTChecks) {
460     // Query this against the original loop and save it here because the profile
461     // of the original loop header may change as the transformation happens.
462     OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
463         OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
464   }
465 
466   virtual ~InnerLoopVectorizer() = default;
467 
468   /// Create a new empty loop that will contain vectorized instructions later
469   /// on, while the old loop will be used as the scalar remainder. Control flow
470   /// is generated around the vectorized (and scalar epilogue) loops consisting
471   /// of various checks and bypasses. Return the pre-header block of the new
472   /// loop.
473   /// In the case of epilogue vectorization, this function is overridden to
474   /// handle the more complex control flow around the loops.
475   virtual BasicBlock *createVectorizedLoopSkeleton();
476 
477   /// Widen a single instruction within the innermost loop.
478   void widenInstruction(Instruction &I, VPValue *Def, VPUser &Operands,
479                         VPTransformState &State);
480 
481   /// Widen a single call instruction within the innermost loop.
482   void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
483                             VPTransformState &State);
484 
485   /// Widen a single select instruction within the innermost loop.
486   void widenSelectInstruction(SelectInst &I, VPValue *VPDef, VPUser &Operands,
487                               bool InvariantCond, VPTransformState &State);
488 
489   /// Fix the vectorized code, taking care of header phis, live-outs, and more.
490   void fixVectorizedLoop(VPTransformState &State);
491 
492   // Return true if any runtime check is added.
493   bool areSafetyChecksAdded() { return AddedSafetyChecks; }
494 
495   /// A type for vectorized values in the new loop. Each value from the
496   /// original loop, when vectorized, is represented by UF vector values in the
497   /// new unrolled loop, where UF is the unroll factor.
498   using VectorParts = SmallVector<Value *, 2>;
499 
500   /// Vectorize a single GetElementPtrInst based on information gathered and
501   /// decisions taken during planning.
502   void widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, VPUser &Indices,
503                 unsigned UF, ElementCount VF, bool IsPtrLoopInvariant,
504                 SmallBitVector &IsIndexLoopInvariant, VPTransformState &State);
505 
506   /// Vectorize a single PHINode in a block. This method handles the induction
507   /// variable canonicalization. It supports both VF = 1 for unrolled loops and
508   /// arbitrary length vectors.
509   void widenPHIInstruction(Instruction *PN, RecurrenceDescriptor *RdxDesc,
510                            VPWidenPHIRecipe *PhiR, VPTransformState &State);
511 
512   /// A helper function to scalarize a single Instruction in the innermost loop.
513   /// Generates a sequence of scalar instances for each lane between \p MinLane
514   /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
515   /// inclusive. Uses the VPValue operands from \p Operands instead of \p
516   /// Instr's operands.
517   void scalarizeInstruction(Instruction *Instr, VPValue *Def, VPUser &Operands,
518                             const VPIteration &Instance, bool IfPredicateInstr,
519                             VPTransformState &State);
520 
521   /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
522   /// is provided, the integer induction variable will first be truncated to
523   /// the corresponding type.
524   void widenIntOrFpInduction(PHINode *IV, Value *Start, TruncInst *Trunc,
525                              VPValue *Def, VPValue *CastDef,
526                              VPTransformState &State);
527 
528   /// Construct the vector value of a scalarized value \p V one lane at a time.
529   void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
530                                  VPTransformState &State);
531 
532   /// Try to vectorize interleaved access group \p Group with the base address
533   /// given in \p Addr, optionally masking the vector operations if \p
534   /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
535   /// values in the vectorized loop.
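  ///
  /// For example (illustrative only), in
  ///   for (i = 0; i < n; ++i) { x = A[2*i]; y = A[2*i+1]; ... }
  /// the two loads form an interleave group with factor 2 that can be widened
  /// into a single wide load followed by shuffles that de-interleave the even
  /// and odd elements.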
536   void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
537                                 ArrayRef<VPValue *> VPDefs,
538                                 VPTransformState &State, VPValue *Addr,
539                                 ArrayRef<VPValue *> StoredValues,
540                                 VPValue *BlockInMask = nullptr);
541 
542   /// Vectorize Load and Store instructions with the base address given in \p
543   /// Addr, optionally masking the vector operations if \p BlockInMask is
544   /// non-null. Use \p State to translate given VPValues to IR values in the
545   /// vectorized loop.
546   void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
547                                   VPValue *Def, VPValue *Addr,
548                                   VPValue *StoredValue, VPValue *BlockInMask);
549 
550   /// Set the debug location in the builder using the debug location in
551   /// the instruction.
552   void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);
553 
554   /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
555   void fixNonInductionPHIs(VPTransformState &State);
556 
557   /// Returns true if the reordering of FP operations is not allowed, but we are
558   /// able to vectorize with strict in-order reductions for the given RdxDesc.
559   bool useOrderedReductions(RecurrenceDescriptor &RdxDesc);
560 
561   /// Create a broadcast instruction. This method generates a broadcast
562   /// instruction (shuffle) for loop invariant values and for the induction
563   /// value. If this is the induction variable then we extend it to N, N+1, ...
564   /// this is needed because each iteration in the loop corresponds to a SIMD
565   /// element.
566   virtual Value *getBroadcastInstrs(Value *V);
567 
568 protected:
569   friend class LoopVectorizationPlanner;
570 
571   /// A small list of PHINodes.
572   using PhiVector = SmallVector<PHINode *, 4>;
573 
574   /// A type for scalarized values in the new loop. Each value from the
575   /// original loop, when scalarized, is represented by UF x VF scalar values
576   /// in the new unrolled loop, where UF is the unroll factor and VF is the
577   /// vectorization factor.
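  ///
  /// For example, with VF == 4 and UF == 2 a scalarized value is represented
  /// by 2 x 4 scalar values, whereas a vectorized value (see VectorParts) is
  /// represented by 2 vectors of 4 elements each.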
578   using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;
579 
580   /// Set up the values of the IVs correctly when exiting the vector loop.
581   void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
582                     Value *CountRoundDown, Value *EndValue,
583                     BasicBlock *MiddleBlock);
584 
585   /// Create a new induction variable inside L.
586   PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
587                                    Value *Step, Instruction *DL);
588 
589   /// Handle all cross-iteration phis in the header.
590   void fixCrossIterationPHIs(VPTransformState &State);
591 
592   /// Fix a first-order recurrence. This is the second phase of vectorizing
593   /// this phi node.
594   void fixFirstOrderRecurrence(PHINode *Phi, VPTransformState &State);
595 
596   /// Fix a reduction cross-iteration phi. This is the second phase of
597   /// vectorizing this phi node.
598   void fixReduction(VPWidenPHIRecipe *Phi, VPTransformState &State);
599 
600   /// Clear NSW/NUW flags from reduction instructions if necessary.
601   void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
602                                VPTransformState &State);
603 
604   /// Fixup the LCSSA phi nodes in the unique exit block.  This simply
605   /// means we need to add the appropriate incoming value from the middle
606   /// block as exiting edges from the scalar epilogue loop (if present) are
607   /// already in place, and we exit the vector loop exclusively to the middle
608   /// block.
609   void fixLCSSAPHIs(VPTransformState &State);
610 
611   /// Iteratively sink the scalarized operands of a predicated instruction into
612   /// the block that was created for it.
613   void sinkScalarOperands(Instruction *PredInst);
614 
615   /// Shrinks vector element sizes to the smallest bitwidth they can be legally
616   /// represented as.
617   void truncateToMinimalBitwidths(VPTransformState &State);
618 
619   /// This function adds
620   /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
621   /// to each vector element of Val. The sequence starts at StartIdx.
622   /// \p Opcode is relevant for FP induction variable.
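  /// For example, with StartIdx == 0, Step == 2 and a VF of 4 (integer
  /// induction), the vector <0, 2, 4, 6> is added to Val (illustrative
  /// example).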
623   virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
624                                Instruction::BinaryOps Opcode =
625                                Instruction::BinaryOpsEnd);
626 
627   /// Compute scalar induction steps. \p ScalarIV is the scalar induction
628   /// variable on which to base the steps, \p Step is the size of the step, and
629   /// \p EntryVal is the value from the original loop that maps to the steps.
630   /// Note that \p EntryVal doesn't have to be an induction variable - it
631   /// can also be a truncate instruction.
632   void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
633                         const InductionDescriptor &ID, VPValue *Def,
634                         VPValue *CastDef, VPTransformState &State);
635 
636   /// Create a vector induction phi node based on an existing scalar one. \p
637   /// EntryVal is the value from the original loop that maps to the vector phi
638   /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
639   /// truncate instruction, instead of widening the original IV, we widen a
640   /// version of the IV truncated to \p EntryVal's type.
641   void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
642                                        Value *Step, Value *Start,
643                                        Instruction *EntryVal, VPValue *Def,
644                                        VPValue *CastDef,
645                                        VPTransformState &State);
646 
647   /// Returns true if an instruction \p I should be scalarized instead of
648   /// vectorized for the chosen vectorization factor.
649   bool shouldScalarizeInstruction(Instruction *I) const;
650 
651   /// Returns true if we should generate a scalar version of \p IV.
652   bool needsScalarInduction(Instruction *IV) const;
653 
654   /// If there is a cast involved in the induction variable \p ID, which should
655   /// be ignored in the vectorized loop body, this function records the
656   /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
657   /// cast. We had already proved that the casted Phi is equal to the uncasted
658   /// Phi in the vectorized loop (under a runtime guard), and therefore
659   /// there is no need to vectorize the cast - the same value can be used in the
660   /// vector loop for both the Phi and the cast.
661   /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified;
662   /// otherwise, \p VectorLoopValue is a widened/vectorized value.
663   ///
664   /// \p EntryVal is the value from the original loop that maps to the vector
665   /// phi node and is used to distinguish what is the IV currently being
666   /// processed - original one (if \p EntryVal is a phi corresponding to the
667   /// original IV) or the "newly-created" one based on the proof mentioned above
668   /// (see also buildScalarSteps() and createVectorIntOrFpInductionPHI()). In the
669   /// latter case \p EntryVal is a TruncInst and we must not record anything for
670   /// that IV, but it's error-prone to expect callers of this routine to care
671   /// about that, hence this explicit parameter.
672   void recordVectorLoopValueForInductionCast(
673       const InductionDescriptor &ID, const Instruction *EntryVal,
674       Value *VectorLoopValue, VPValue *CastDef, VPTransformState &State,
675       unsigned Part, unsigned Lane = UINT_MAX);
676 
677   /// Generate a shuffle sequence that will reverse the vector Vec.
678   virtual Value *reverseVector(Value *Vec);
679 
680   /// Returns (and creates if needed) the original loop trip count.
681   Value *getOrCreateTripCount(Loop *NewLoop);
682 
683   /// Returns (and creates if needed) the trip count of the widened loop.
684   Value *getOrCreateVectorTripCount(Loop *NewLoop);
685 
686   /// Returns a bitcasted value to the requested vector type.
687   /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
688   Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
689                                 const DataLayout &DL);
690 
691   /// Emit a bypass check to see if the vector trip count is zero, including if
692   /// it overflows.
693   void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
694 
695   /// Emit a bypass check to see if all of the SCEV assumptions we've
696   /// had to make are correct. Returns the block containing the checks or
697   /// nullptr if no checks have been added.
698   BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass);
699 
700   /// Emit bypass checks to check any memory assumptions we may have made.
701   /// Returns the block containing the checks or nullptr if no checks have been
702   /// added.
703   BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);
704 
705   /// Compute the transformed value of Index at offset StartValue using step
706   /// StepValue.
707   /// For integer induction, returns StartValue + Index * StepValue.
708   /// For pointer induction, returns StartValue[Index * StepValue].
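  /// For example, for an integer induction with StartValue == 100 and
  /// StepValue == 3, Index 4 is transformed to 100 + 4 * 3 == 112
  /// (illustrative example).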
709   /// FIXME: The newly created binary instructions should contain nsw/nuw
710   /// flags, which can be found from the original scalar operations.
711   Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
712                               const DataLayout &DL,
713                               const InductionDescriptor &ID) const;
714 
715   /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
716   /// vector loop preheader, middle block and scalar preheader. Also
717   /// allocate a loop object for the new vector loop and return it.
718   Loop *createVectorLoopSkeleton(StringRef Prefix);
719 
720   /// Create new phi nodes for the induction variables to resume iteration count
721   /// in the scalar epilogue, from where the vectorized loop left off (given by
722   /// \p VectorTripCount).
723   /// In cases where the loop skeleton is more complicated (e.g. epilogue
724   /// vectorization) and the resume values can come from an additional bypass
725   /// block, the \p AdditionalBypass pair provides information about the bypass
726   /// block and the end value on the edge from bypass to this loop.
727   void createInductionResumeValues(
728       Loop *L, Value *VectorTripCount,
729       std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});
730 
731   /// Complete the loop skeleton by adding debug MDs, creating appropriate
732   /// conditional branches in the middle block, preparing the builder and
733   /// running the verifier. Take in the vector loop \p L as argument, and return
734   /// the preheader of the completed vector loop.
735   BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);
736 
737   /// Add additional metadata to \p To that was not present on \p Orig.
738   ///
739   /// Currently this is used to add the noalias annotations based on the
740   /// inserted memchecks.  Use this for instructions that are *cloned* into the
741   /// vector loop.
742   void addNewMetadata(Instruction *To, const Instruction *Orig);
743 
744   /// Add metadata from one instruction to another.
745   ///
746   /// This includes both the original MDs from \p From and additional ones (\see
747   /// addNewMetadata).  Use this for *newly created* instructions in the vector
748   /// loop.
749   void addMetadata(Instruction *To, Instruction *From);
750 
751   /// Similar to the previous function but it adds the metadata to a
752   /// vector of instructions.
753   void addMetadata(ArrayRef<Value *> To, Instruction *From);
754 
755   /// Allow subclasses to override and print debug traces before/after vplan
756   /// execution, when trace information is requested.
757   virtual void printDebugTracesAtStart() {}
758   virtual void printDebugTracesAtEnd() {}
759 
760   /// The original loop.
761   Loop *OrigLoop;
762 
763   /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
764   /// dynamic knowledge to simplify SCEV expressions and converts them to a
765   /// more usable form.
766   PredicatedScalarEvolution &PSE;
767 
768   /// Loop Info.
769   LoopInfo *LI;
770 
771   /// Dominator Tree.
772   DominatorTree *DT;
773 
774   /// Alias Analysis.
775   AAResults *AA;
776 
777   /// Target Library Info.
778   const TargetLibraryInfo *TLI;
779 
780   /// Target Transform Info.
781   const TargetTransformInfo *TTI;
782 
783   /// Assumption Cache.
784   AssumptionCache *AC;
785 
786   /// Interface to emit optimization remarks.
787   OptimizationRemarkEmitter *ORE;
788 
789   /// LoopVersioning.  It's only set up (non-null) if memchecks were
790   /// used.
791   ///
792   /// This is currently only used to add no-alias metadata based on the
793   /// memchecks.  The actual versioning is performed manually.
794   std::unique_ptr<LoopVersioning> LVer;
795 
796   /// The vectorization SIMD factor to use. Each vector will have this many
797   /// vector elements.
798   ElementCount VF;
799 
800   /// The vectorization unroll factor to use. Each scalar is vectorized to this
801   /// many different vector instructions.
802   unsigned UF;
803 
804   /// The builder that we use
805   IRBuilder<> Builder;
806 
807   // --- Vectorization state ---
808 
809   /// The vector-loop preheader.
810   BasicBlock *LoopVectorPreHeader;
811 
812   /// The scalar-loop preheader.
813   BasicBlock *LoopScalarPreHeader;
814 
815   /// Middle Block between the vector and the scalar.
816   BasicBlock *LoopMiddleBlock;
817 
818   /// The (unique) ExitBlock of the scalar loop.  Note that
819   /// there can be multiple exiting edges reaching this block.
820   BasicBlock *LoopExitBlock;
821 
822   /// The vector loop body.
823   BasicBlock *LoopVectorBody;
824 
825   /// The scalar loop body.
826   BasicBlock *LoopScalarBody;
827 
828   /// A list of all bypass blocks. The first block is the entry of the loop.
829   SmallVector<BasicBlock *, 4> LoopBypassBlocks;
830 
831   /// The new Induction variable which was added to the new block.
832   PHINode *Induction = nullptr;
833 
834   /// The induction variable of the old basic block.
835   PHINode *OldInduction = nullptr;
836 
837   /// Store instructions that were predicated.
838   SmallVector<Instruction *, 4> PredicatedInstructions;
839 
840   /// Trip count of the original loop.
841   Value *TripCount = nullptr;
842 
843   /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
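  /// For example, with TripCount == 103, VF == 8 and UF == 2 this is
  /// 103 - (103 % 16) == 96 (illustrative example).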
844   Value *VectorTripCount = nullptr;
845 
846   /// The legality analysis.
847   LoopVectorizationLegality *Legal;
848 
849   /// The profitability analysis.
850   LoopVectorizationCostModel *Cost;
851 
852   // Record whether runtime checks are added.
853   bool AddedSafetyChecks = false;
854 
855   // Holds the end values for each induction variable. We save the end values
856   // so we can later fix-up the external users of the induction variables.
857   DenseMap<PHINode *, Value *> IVEndValues;
858 
859   // Vector of original scalar PHIs whose corresponding widened PHIs need to be
860   // fixed up at the end of vector code generation.
861   SmallVector<PHINode *, 8> OrigPHIsToFix;
862 
863   /// BFI and PSI are used to check for profile guided size optimizations.
864   BlockFrequencyInfo *BFI;
865   ProfileSummaryInfo *PSI;
866 
867   // Whether this loop should be optimized for size based on profile guided size
868   // optimizations.
869   bool OptForSizeBasedOnProfile;
870 
871   /// Structure to hold information about generated runtime checks, responsible
872   /// for cleaning the checks, if vectorization turns out unprofitable.
873   GeneratedRTChecks &RTChecks;
874 };
875 
876 class InnerLoopUnroller : public InnerLoopVectorizer {
877 public:
878   InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
879                     LoopInfo *LI, DominatorTree *DT,
880                     const TargetLibraryInfo *TLI,
881                     const TargetTransformInfo *TTI, AssumptionCache *AC,
882                     OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
883                     LoopVectorizationLegality *LVL,
884                     LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
885                     ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
886       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
887                             ElementCount::getFixed(1), UnrollFactor, LVL, CM,
888                             BFI, PSI, Check) {}
889 
890 private:
891   Value *getBroadcastInstrs(Value *V) override;
892   Value *getStepVector(Value *Val, int StartIdx, Value *Step,
893                        Instruction::BinaryOps Opcode =
894                        Instruction::BinaryOpsEnd) override;
895   Value *reverseVector(Value *Vec) override;
896 };
897 
898 /// Encapsulate information regarding vectorization of a loop and its epilogue.
899 /// This information is meant to be updated and used across two stages of
900 /// epilogue vectorization.
901 struct EpilogueLoopVectorizationInfo {
902   ElementCount MainLoopVF = ElementCount::getFixed(0);
903   unsigned MainLoopUF = 0;
904   ElementCount EpilogueVF = ElementCount::getFixed(0);
905   unsigned EpilogueUF = 0;
906   BasicBlock *MainLoopIterationCountCheck = nullptr;
907   BasicBlock *EpilogueIterationCountCheck = nullptr;
908   BasicBlock *SCEVSafetyCheck = nullptr;
909   BasicBlock *MemSafetyCheck = nullptr;
910   Value *TripCount = nullptr;
911   Value *VectorTripCount = nullptr;
912 
913   EpilogueLoopVectorizationInfo(unsigned MVF, unsigned MUF, unsigned EVF,
914                                 unsigned EUF)
915       : MainLoopVF(ElementCount::getFixed(MVF)), MainLoopUF(MUF),
916         EpilogueVF(ElementCount::getFixed(EVF)), EpilogueUF(EUF) {
917     assert(EUF == 1 &&
918            "A high UF for the epilogue loop is likely not beneficial.");
919   }
920 };
921 
922 /// An extension of the inner loop vectorizer that creates a skeleton for a
923 /// vectorized loop that has its epilogue (residual) also vectorized.
924 /// The idea is to run the vplan on a given loop twice, firstly to set up the
925 /// skeleton and vectorize the main loop, and secondly to complete the skeleton
926 /// from the first step and vectorize the epilogue.  This is achieved by
927 /// deriving two concrete strategy classes from this base class and invoking
928 /// them in succession from the loop vectorizer planner.
929 class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
930 public:
931   InnerLoopAndEpilogueVectorizer(
932       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
933       DominatorTree *DT, const TargetLibraryInfo *TLI,
934       const TargetTransformInfo *TTI, AssumptionCache *AC,
935       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
936       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
937       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
938       GeneratedRTChecks &Checks)
939       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
940                             EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
941                             Checks),
942         EPI(EPI) {}
943 
944   // Override this function to handle the more complex control flow around the
945   // three loops.
946   BasicBlock *createVectorizedLoopSkeleton() final override {
947     return createEpilogueVectorizedLoopSkeleton();
948   }
949 
950   /// The interface for creating a vectorized skeleton using one of two
951   /// different strategies, each corresponding to one execution of the vplan
952   /// as described above.
953   virtual BasicBlock *createEpilogueVectorizedLoopSkeleton() = 0;
954 
955   /// Holds and updates state information required to vectorize the main loop
956   /// and its epilogue in two separate passes. This setup helps us avoid
957   /// regenerating and recomputing runtime safety checks. It also helps us to
958   /// shorten the iteration-count-check path length for the cases where the
959   /// iteration count of the loop is so small that the main vector loop is
960   /// completely skipped.
961   EpilogueLoopVectorizationInfo &EPI;
962 };
963 
964 /// A specialized derived class of inner loop vectorizer that performs
965 /// vectorization of *main* loops in the process of vectorizing loops and their
966 /// epilogues.
967 class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
968 public:
969   EpilogueVectorizerMainLoop(
970       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
971       DominatorTree *DT, const TargetLibraryInfo *TLI,
972       const TargetTransformInfo *TTI, AssumptionCache *AC,
973       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
974       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
975       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
976       GeneratedRTChecks &Check)
977       : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
978                                        EPI, LVL, CM, BFI, PSI, Check) {}
979   /// Implements the interface for creating a vectorized skeleton using the
980   /// *main loop* strategy (i.e., the first pass of vplan execution).
981   BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;
982 
983 protected:
984   /// Emits an iteration count bypass check once for the main loop (when \p
985   /// ForEpilogue is false) and once for the epilogue loop (when \p
986   /// ForEpilogue is true).
987   BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
988                                              bool ForEpilogue);
989   void printDebugTracesAtStart() override;
990   void printDebugTracesAtEnd() override;
991 };
992 
993 // A specialized derived class of inner loop vectorizer that performs
994 // vectorization of *epilogue* loops in the process of vectorizing loops and
995 // their epilogues.
996 class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
997 public:
998   EpilogueVectorizerEpilogueLoop(
999       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
1000       DominatorTree *DT, const TargetLibraryInfo *TLI,
1001       const TargetTransformInfo *TTI, AssumptionCache *AC,
1002       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
1003       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
1004       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
1005       GeneratedRTChecks &Checks)
1006       : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
1007                                        EPI, LVL, CM, BFI, PSI, Checks) {}
1008   /// Implements the interface for creating a vectorized skeleton using the
1009   /// *epilogue loop* strategy (i.e., the second pass of vplan execution).
1010   BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;
1011 
1012 protected:
1013   /// Emits an iteration count bypass check after the main vector loop has
1014   /// finished to see if there are any iterations left to execute by either
1015   /// the vector epilogue or the scalar epilogue.
1016   BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
1017                                                       BasicBlock *Bypass,
1018                                                       BasicBlock *Insert);
1019   void printDebugTracesAtStart() override;
1020   void printDebugTracesAtEnd() override;
1021 };
1022 } // end namespace llvm
1023 
1024 /// Look for a meaningful debug location on the instruction or its
1025 /// operands.
1026 static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
1027   if (!I)
1028     return I;
1029 
1030   DebugLoc Empty;
1031   if (I->getDebugLoc() != Empty)
1032     return I;
1033 
1034   for (Use &Op : I->operands()) {
1035     if (Instruction *OpInst = dyn_cast<Instruction>(Op))
1036       if (OpInst->getDebugLoc() != Empty)
1037         return OpInst;
1038   }
1039 
1040   return I;
1041 }
1042 
1043 void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
1044   if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
1045     const DILocation *DIL = Inst->getDebugLoc();
1046 
1047     // When an FSDiscriminator is enabled, we don't need to add the multiply
1048     // factors to the discriminators.
1049     if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
1050         !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
1051       // FIXME: For scalable vectors, assume vscale=1.
1052       auto NewDIL =
1053           DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
1054       if (NewDIL)
1055         B.SetCurrentDebugLocation(NewDIL.getValue());
1056       else
1057         LLVM_DEBUG(dbgs()
1058                    << "Failed to create new discriminator: "
1059                    << DIL->getFilename() << " Line: " << DIL->getLine());
1060     } else
1061       B.SetCurrentDebugLocation(DIL);
1062   } else
1063     B.SetCurrentDebugLocation(DebugLoc());
1064 }
1065 
1066 /// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
1067 /// is passed, the message relates to that particular instruction.
1068 #ifndef NDEBUG
1069 static void debugVectorizationMessage(const StringRef Prefix,
1070                                       const StringRef DebugMsg,
1071                                       Instruction *I) {
1072   dbgs() << "LV: " << Prefix << DebugMsg;
1073   if (I != nullptr)
1074     dbgs() << " " << *I;
1075   else
1076     dbgs() << '.';
1077   dbgs() << '\n';
1078 }
1079 #endif
1080 
1081 /// Create an analysis remark that explains why vectorization failed
1082 ///
1083 /// \p PassName is the name of the pass (e.g. can be AlwaysPrint).  \p
1084 /// RemarkName is the identifier for the remark.  If \p I is passed it is an
1085 /// instruction that prevents vectorization.  Otherwise \p TheLoop is used for
1086 /// the location of the remark.  \return the remark object that can be
1087 /// streamed to.
1088 static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
1089     StringRef RemarkName, Loop *TheLoop, Instruction *I) {
1090   Value *CodeRegion = TheLoop->getHeader();
1091   DebugLoc DL = TheLoop->getStartLoc();
1092 
1093   if (I) {
1094     CodeRegion = I->getParent();
1095     // If there is no debug location attached to the instruction, fall back to
1096     // using the loop's.
1097     if (I->getDebugLoc())
1098       DL = I->getDebugLoc();
1099   }
1100 
1101   return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
1102 }
1103 
1104 /// Return a value for Step multiplied by VF.
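/// For example, with Step == 2 and a fixed VF of 4 this returns the constant
/// 8; for a scalable VF with a known minimum of 4 it returns 8 * vscale
/// (illustrative example).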
1105 static Value *createStepForVF(IRBuilder<> &B, Constant *Step, ElementCount VF) {
1106   assert(isa<ConstantInt>(Step) && "Expected an integer step");
1107   Constant *StepVal = ConstantInt::get(
1108       Step->getType(),
1109       cast<ConstantInt>(Step)->getSExtValue() * VF.getKnownMinValue());
1110   return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
1111 }
1112 
1113 namespace llvm {
1114 
1115 /// Return the runtime value for VF.
1116 Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF) {
1117   Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
1118   return VF.isScalable() ? B.CreateVScale(EC) : EC;
1119 }
1120 
1121 void reportVectorizationFailure(const StringRef DebugMsg,
1122                                 const StringRef OREMsg, const StringRef ORETag,
1123                                 OptimizationRemarkEmitter *ORE, Loop *TheLoop,
1124                                 Instruction *I) {
1125   LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
1126   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1127   ORE->emit(
1128       createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
1129       << "loop not vectorized: " << OREMsg);
1130 }
1131 
1132 void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
1133                              OptimizationRemarkEmitter *ORE, Loop *TheLoop,
1134                              Instruction *I) {
1135   LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
1136   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1137   ORE->emit(
1138       createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
1139       << Msg);
1140 }
1141 
1142 } // end namespace llvm
1143 
1144 #ifndef NDEBUG
1145 /// \return string containing a file name and a line # for the given loop.
1146 static std::string getDebugLocString(const Loop *L) {
1147   std::string Result;
1148   if (L) {
1149     raw_string_ostream OS(Result);
1150     if (const DebugLoc LoopDbgLoc = L->getStartLoc())
1151       LoopDbgLoc.print(OS);
1152     else
1153       // Just print the module name.
1154       OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
1155     OS.flush();
1156   }
1157   return Result;
1158 }
1159 #endif
1160 
1161 void InnerLoopVectorizer::addNewMetadata(Instruction *To,
1162                                          const Instruction *Orig) {
1163   // If the loop was versioned with memchecks, add the corresponding no-alias
1164   // metadata.
1165   if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
1166     LVer->annotateInstWithNoAlias(To, Orig);
1167 }
1168 
1169 void InnerLoopVectorizer::addMetadata(Instruction *To,
1170                                       Instruction *From) {
1171   propagateMetadata(To, From);
1172   addNewMetadata(To, From);
1173 }
1174 
1175 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
1176                                       Instruction *From) {
1177   for (Value *V : To) {
1178     if (Instruction *I = dyn_cast<Instruction>(V))
1179       addMetadata(I, From);
1180   }
1181 }
1182 
1183 namespace llvm {
1184 
// Hints that tell the loop-vectorization cost model how the scalar epilogue
// loop should be lowered.
1187 enum ScalarEpilogueLowering {
1188 
1189   // The default: allowing scalar epilogues.
1190   CM_ScalarEpilogueAllowed,
1191 
1192   // Vectorization with OptForSize: don't allow epilogues.
1193   CM_ScalarEpilogueNotAllowedOptSize,
1194 
  // A special case of vectorization with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant and free of runtime
  // guards and scalar iteration overheads.
1199   CM_ScalarEpilogueNotAllowedLowTripLoop,
1200 
1201   // Loop hint predicate indicating an epilogue is undesired.
1202   CM_ScalarEpilogueNotNeededUsePredicate,
1203 
  // Directive indicating we must either tail-fold or not vectorize.
1205   CM_ScalarEpilogueNotAllowedUsePredicate
1206 };
1207 
1208 /// ElementCountComparator creates a total ordering for ElementCount
1209 /// for the purposes of using it in a set structure.
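/// Fixed-width counts order before scalable ones; within each group the
/// ordering is by the known minimum number of elements, e.g.
/// 2 < 4 < <vscale x 2> < <vscale x 4>.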
1210 struct ElementCountComparator {
1211   bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
1212     return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
1213            std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
1214   }
1215 };
1216 using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;
1217 
1218 /// LoopVectorizationCostModel - estimates the expected speedups due to
1219 /// vectorization.
/// In many cases vectorization is not profitable. This can happen for a
/// number of reasons. In this class we mainly attempt to predict the
1222 /// expected speedup/slowdowns due to the supported instruction set. We use the
1223 /// TargetTransformInfo to query the different backends for the cost of
1224 /// different operations.
1225 class LoopVectorizationCostModel {
1226 public:
1227   LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
1228                              PredicatedScalarEvolution &PSE, LoopInfo *LI,
1229                              LoopVectorizationLegality *Legal,
1230                              const TargetTransformInfo &TTI,
1231                              const TargetLibraryInfo *TLI, DemandedBits *DB,
1232                              AssumptionCache *AC,
1233                              OptimizationRemarkEmitter *ORE, const Function *F,
1234                              const LoopVectorizeHints *Hints,
1235                              InterleavedAccessInfo &IAI)
1236       : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
1237         TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
1238         Hints(Hints), InterleaveInfo(IAI) {}
1239 
1240   /// \return An upper bound for the vectorization factors (both fixed and
1241   /// scalable). If the factors are 0, vectorization and interleaving should be
1242   /// avoided up front.
1243   FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);
1244 
1245   /// \return True if runtime checks are required for vectorization, and false
1246   /// otherwise.
1247   bool runtimeChecksRequired();
1248 
1249   /// \return The most profitable vectorization factor and the cost of that VF.
1250   /// This method checks every VF in \p CandidateVFs. If UserVF is not ZERO
1251   /// then this vectorization factor will be selected if vectorization is
1252   /// possible.
1253   VectorizationFactor
1254   selectVectorizationFactor(const ElementCountSet &CandidateVFs);
1255 
1256   VectorizationFactor
1257   selectEpilogueVectorizationFactor(const ElementCount MaxVF,
1258                                     const LoopVectorizationPlanner &LVP);
1259 
1260   /// Setup cost-based decisions for user vectorization factor.
1261   void selectUserVectorizationFactor(ElementCount UserVF) {
1262     collectUniformsAndScalars(UserVF);
1263     collectInstsToScalarize(UserVF);
1264   }
1265 
1266   /// \return The size (in bits) of the smallest and widest types in the code
1267   /// that needs to be vectorized. We ignore values that remain scalar such as
1268   /// 64 bit loop indices.
1269   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1270 
1271   /// \return The desired interleave count.
1272   /// If interleave count has been specified by metadata it will be returned.
1273   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1274   /// are the selected vectorization factor and the cost of the selected VF.
1275   unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);
1276 
  /// A memory access instruction may be vectorized in more than one way; the
  /// form it takes after vectorization depends on cost. This function makes
  /// cost-based decisions for Load/Store instructions and collects them in a
  /// map. The decision map is then used to build the lists of loop-uniform
  /// and loop-scalar instructions. The calculated cost is saved with the
  /// widening decision to avoid redundant calculations.
1284   void setCostBasedWideningDecision(ElementCount VF);
1285 
1286   /// A struct that represents some properties of the register usage
1287   /// of a loop.
1288   struct RegisterUsage {
1289     /// Holds the number of loop invariant values that are used in the loop.
1290     /// The key is ClassID of target-provided register class.
1291     SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
1292     /// Holds the maximum number of concurrent live intervals in the loop.
1293     /// The key is ClassID of target-provided register class.
1294     SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
1295   };
1296 
  /// \return Information about the register usage of the loop for the
  /// given vectorization factors.
1299   SmallVector<RegisterUsage, 8>
1300   calculateRegisterUsage(ArrayRef<ElementCount> VFs);
1301 
1302   /// Collect values we want to ignore in the cost model.
1303   void collectValuesToIgnore();
1304 
  /// Split reductions into those that happen in the loop, and those that happen
  /// outside. In-loop reductions are collected into InLoopReductionChains.
1307   void collectInLoopReductions();
1308 
1309   /// Returns true if we should use strict in-order reductions for the given
1310   /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
1311   /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
1312   /// of FP operations.
1313   bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) {
1314     return EnableStrictReductions && !Hints->allowReordering() &&
1315            RdxDesc.isOrdered();
1316   }
1317 
1318   /// \returns The smallest bitwidth each instruction can be represented with.
1319   /// The vector equivalents of these instructions should be truncated to this
1320   /// type.
1321   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1322     return MinBWs;
1323   }
1324 
1325   /// \returns True if it is more profitable to scalarize instruction \p I for
1326   /// vectorization factor \p VF.
1327   bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
1328     assert(VF.isVector() &&
1329            "Profitable to scalarize relevant only for VF > 1.");
1330 
1331     // Cost model is not run in the VPlan-native path - return conservative
1332     // result until this changes.
1333     if (EnableVPlanNativePath)
1334       return false;
1335 
1336     auto Scalars = InstsToScalarize.find(VF);
1337     assert(Scalars != InstsToScalarize.end() &&
1338            "VF not yet analyzed for scalarization profitability");
1339     return Scalars->second.find(I) != Scalars->second.end();
1340   }
1341 
1342   /// Returns true if \p I is known to be uniform after vectorization.
1343   bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
1344     if (VF.isScalar())
1345       return true;
1346 
1347     // Cost model is not run in the VPlan-native path - return conservative
1348     // result until this changes.
1349     if (EnableVPlanNativePath)
1350       return false;
1351 
1352     auto UniformsPerVF = Uniforms.find(VF);
1353     assert(UniformsPerVF != Uniforms.end() &&
1354            "VF not yet analyzed for uniformity");
1355     return UniformsPerVF->second.count(I);
1356   }
1357 
1358   /// Returns true if \p I is known to be scalar after vectorization.
1359   bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
1360     if (VF.isScalar())
1361       return true;
1362 
1363     // Cost model is not run in the VPlan-native path - return conservative
1364     // result until this changes.
1365     if (EnableVPlanNativePath)
1366       return false;
1367 
1368     auto ScalarsPerVF = Scalars.find(VF);
1369     assert(ScalarsPerVF != Scalars.end() &&
1370            "Scalar values are not calculated for VF");
1371     return ScalarsPerVF->second.count(I);
1372   }
1373 
1374   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1375   /// for vectorization factor \p VF.
1376   bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
1377     return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
1378            !isProfitableToScalarize(I, VF) &&
1379            !isScalarAfterVectorization(I, VF);
1380   }
1381 
1382   /// Decision that was taken during cost calculation for memory instruction.
1383   enum InstWidening {
1384     CM_Unknown,
1385     CM_Widen,         // For consecutive accesses with stride +1.
1386     CM_Widen_Reverse, // For consecutive accesses with stride -1.
1387     CM_Interleave,
1388     CM_GatherScatter,
1389     CM_Scalarize
1390   };
1391 
1392   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1393   /// instruction \p I and vector width \p VF.
1394   void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
1395                            InstructionCost Cost) {
1396     assert(VF.isVector() && "Expected VF >=2");
1397     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1398   }
1399 
1400   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1401   /// interleaving group \p Grp and vector width \p VF.
1402   void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
1403                            ElementCount VF, InstWidening W,
1404                            InstructionCost Cost) {
1405     assert(VF.isVector() && "Expected VF >=2");
    // Broadcast this decision to all instructions inside the group, but
    // assign the cost to the insert position instruction only.
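    // E.g. for a group {A, B, C} whose insert position is B, only B carries
    // the computed group cost; A and C are recorded with a cost of 0.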
1408     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1409       if (auto *I = Grp->getMember(i)) {
1410         if (Grp->getInsertPos() == I)
1411           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1412         else
1413           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1414       }
1415     }
1416   }
1417 
1418   /// Return the cost model decision for the given instruction \p I and vector
1419   /// width \p VF. Return CM_Unknown if this instruction did not pass
1420   /// through the cost modeling.
1421   InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
1422     assert(VF.isVector() && "Expected VF to be a vector VF");
1423     // Cost model is not run in the VPlan-native path - return conservative
1424     // result until this changes.
1425     if (EnableVPlanNativePath)
1426       return CM_GatherScatter;
1427 
1428     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1429     auto Itr = WideningDecisions.find(InstOnVF);
1430     if (Itr == WideningDecisions.end())
1431       return CM_Unknown;
1432     return Itr->second.first;
1433   }
1434 
1435   /// Return the vectorization cost for the given instruction \p I and vector
1436   /// width \p VF.
1437   InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
1438     assert(VF.isVector() && "Expected VF >=2");
1439     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1440     assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1441            "The cost is not calculated");
1442     return WideningDecisions[InstOnVF].second;
1443   }
1444 
1445   /// Return True if instruction \p I is an optimizable truncate whose operand
1446   /// is an induction variable. Such a truncate will be removed by adding a new
1447   /// induction variable with the destination type.
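  /// For example, a 'trunc i64 %iv to i32' of an induction variable can be
  /// replaced by a new i32 induction variable, making the truncate redundant.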
1448   bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
1449     // If the instruction is not a truncate, return false.
1450     auto *Trunc = dyn_cast<TruncInst>(I);
1451     if (!Trunc)
1452       return false;
1453 
1454     // Get the source and destination types of the truncate.
1455     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1456     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1457 
1458     // If the truncate is free for the given types, return false. Replacing a
1459     // free truncate with an induction variable would add an induction variable
1460     // update instruction to each iteration of the loop. We exclude from this
1461     // check the primary induction variable since it will need an update
1462     // instruction regardless.
1463     Value *Op = Trunc->getOperand(0);
1464     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1465       return false;
1466 
1467     // If the truncated value is not an induction variable, return false.
1468     return Legal->isInductionPhi(Op);
1469   }
1470 
1471   /// Collects the instructions to scalarize for each predicated instruction in
1472   /// the loop.
1473   void collectInstsToScalarize(ElementCount VF);
1474 
1475   /// Collect Uniform and Scalar values for the given \p VF.
  /// The sets depend on the CM decision for Load/Store instructions that may
  /// be vectorized as interleaved, gather-scatter or scalarized accesses.
1478   void collectUniformsAndScalars(ElementCount VF) {
1479     // Do the analysis once.
1480     if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
1481       return;
1482     setCostBasedWideningDecision(VF);
1483     collectLoopUniforms(VF);
1484     collectLoopScalars(VF);
1485   }
1486 
1487   /// Returns true if the target machine supports masked store operation
1488   /// for the given \p DataType and kind of access to \p Ptr.
1489   bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const {
1490     return Legal->isConsecutivePtr(Ptr) &&
1491            TTI.isLegalMaskedStore(DataType, Alignment);
1492   }
1493 
1494   /// Returns true if the target machine supports masked load operation
1495   /// for the given \p DataType and kind of access to \p Ptr.
1496   bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const {
1497     return Legal->isConsecutivePtr(Ptr) &&
1498            TTI.isLegalMaskedLoad(DataType, Alignment);
1499   }
1500 
1501   /// Returns true if the target machine can represent \p V as a masked gather
1502   /// or scatter operation.
1503   bool isLegalGatherOrScatter(Value *V) {
1504     bool LI = isa<LoadInst>(V);
1505     bool SI = isa<StoreInst>(V);
1506     if (!LI && !SI)
1507       return false;
1508     auto *Ty = getLoadStoreType(V);
1509     Align Align = getLoadStoreAlignment(V);
1510     return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
1511            (SI && TTI.isLegalMaskedScatter(Ty, Align));
1512   }
1513 
1514   /// Returns true if the target machine supports all of the reduction
1515   /// variables found for the given VF.
1516   bool canVectorizeReductions(ElementCount VF) {
1517     return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
1518       const RecurrenceDescriptor &RdxDesc = Reduction.second;
1519       return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
1520     }));
1521   }
1522 
1523   /// Returns true if \p I is an instruction that will be scalarized with
1524   /// predication. Such instructions include conditional stores and
1525   /// instructions that may divide by zero.
  /// If a non-zero VF has been calculated, we check if I will be scalarized
  /// with predication for that VF.
1528   bool isScalarWithPredication(Instruction *I) const;
1529 
1530   // Returns true if \p I is an instruction that will be predicated either
1531   // through scalar predication or masked load/store or masked gather/scatter.
1532   // Superset of instructions that return true for isScalarWithPredication.
1533   bool isPredicatedInst(Instruction *I) {
1534     if (!blockNeedsPredication(I->getParent()))
1535       return false;
1536     // Loads and stores that need some form of masked operation are predicated
1537     // instructions.
1538     if (isa<LoadInst>(I) || isa<StoreInst>(I))
1539       return Legal->isMaskRequired(I);
1540     return isScalarWithPredication(I);
1541   }
1542 
1543   /// Returns true if \p I is a memory instruction with consecutive memory
1544   /// access that can be widened.
1545   bool
1546   memoryInstructionCanBeWidened(Instruction *I,
1547                                 ElementCount VF = ElementCount::getFixed(1));
1548 
1549   /// Returns true if \p I is a memory instruction in an interleaved-group
1550   /// of memory accesses that can be vectorized with wide vector loads/stores
1551   /// and shuffles.
1552   bool
1553   interleavedAccessCanBeWidened(Instruction *I,
1554                                 ElementCount VF = ElementCount::getFixed(1));
1555 
1556   /// Check if \p Instr belongs to any interleaved access group.
1557   bool isAccessInterleaved(Instruction *Instr) {
1558     return InterleaveInfo.isInterleaved(Instr);
1559   }
1560 
1561   /// Get the interleaved access group that \p Instr belongs to.
1562   const InterleaveGroup<Instruction> *
1563   getInterleavedAccessGroup(Instruction *Instr) {
1564     return InterleaveInfo.getInterleaveGroup(Instr);
1565   }
1566 
1567   /// Returns true if we're required to use a scalar epilogue for at least
1568   /// the final iteration of the original loop.
1569   bool requiresScalarEpilogue() const {
1570     if (!isScalarEpilogueAllowed())
1571       return false;
    // If we might exit from anywhere but the latch, we must run the exiting
    // iteration in scalar form.
1574     if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
1575       return true;
1576     return InterleaveInfo.requiresScalarEpilogue();
1577   }
1578 
  /// Returns true if a scalar epilogue is allowed, i.e. it has not been
  /// disallowed due to optsize or a loop hint annotation.
1581   bool isScalarEpilogueAllowed() const {
1582     return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1583   }
1584 
  /// Returns true if all loop blocks should be masked to fold the tail of the
  /// loop.
1586   bool foldTailByMasking() const { return FoldTailByMasking; }
1587 
1588   bool blockNeedsPredication(BasicBlock *BB) const {
1589     return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1590   }
1591 
1592   /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
1593   /// nodes to the chain of instructions representing the reductions. Uses a
1594   /// MapVector to ensure deterministic iteration order.
1595   using ReductionChainMap =
1596       SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;
1597 
1598   /// Return the chain of instructions representing an inloop reduction.
1599   const ReductionChainMap &getInLoopReductionChains() const {
1600     return InLoopReductionChains;
1601   }
1602 
1603   /// Returns true if the Phi is part of an inloop reduction.
1604   bool isInLoopReduction(PHINode *Phi) const {
1605     return InLoopReductionChains.count(Phi);
1606   }
1607 
1608   /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1609   /// with factor VF.  Return the cost of the instruction, including
1610   /// scalarization overhead if it's needed.
1611   InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;
1612 
1613   /// Estimate cost of a call instruction CI if it were vectorized with factor
1614   /// VF. Return the cost of the instruction, including scalarization overhead
1615   /// if it's needed. The flag NeedToScalarize shows if the call needs to be
  /// scalarized - i.e. either a vector version isn't available, or it is too
  /// expensive.
1618   InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
1619                                     bool &NeedToScalarize) const;
1620 
1621   /// Returns true if the per-lane cost of VectorizationFactor A is lower than
1622   /// that of B.
1623   bool isMoreProfitable(const VectorizationFactor &A,
1624                         const VectorizationFactor &B) const;
1625 
1626   /// Invalidates decisions already taken by the cost model.
1627   void invalidateCostModelingDecisions() {
1628     WideningDecisions.clear();
1629     Uniforms.clear();
1630     Scalars.clear();
1631   }
1632 
1633 private:
1634   unsigned NumPredStores = 0;
1635 
1636   /// \return An upper bound for the vectorization factors for both
1637   /// fixed and scalable vectorization, where the minimum-known number of
1638   /// elements is a power-of-2 larger than zero. If scalable vectorization is
1639   /// disabled or unsupported, then the scalable part will be equal to
1640   /// ElementCount::getScalable(0).
1641   FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
1642                                            ElementCount UserVF);
1643 
  /// \return the maximized element count based on the target's vector
1645   /// registers and the loop trip-count, but limited to a maximum safe VF.
1646   /// This is a helper function of computeFeasibleMaxVF.
1647   /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
1648   /// issue that occurred on one of the buildbots which cannot be reproduced
  /// without having access to the proprietary compiler (see comments on
1650   /// D98509). The issue is currently under investigation and this workaround
1651   /// will be removed as soon as possible.
1652   ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
1653                                        unsigned SmallestType,
1654                                        unsigned WidestType,
1655                                        const ElementCount &MaxSafeVF);
1656 
1657   /// \return the maximum legal scalable VF, based on the safe max number
1658   /// of elements.
1659   ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1660 
  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
1668   using VectorizationCostTy = std::pair<InstructionCost, bool>;
1669 
1670   /// Returns the expected execution cost. The unit of the cost does
1671   /// not matter because we use the 'cost' units to compare different
1672   /// vector widths. The cost that is returned is *not* normalized by
  /// the vectorization factor.
1674   VectorizationCostTy expectedCost(ElementCount VF);
1675 
1676   /// Returns the execution time cost of an instruction for a given vector
1677   /// width. Vector width of one means scalar.
1678   VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1679 
1680   /// The cost-computation logic from getInstructionCost which provides
1681   /// the vector type as an output parameter.
1682   InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
1683                                      Type *&VectorTy);
1684 
1685   /// Return the cost of instructions in an inloop reduction pattern, if I is
1686   /// part of that pattern.
1687   InstructionCost getReductionPatternCost(Instruction *I, ElementCount VF,
1688                                           Type *VectorTy,
1689                                           TTI::TargetCostKind CostKind);
1690 
1691   /// Calculate vectorization cost of memory instruction \p I.
1692   InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1693 
1694   /// The cost computation for scalarized memory instruction.
1695   InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1696 
1697   /// The cost computation for interleaving group of memory instructions.
1698   InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1699 
1700   /// The cost computation for Gather/Scatter instruction.
1701   InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1702 
1703   /// The cost computation for widening instruction \p I with consecutive
1704   /// memory access.
1705   InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1706 
  /// The cost calculation for Load/Store instruction \p I with a uniform
  /// pointer:
  /// Load: scalar load + broadcast.
  /// Store: scalar store + (loop-invariant value stored ? 0 : extract of last
  /// element).
1711   InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1712 
1713   /// Estimate the overhead of scalarizing an instruction. This is a
1714   /// convenience wrapper for the type-based getScalarizationOverhead API.
1715   InstructionCost getScalarizationOverhead(Instruction *I,
1716                                            ElementCount VF) const;
1717 
  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
1720   bool isConsecutiveLoadOrStore(Instruction *I);
1721 
1722   /// Returns true if an artificially high cost for emulated masked memrefs
1723   /// should be used.
1724   bool useEmulatedMaskMemRefHack(Instruction *I);
1725 
1726   /// Map of scalar integer values to the smallest bitwidth they can be legally
1727   /// represented as. The vector equivalents of these values should be truncated
1728   /// to this type.
1729   MapVector<Instruction *, uint64_t> MinBWs;
1730 
1731   /// A type representing the costs for instructions if they were to be
1732   /// scalarized rather than vectorized. The entries are Instruction-Cost
1733   /// pairs.
1734   using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1735 
  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
1738   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1739 
1740   /// Records whether it is allowed to have the original scalar loop execute at
1741   /// least once. This may be needed as a fallback loop in case runtime
1742   /// aliasing/dependence checks fail, or to handle the tail/remainder
  /// iterations when the trip count is unknown or is not a multiple of the VF,
1744   /// or as a peel-loop to handle gaps in interleave-groups.
1745   /// Under optsize and when the trip count is very small we don't allow any
1746   /// iterations to execute in the scalar loop.
1747   ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1748 
  /// All blocks of the loop are to be masked to fold the tail of the scalar
  /// iterations.
1750   bool FoldTailByMasking = false;
1751 
1752   /// A map holding scalar costs for different vectorization factors. The
1753   /// presence of a cost for an instruction in the mapping indicates that the
1754   /// instruction will be scalarized when vectorizing with the associated
1755   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1756   DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1757 
1758   /// Holds the instructions known to be uniform after vectorization.
1759   /// The data is collected per VF.
1760   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1761 
1762   /// Holds the instructions known to be scalar after vectorization.
1763   /// The data is collected per VF.
1764   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1765 
1766   /// Holds the instructions (address computations) that are forced to be
1767   /// scalarized.
1768   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1769 
1770   /// PHINodes of the reductions that should be expanded in-loop along with
1771   /// their associated chains of reduction operations, in program order from top
  /// (PHI) to bottom.
1773   ReductionChainMap InLoopReductionChains;
1774 
1775   /// A Map of inloop reduction operations and their immediate chain operand.
1776   /// FIXME: This can be removed once reductions can be costed correctly in
1777   /// vplan. This was added to allow quick lookup to the inloop operations,
1778   /// without having to loop through InLoopReductionChains.
1779   DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1780 
1781   /// Returns the expected difference in cost from scalarizing the expression
1782   /// feeding a predicated instruction \p PredInst. The instructions to
1783   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1784   /// non-negative return value implies the expression will be scalarized.
1785   /// Currently, only single-use chains are considered for scalarization.
1786   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1787                               ElementCount VF);
1788 
1789   /// Collect the instructions that are uniform after vectorization. An
1790   /// instruction is uniform if we represent it with a single scalar value in
1791   /// the vectorized loop corresponding to each vector iteration. Examples of
1792   /// uniform instructions include pointer operands of consecutive or
1793   /// interleaved memory accesses. Note that although uniformity implies an
1794   /// instruction will be scalar, the reverse is not true. In general, a
1795   /// scalarized instruction will be represented by VF scalar values in the
1796   /// vectorized loop, each corresponding to an iteration of the original
1797   /// scalar loop.
1798   void collectLoopUniforms(ElementCount VF);
1799 
1800   /// Collect the instructions that are scalar after vectorization. An
1801   /// instruction is scalar if it is known to be uniform or will be scalarized
1802   /// during vectorization. Non-uniform scalarized instructions will be
1803   /// represented by VF values in the vectorized loop, each corresponding to an
1804   /// iteration of the original scalar loop.
1805   void collectLoopScalars(ElementCount VF);
1806 
1807   /// Keeps cost model vectorization decision and cost for instructions.
1808   /// Right now it is used for memory instructions only.
1809   using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1810                                 std::pair<InstWidening, InstructionCost>>;
1811 
1812   DecisionList WideningDecisions;
1813 
1814   /// Returns true if \p V is expected to be vectorized and it needs to be
1815   /// extracted.
1816   bool needsExtract(Value *V, ElementCount VF) const {
1817     Instruction *I = dyn_cast<Instruction>(V);
1818     if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1819         TheLoop->isLoopInvariant(I))
1820       return false;
1821 
1822     // Assume we can vectorize V (and hence we need extraction) if the
1823     // scalars are not computed yet. This can happen, because it is called
1824     // via getScalarizationOverhead from setCostBasedWideningDecision, before
1825     // the scalars are collected. That should be a safe assumption in most
1826     // cases, because we check if the operands have vectorizable types
1827     // beforehand in LoopVectorizationLegality.
1828     return Scalars.find(VF) == Scalars.end() ||
1829            !isScalarAfterVectorization(I, VF);
1830   };
1831 
1832   /// Returns a range containing only operands needing to be extracted.
1833   SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1834                                                    ElementCount VF) const {
1835     return SmallVector<Value *, 4>(make_filter_range(
1836         Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
1837   }
1838 
1839   /// Determines if we have the infrastructure to vectorize loop \p L and its
1840   /// epilogue, assuming the main loop is vectorized by \p VF.
1841   bool isCandidateForEpilogueVectorization(const Loop &L,
1842                                            const ElementCount VF) const;
1843 
1844   /// Returns true if epilogue vectorization is considered profitable, and
1845   /// false otherwise.
1846   /// \p VF is the vectorization factor chosen for the original loop.
1847   bool isEpilogueVectorizationProfitable(const ElementCount VF) const;
1848 
1849 public:
1850   /// The loop that we evaluate.
1851   Loop *TheLoop;
1852 
1853   /// Predicated scalar evolution analysis.
1854   PredicatedScalarEvolution &PSE;
1855 
1856   /// Loop Info analysis.
1857   LoopInfo *LI;
1858 
1859   /// Vectorization legality.
1860   LoopVectorizationLegality *Legal;
1861 
1862   /// Vector target information.
1863   const TargetTransformInfo &TTI;
1864 
1865   /// Target Library Info.
1866   const TargetLibraryInfo *TLI;
1867 
1868   /// Demanded bits analysis.
1869   DemandedBits *DB;
1870 
1871   /// Assumption cache.
1872   AssumptionCache *AC;
1873 
1874   /// Interface to emit optimization remarks.
1875   OptimizationRemarkEmitter *ORE;
1876 
1877   const Function *TheFunction;
1878 
1879   /// Loop Vectorize Hint.
1880   const LoopVectorizeHints *Hints;
1881 
  /// The interleave access information contains groups of interleaved accesses
  /// with the same stride that are close to each other.
1884   InterleavedAccessInfo &InterleaveInfo;
1885 
1886   /// Values to ignore in the cost model.
1887   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1888 
1889   /// Values to ignore in the cost model when VF > 1.
1890   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1891 
1892   /// Profitable vector factors.
1893   SmallVector<VectorizationFactor, 8> ProfitableVFs;
1894 };
1895 } // end namespace llvm
1896 
1897 /// Helper struct to manage generating runtime checks for vectorization.
1898 ///
/// The runtime checks are created up-front in temporary blocks, un-linked from
/// the existing IR, to allow better cost estimation. After deciding to
/// vectorize, the checks are moved back into the IR. If we decide not to
/// vectorize, the temporary blocks are completely removed.
1903 class GeneratedRTChecks {
1904   /// Basic block which contains the generated SCEV checks, if any.
1905   BasicBlock *SCEVCheckBlock = nullptr;
1906 
1907   /// The value representing the result of the generated SCEV checks. If it is
1908   /// nullptr, either no SCEV checks have been generated or they have been used.
1909   Value *SCEVCheckCond = nullptr;
1910 
1911   /// Basic block which contains the generated memory runtime checks, if any.
1912   BasicBlock *MemCheckBlock = nullptr;
1913 
1914   /// The value representing the result of the generated memory runtime checks.
1915   /// If it is nullptr, either no memory runtime checks have been generated or
1916   /// they have been used.
1917   Instruction *MemRuntimeCheckCond = nullptr;
1918 
1919   DominatorTree *DT;
1920   LoopInfo *LI;
1921 
1922   SCEVExpander SCEVExp;
1923   SCEVExpander MemCheckExp;
1924 
1925 public:
1926   GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI,
1927                     const DataLayout &DL)
1928       : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"),
1929         MemCheckExp(SE, DL, "scev.check") {}
1930 
1931   /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
1932   /// accurately estimate the cost of the runtime checks. The blocks are
  /// un-linked from the IR and are added back during vector code generation. If
1934   /// there is no vector code generation, the check blocks are removed
1935   /// completely.
1936   void Create(Loop *L, const LoopAccessInfo &LAI,
1937               const SCEVUnionPredicate &UnionPred) {
1938 
1939     BasicBlock *LoopHeader = L->getHeader();
1940     BasicBlock *Preheader = L->getLoopPreheader();
1941 
1942     // Use SplitBlock to create blocks for SCEV & memory runtime checks to
1943     // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
1944     // may be used by SCEVExpander. The blocks will be un-linked from their
1945     // predecessors and removed from LI & DT at the end of the function.
1946     if (!UnionPred.isAlwaysTrue()) {
1947       SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
1948                                   nullptr, "vector.scevcheck");
1949 
1950       SCEVCheckCond = SCEVExp.expandCodeForPredicate(
1951           &UnionPred, SCEVCheckBlock->getTerminator());
1952     }
1953 
1954     const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
1955     if (RtPtrChecking.Need) {
1956       auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
1957       MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
1958                                  "vector.memcheck");
1959 
1960       std::tie(std::ignore, MemRuntimeCheckCond) =
1961           addRuntimeChecks(MemCheckBlock->getTerminator(), L,
1962                            RtPtrChecking.getChecks(), MemCheckExp);
1963       assert(MemRuntimeCheckCond &&
1964              "no RT checks generated although RtPtrChecking "
1965              "claimed checks are required");
1966     }
1967 
1968     if (!MemCheckBlock && !SCEVCheckBlock)
1969       return;
1970 
    // Unhook the temporary blocks containing the checks and update the
    // various places accordingly.
1973     if (SCEVCheckBlock)
1974       SCEVCheckBlock->replaceAllUsesWith(Preheader);
1975     if (MemCheckBlock)
1976       MemCheckBlock->replaceAllUsesWith(Preheader);
1977 
1978     if (SCEVCheckBlock) {
1979       SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
1980       new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
1981       Preheader->getTerminator()->eraseFromParent();
1982     }
1983     if (MemCheckBlock) {
1984       MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
1985       new UnreachableInst(Preheader->getContext(), MemCheckBlock);
1986       Preheader->getTerminator()->eraseFromParent();
1987     }
1988 
1989     DT->changeImmediateDominator(LoopHeader, Preheader);
1990     if (MemCheckBlock) {
1991       DT->eraseNode(MemCheckBlock);
1992       LI->removeBlock(MemCheckBlock);
1993     }
1994     if (SCEVCheckBlock) {
1995       DT->eraseNode(SCEVCheckBlock);
1996       LI->removeBlock(SCEVCheckBlock);
1997     }
1998   }
1999 
2000   /// Remove the created SCEV & memory runtime check blocks & instructions, if
2001   /// unused.
2002   ~GeneratedRTChecks() {
2003     SCEVExpanderCleaner SCEVCleaner(SCEVExp, *DT);
2004     SCEVExpanderCleaner MemCheckCleaner(MemCheckExp, *DT);
2005     if (!SCEVCheckCond)
2006       SCEVCleaner.markResultUsed();
2007 
2008     if (!MemRuntimeCheckCond)
2009       MemCheckCleaner.markResultUsed();
2010 
2011     if (MemRuntimeCheckCond) {
2012       auto &SE = *MemCheckExp.getSE();
2013       // Memory runtime check generation creates compares that use expanded
2014       // values. Remove them before running the SCEVExpanderCleaners.
2015       for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
2016         if (MemCheckExp.isInsertedInstruction(&I))
2017           continue;
2018         SE.forgetValue(&I);
2019         SE.eraseValueFromMap(&I);
2020         I.eraseFromParent();
2021       }
2022     }
2023     MemCheckCleaner.cleanup();
2024     SCEVCleaner.cleanup();
2025 
2026     if (SCEVCheckCond)
2027       SCEVCheckBlock->eraseFromParent();
2028     if (MemRuntimeCheckCond)
2029       MemCheckBlock->eraseFromParent();
2030   }
2031 
2032   /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and
2033   /// adjusts the branches to branch to the vector preheader or \p Bypass,
2034   /// depending on the generated condition.
2035   BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass,
2036                              BasicBlock *LoopVectorPreHeader,
2037                              BasicBlock *LoopExitBlock) {
2038     if (!SCEVCheckCond)
2039       return nullptr;
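    // If the SCEV check condition folds to a constant false, the checks can
    // never fail, so there is no need to emit the check block.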
2040     if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond))
2041       if (C->isZero())
2042         return nullptr;
2043 
2044     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2045 
2046     BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock);
    // If the vector loop preheader is itself contained in a loop, the SCEV
    // check block becomes part of that loop as well.
2048     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2049       PL->addBasicBlockToLoop(SCEVCheckBlock, *LI);
2050 
2051     SCEVCheckBlock->getTerminator()->eraseFromParent();
2052     SCEVCheckBlock->moveBefore(LoopVectorPreHeader);
2053     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2054                                                 SCEVCheckBlock);
2055 
2056     DT->addNewBlock(SCEVCheckBlock, Pred);
2057     DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock);
2058 
2059     ReplaceInstWithInst(
2060         SCEVCheckBlock->getTerminator(),
2061         BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond));
2062     // Mark the check as used, to prevent it from being removed during cleanup.
2063     SCEVCheckCond = nullptr;
2064     return SCEVCheckBlock;
2065   }
2066 
2067   /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts
2068   /// the branches to branch to the vector preheader or \p Bypass, depending on
2069   /// the generated condition.
2070   BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass,
2071                                    BasicBlock *LoopVectorPreHeader) {
    // Check if we generated code that checks at runtime whether arrays overlap.
2073     if (!MemRuntimeCheckCond)
2074       return nullptr;
2075 
2076     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2077     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2078                                                 MemCheckBlock);
2079 
2080     DT->addNewBlock(MemCheckBlock, Pred);
2081     DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock);
2082     MemCheckBlock->moveBefore(LoopVectorPreHeader);
2083 
2084     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2085       PL->addBasicBlockToLoop(MemCheckBlock, *LI);
2086 
2087     ReplaceInstWithInst(
2088         MemCheckBlock->getTerminator(),
2089         BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond));
2090     MemCheckBlock->getTerminator()->setDebugLoc(
2091         Pred->getTerminator()->getDebugLoc());
2092 
2093     // Mark the check as used, to prevent it from being removed during cleanup.
2094     MemRuntimeCheckCond = nullptr;
2095     return MemCheckBlock;
2096   }
2097 };
2098 
2099 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
2100 // vectorization. The loop needs to be annotated with #pragma omp simd
// simdlen(#) or #pragma clang loop vectorize(enable) vectorize_width(#). If the
2102 // vector length information is not provided, vectorization is not considered
2103 // explicit. Interleave hints are not allowed either. These limitations will be
2104 // relaxed in the future.
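// For example, an outer loop annotated with
//   #pragma clang loop vectorize(enable) vectorize_width(4)
// or with '#pragma omp simd simdlen(4)' is considered for explicit outer-loop
// vectorization here.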
// Please note that we are currently forced to abuse the pragma 'clang
2106 // vectorize' semantics. This pragma provides *auto-vectorization hints*
2107 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
2108 // provides *explicit vectorization hints* (LV can bypass legal checks and
2109 // assume that vectorization is legal). However, both hints are implemented
2110 // using the same metadata (llvm.loop.vectorize, processed by
2111 // LoopVectorizeHints). This will be fixed in the future when the native IR
2112 // representation for pragma 'omp simd' is introduced.
2113 static bool isExplicitVecOuterLoop(Loop *OuterLp,
2114                                    OptimizationRemarkEmitter *ORE) {
2115   assert(!OuterLp->isInnermost() && "This is not an outer loop");
2116   LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
2117 
2118   // Only outer loops with an explicit vectorization hint are supported.
2119   // Unannotated outer loops are ignored.
2120   if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
2121     return false;
2122 
2123   Function *Fn = OuterLp->getHeader()->getParent();
2124   if (!Hints.allowVectorization(Fn, OuterLp,
2125                                 true /*VectorizeOnlyWhenForced*/)) {
2126     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
2127     return false;
2128   }
2129 
2130   if (Hints.getInterleave() > 1) {
2131     // TODO: Interleave support is future work.
2132     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
2133                          "outer loops.\n");
2134     Hints.emitRemarkWithHints();
2135     return false;
2136   }
2137 
2138   return true;
2139 }
2140 
2141 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
2142                                   OptimizationRemarkEmitter *ORE,
2143                                   SmallVectorImpl<Loop *> &V) {
2144   // Collect inner loops and outer loops without irreducible control flow. For
2145   // now, only collect outer loops that have explicit vectorization hints. If we
2146   // are stress testing the VPlan H-CFG construction, we collect the outermost
2147   // loop of every loop nest.
2148   if (L.isInnermost() || VPlanBuildStressTest ||
2149       (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
2150     LoopBlocksRPO RPOT(&L);
2151     RPOT.perform(LI);
2152     if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
2153       V.push_back(&L);
2154       // TODO: Collect inner loops inside marked outer loops in case
2155       // vectorization fails for the outer loop. Do not invoke
2156       // 'containsIrreducibleCFG' again for inner loops when the outer loop is
2157       // already known to be reducible. We can use an inherited attribute for
2158       // that.
2159       return;
2160     }
2161   }
2162   for (Loop *InnerL : L)
2163     collectSupportedLoops(*InnerL, LI, ORE, V);
2164 }
2165 
2166 namespace {
2167 
2168 /// The LoopVectorize Pass.
2169 struct LoopVectorize : public FunctionPass {
2170   /// Pass identification, replacement for typeid
2171   static char ID;
2172 
2173   LoopVectorizePass Impl;
2174 
2175   explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
2176                          bool VectorizeOnlyWhenForced = false)
2177       : FunctionPass(ID),
2178         Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
2179     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
2180   }
2181 
2182   bool runOnFunction(Function &F) override {
2183     if (skipFunction(F))
2184       return false;
2185 
2186     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2187     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2188     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
2189     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2190     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
2191     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
2192     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
2193     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2194     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2195     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
2196     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
2197     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2198     auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
2199 
2200     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
2201         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
2202 
2203     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
2204                         GetLAA, *ORE, PSI).MadeAnyChange;
2205   }
2206 
2207   void getAnalysisUsage(AnalysisUsage &AU) const override {
2208     AU.addRequired<AssumptionCacheTracker>();
2209     AU.addRequired<BlockFrequencyInfoWrapperPass>();
2210     AU.addRequired<DominatorTreeWrapperPass>();
2211     AU.addRequired<LoopInfoWrapperPass>();
2212     AU.addRequired<ScalarEvolutionWrapperPass>();
2213     AU.addRequired<TargetTransformInfoWrapperPass>();
2214     AU.addRequired<AAResultsWrapperPass>();
2215     AU.addRequired<LoopAccessLegacyAnalysis>();
2216     AU.addRequired<DemandedBitsWrapperPass>();
2217     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2218     AU.addRequired<InjectTLIMappingsLegacy>();
2219 
2220     // We currently do not preserve loopinfo/dominator analyses with outer loop
2221     // vectorization. Until this is addressed, mark these analyses as preserved
2222     // only for non-VPlan-native path.
2223     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
2224     if (!EnableVPlanNativePath) {
2225       AU.addPreserved<LoopInfoWrapperPass>();
2226       AU.addPreserved<DominatorTreeWrapperPass>();
2227     }
2228 
2229     AU.addPreserved<BasicAAWrapperPass>();
2230     AU.addPreserved<GlobalsAAWrapperPass>();
2231     AU.addRequired<ProfileSummaryInfoWrapperPass>();
2232   }
2233 };
2234 
2235 } // end anonymous namespace
2236 
2237 //===----------------------------------------------------------------------===//
2238 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
2239 // LoopVectorizationCostModel and LoopVectorizationPlanner.
2240 //===----------------------------------------------------------------------===//
2241 
2242 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
  // We need to place the broadcast of invariant variables outside the loop,
  // but only if it's proven safe to do so. Otherwise, the broadcast will be
  // placed inside the vector loop body.
2246   Instruction *Instr = dyn_cast<Instruction>(V);
2247   bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
2248                      (!Instr ||
2249                       DT->dominates(Instr->getParent(), LoopVectorPreHeader));
2250   // Place the code for broadcasting invariant variables in the new preheader.
2251   IRBuilder<>::InsertPointGuard Guard(Builder);
2252   if (SafeToHoist)
2253     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2254 
2255   // Broadcast the scalar into all locations in the vector.
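  // Note: CreateVectorSplat typically lowers this to an insertelement of the
  // scalar into lane 0 followed by a zero-mask shufflevector.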
2256   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
2257 
2258   return Shuf;
2259 }
2260 
2261 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
2262     const InductionDescriptor &II, Value *Step, Value *Start,
2263     Instruction *EntryVal, VPValue *Def, VPValue *CastDef,
2264     VPTransformState &State) {
2265   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
2266          "Expected either an induction phi-node or a truncate of it!");
2267 
2268   // Construct the initial value of the vector IV in the vector loop preheader
2269   auto CurrIP = Builder.saveIP();
2270   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2271   if (isa<TruncInst>(EntryVal)) {
2272     assert(Start->getType()->isIntegerTy() &&
2273            "Truncation requires an integer type");
2274     auto *TruncType = cast<IntegerType>(EntryVal->getType());
2275     Step = Builder.CreateTrunc(Step, TruncType);
2276     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
2277   }
2278   Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
2279   Value *SteppedStart =
2280       getStepVector(SplatStart, 0, Step, II.getInductionOpcode());
2281 
2282   // We create vector phi nodes for both integer and floating-point induction
2283   // variables. Here, we determine the kind of arithmetic we will perform.
2284   Instruction::BinaryOps AddOp;
2285   Instruction::BinaryOps MulOp;
2286   if (Step->getType()->isIntegerTy()) {
2287     AddOp = Instruction::Add;
2288     MulOp = Instruction::Mul;
2289   } else {
2290     AddOp = II.getInductionOpcode();
2291     MulOp = Instruction::FMul;
2292   }
2293 
2294   // Multiply the vectorization factor by the step using integer or
2295   // floating-point arithmetic as appropriate.
2296   Type *StepType = Step->getType();
2297   if (Step->getType()->isFloatingPointTy())
2298     StepType = IntegerType::get(StepType->getContext(),
2299                                 StepType->getScalarSizeInBits());
2300   Value *RuntimeVF = getRuntimeVF(Builder, StepType, VF);
2301   if (Step->getType()->isFloatingPointTy())
2302     RuntimeVF = Builder.CreateSIToFP(RuntimeVF, Step->getType());
2303   Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF);
2304 
2305   // Create a vector splat to use in the induction update.
2306   //
2307   // FIXME: If the step is non-constant, we create the vector splat with
2308   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
2309   //        handle a constant vector splat.
2310   Value *SplatVF = isa<Constant>(Mul)
2311                        ? ConstantVector::getSplat(VF, cast<Constant>(Mul))
2312                        : Builder.CreateVectorSplat(VF, Mul);
2313   Builder.restoreIP(CurrIP);
2314 
2315   // We may need to add the step a number of times, depending on the unroll
2316   // factor. The last of those goes into the PHI.
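  // For example, with UF = 2 the value recorded for part 0 is the phi itself,
  // part 1 is (phi + VF * Step), and the value fed back into the phi from the
  // latch is (phi + 2 * VF * Step).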
2317   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
2318                                     &*LoopVectorBody->getFirstInsertionPt());
2319   VecInd->setDebugLoc(EntryVal->getDebugLoc());
2320   Instruction *LastInduction = VecInd;
2321   for (unsigned Part = 0; Part < UF; ++Part) {
2322     State.set(Def, LastInduction, Part);
2323 
2324     if (isa<TruncInst>(EntryVal))
2325       addMetadata(LastInduction, EntryVal);
2326     recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, CastDef,
2327                                           State, Part);
2328 
2329     LastInduction = cast<Instruction>(
2330         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"));
2331     LastInduction->setDebugLoc(EntryVal->getDebugLoc());
2332   }
2333 
2334   // Move the last step to the end of the latch block. This ensures consistent
2335   // placement of all induction updates.
2336   auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
2337   auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
2338   auto *ICmp = cast<Instruction>(Br->getCondition());
2339   LastInduction->moveBefore(ICmp);
2340   LastInduction->setName("vec.ind.next");
2341 
2342   VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
2343   VecInd->addIncoming(LastInduction, LoopVectorLatch);
2344 }
2345 
2346 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
2347   return Cost->isScalarAfterVectorization(I, VF) ||
2348          Cost->isProfitableToScalarize(I, VF);
2349 }
2350 
2351 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
2352   if (shouldScalarizeInstruction(IV))
2353     return true;
2354   auto isScalarInst = [&](User *U) -> bool {
2355     auto *I = cast<Instruction>(U);
2356     return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
2357   };
2358   return llvm::any_of(IV->users(), isScalarInst);
2359 }
2360 
2361 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast(
2362     const InductionDescriptor &ID, const Instruction *EntryVal,
2363     Value *VectorLoopVal, VPValue *CastDef, VPTransformState &State,
2364     unsigned Part, unsigned Lane) {
2365   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
2366          "Expected either an induction phi-node or a truncate of it!");
2367 
  // This induction variable is not the phi from the original loop but the
  // newly-created IV, based on the proof that the casted Phi is equal to the
  // uncasted Phi in the vectorized loop (possibly under a runtime guard). It
  // re-uses the same InductionDescriptor as the original IV, but we don't
  // have to do any recording in this case - that is done when the original IV
  // is processed.
2374   if (isa<TruncInst>(EntryVal))
2375     return;
2376 
2377   const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
2378   if (Casts.empty())
2379     return;
2380   // Only the first Cast instruction in the Casts vector is of interest.
2381   // The rest of the Casts (if any exist) have no uses outside the
2382   // induction update chain itself.
2383   if (Lane < UINT_MAX)
2384     State.set(CastDef, VectorLoopVal, VPIteration(Part, Lane));
2385   else
2386     State.set(CastDef, VectorLoopVal, Part);
2387 }
2388 
2389 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, Value *Start,
2390                                                 TruncInst *Trunc, VPValue *Def,
2391                                                 VPValue *CastDef,
2392                                                 VPTransformState &State) {
2393   assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
2394          "Primary induction variable must have an integer type");
2395 
2396   auto II = Legal->getInductionVars().find(IV);
2397   assert(II != Legal->getInductionVars().end() && "IV is not an induction");
2398 
2399   auto ID = II->second;
2400   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
2401 
2402   // The value from the original loop to which we are mapping the new induction
2403   // variable.
2404   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
2405 
2406   auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
2407 
2408   // Generate code for the induction step. Note that induction steps are
2409   // required to be loop-invariant.
2410   auto CreateStepValue = [&](const SCEV *Step) -> Value * {
2411     assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) &&
2412            "Induction step should be loop invariant");
2413     if (PSE.getSE()->isSCEVable(IV->getType())) {
2414       SCEVExpander Exp(*PSE.getSE(), DL, "induction");
2415       return Exp.expandCodeFor(Step, Step->getType(),
2416                                LoopVectorPreHeader->getTerminator());
2417     }
2418     return cast<SCEVUnknown>(Step)->getValue();
2419   };
2420 
2421   // The scalar value to broadcast. This is derived from the canonical
2422   // induction variable. If a truncation type is given, truncate the canonical
2423   // induction variable and step. Otherwise, derive these values from the
2424   // induction descriptor.
2425   auto CreateScalarIV = [&](Value *&Step) -> Value * {
2426     Value *ScalarIV = Induction;
2427     if (IV != OldInduction) {
2428       ScalarIV = IV->getType()->isIntegerTy()
2429                      ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
2430                      : Builder.CreateCast(Instruction::SIToFP, Induction,
2431                                           IV->getType());
2432       ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID);
2433       ScalarIV->setName("offset.idx");
2434     }
2435     if (Trunc) {
2436       auto *TruncType = cast<IntegerType>(Trunc->getType());
2437       assert(Step->getType()->isIntegerTy() &&
2438              "Truncation requires an integer step");
2439       ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
2440       Step = Builder.CreateTrunc(Step, TruncType);
2441     }
2442     return ScalarIV;
2443   };
2444 
2445   // Create the vector values from the scalar IV, for the case in which we do
2446   // not create a vector IV.
2447   auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) {
2448     Value *Broadcasted = getBroadcastInstrs(ScalarIV);
2449     for (unsigned Part = 0; Part < UF; ++Part) {
2450       assert(!VF.isScalable() && "scalable vectors not yet supported.");
2451       Value *EntryPart =
2452           getStepVector(Broadcasted, VF.getKnownMinValue() * Part, Step,
2453                         ID.getInductionOpcode());
2454       State.set(Def, EntryPart, Part);
2455       if (Trunc)
2456         addMetadata(EntryPart, Trunc);
2457       recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, CastDef,
2458                                             State, Part);
2459     }
2460   };
2461 
2462   // Fast-math-flags propagate from the original induction instruction.
2463   IRBuilder<>::FastMathFlagGuard FMFG(Builder);
2464   if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp()))
2465     Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags());
2466 
2467   // Now do the actual transformations, and start with creating the step value.
2468   Value *Step = CreateStepValue(ID.getStep());
2469   if (VF.isZero() || VF.isScalar()) {
2470     Value *ScalarIV = CreateScalarIV(Step);
2471     CreateSplatIV(ScalarIV, Step);
2472     return;
2473   }
2474 
2475   // Determine if we want a scalar version of the induction variable. This is
2476   // true if the induction variable itself is not widened, or if it has at
2477   // least one user in the loop that is not widened.
2478   auto NeedsScalarIV = needsScalarInduction(EntryVal);
2479   if (!NeedsScalarIV) {
2480     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
2481                                     State);
2482     return;
2483   }
2484 
2485   // Try to create a new independent vector induction variable. If we can't
2486   // create the phi node, we will splat the scalar induction variable in each
2487   // loop iteration.
2488   if (!shouldScalarizeInstruction(EntryVal)) {
2489     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
2490                                     State);
2491     Value *ScalarIV = CreateScalarIV(Step);
2492     // Create scalar steps that can be used by instructions we will later
2493     // scalarize. Note that the addition of the scalar steps will not increase
2494     // the number of instructions in the loop in the common case prior to
2495     // InstCombine. We will be trading one vector extract for each scalar step.
2496     buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
2497     return;
2498   }
2499 
2500   // All IV users are scalar instructions, so only emit a scalar IV, not a
2501   // vectorized IV. The exception is when we fold the tail by masking; then
2502   // the splat IV feeds the predicate used by the masked loads/stores.
2503   Value *ScalarIV = CreateScalarIV(Step);
2504   if (!Cost->isScalarEpilogueAllowed())
2505     CreateSplatIV(ScalarIV, Step);
2506   buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
2507 }
2508 
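// Produce a vector whose lane i contains Val[i] <op> (StartIdx + i) * Step,
// where <op> is Add for integer inductions and the given FAdd/FSub opcode for
// floating-point inductions.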
2509 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
2510                                           Instruction::BinaryOps BinOp) {
2511   // Create and check the types.
2512   auto *ValVTy = cast<VectorType>(Val->getType());
2513   ElementCount VLen = ValVTy->getElementCount();
2514 
2515   Type *STy = Val->getType()->getScalarType();
2516   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
2517          "Induction Step must be an integer or FP");
2518   assert(Step->getType() == STy && "Step has wrong type");
2519 
2520   SmallVector<Constant *, 8> Indices;
2521 
2522   // Create a vector of consecutive numbers from zero to VF - 1.
2523   VectorType *InitVecValVTy = ValVTy;
2524   Type *InitVecValSTy = STy;
2525   if (STy->isFloatingPointTy()) {
2526     InitVecValSTy =
2527         IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
2528     InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
2529   }
2530   Value *InitVec = Builder.CreateStepVector(InitVecValVTy);
2531 
2532   // Add on StartIdx
2533   Value *StartIdxSplat = Builder.CreateVectorSplat(
2534       VLen, ConstantInt::get(InitVecValSTy, StartIdx));
2535   InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);
2536 
2537   if (STy->isIntegerTy()) {
2538     Step = Builder.CreateVectorSplat(VLen, Step);
2539     assert(Step->getType() == Val->getType() && "Invalid step vec");
2540     // FIXME: The newly created binary instructions should contain nsw/nuw
2541     //        flags, which can be found from the original scalar operations.
2542     Step = Builder.CreateMul(InitVec, Step);
2543     return Builder.CreateAdd(Val, Step, "induction");
2544   }
2545 
2546   // Floating point induction.
2547   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
2548          "Binary Opcode should be specified for FP induction");
2549   InitVec = Builder.CreateUIToFP(InitVec, ValVTy);
2550   Step = Builder.CreateVectorSplat(VLen, Step);
2551   Value *MulOp = Builder.CreateFMul(InitVec, Step);
2552   return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
2553 }
2554 
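// Compute the scalar induction steps: for each unroll part and lane, emit
// ScalarIV + (Part * VF + Lane) * Step (using the corresponding FP operations
// for floating-point inductions) and record the result in State so that
// instructions scalarized later can use it.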
2555 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
2556                                            Instruction *EntryVal,
2557                                            const InductionDescriptor &ID,
2558                                            VPValue *Def, VPValue *CastDef,
2559                                            VPTransformState &State) {
2560   // We shouldn't have to build scalar steps if we aren't vectorizing.
2561   assert(VF.isVector() && "VF should be greater than one");
2562   // Get the value type and ensure it and the step have the same type.
2563   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
2564   assert(ScalarIVTy == Step->getType() &&
2565          "Val and Step should have the same type");
2566 
2567   // We build scalar steps for both integer and floating-point induction
2568   // variables. Here, we determine the kind of arithmetic we will perform.
2569   Instruction::BinaryOps AddOp;
2570   Instruction::BinaryOps MulOp;
2571   if (ScalarIVTy->isIntegerTy()) {
2572     AddOp = Instruction::Add;
2573     MulOp = Instruction::Mul;
2574   } else {
2575     AddOp = ID.getInductionOpcode();
2576     MulOp = Instruction::FMul;
2577   }
2578 
2579   // Determine the number of scalars we need to generate for each unroll
2580   // iteration. If EntryVal is uniform, we only need to generate the first
2581   // lane. Otherwise, we generate all VF values.
2582   bool IsUniform =
2583       Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF);
2584   unsigned Lanes = IsUniform ? 1 : VF.getKnownMinValue();
2585   // Compute the scalar steps and save the results in State.
2586   Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
2587                                      ScalarIVTy->getScalarSizeInBits());
2588   Type *VecIVTy = nullptr;
2589   Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr;
2590   if (!IsUniform && VF.isScalable()) {
2591     VecIVTy = VectorType::get(ScalarIVTy, VF);
2592     UnitStepVec = Builder.CreateStepVector(VectorType::get(IntStepTy, VF));
2593     SplatStep = Builder.CreateVectorSplat(VF, Step);
2594     SplatIV = Builder.CreateVectorSplat(VF, ScalarIV);
2595   }
2596 
2597   for (unsigned Part = 0; Part < UF; ++Part) {
2598     Value *StartIdx0 =
2599         createStepForVF(Builder, ConstantInt::get(IntStepTy, Part), VF);
2600 
2601     if (!IsUniform && VF.isScalable()) {
2602       auto *SplatStartIdx = Builder.CreateVectorSplat(VF, StartIdx0);
2603       auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec);
2604       if (ScalarIVTy->isFloatingPointTy())
2605         InitVec = Builder.CreateSIToFP(InitVec, VecIVTy);
2606       auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep);
2607       auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul);
2608       State.set(Def, Add, Part);
2609       recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State,
2610                                             Part);
2611       // It's also useful to record the lane values for the known minimum
2612       // number of elements, so we do that below. This improves the code
2613       // quality when trying to extract the first element, for example.
2614     }
2615 
2616     if (ScalarIVTy->isFloatingPointTy())
2617       StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy);
2618 
2619     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2620       Value *StartIdx = Builder.CreateBinOp(
2621           AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane));
2622       // The step returned by `createStepForVF` is a runtime-evaluated value
2623       // when VF is scalable. Otherwise, it should be folded into a Constant.
2624       assert((VF.isScalable() || isa<Constant>(StartIdx)) &&
2625              "Expected StartIdx to be folded to a constant when VF is not "
2626              "scalable");
2627       auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
2628       auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul);
2629       State.set(Def, Add, VPIteration(Part, Lane));
2630       recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State,
2631                                             Part, Lane);
2632     }
2633   }
2634 }
2635 
2636 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def,
2637                                                     const VPIteration &Instance,
2638                                                     VPTransformState &State) {
2639   Value *ScalarInst = State.get(Def, Instance);
2640   Value *VectorValue = State.get(Def, Instance.Part);
2641   VectorValue = Builder.CreateInsertElement(
2642       VectorValue, ScalarInst,
2643       Instance.Lane.getAsRuntimeExpr(State.Builder, VF));
2644   State.set(Def, VectorValue, Instance.Part);
2645 }
2646 
2647 Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
2648   assert(Vec->getType()->isVectorTy() && "Invalid type");
2649   return Builder.CreateVectorReverse(Vec, "reverse");
2650 }
2651 
2652 // Return whether we allow using masked interleave-groups (for dealing with
2653 // strided loads/stores that reside in predicated blocks, or for dealing
2654 // with gaps).
2655 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2656   // If an override option has been passed in for interleaved accesses, use it.
2657   if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2658     return EnableMaskedInterleavedMemAccesses;
2659 
2660   return TTI.enableMaskedInterleavedAccessVectorization();
2661 }
2662 
2663 // Try to vectorize the interleave group that \p Instr belongs to.
2664 //
2665 // E.g. Translate following interleaved load group (factor = 3):
2666 //   for (i = 0; i < N; i+=3) {
2667 //     R = Pic[i];             // Member of index 0
2668 //     G = Pic[i+1];           // Member of index 1
2669 //     B = Pic[i+2];           // Member of index 2
2670 //     ... // do something to R, G, B
2671 //   }
2672 // To:
2673 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2674 //   %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9>   ; R elements
2675 //   %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10>  ; G elements
2676 //   %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11>  ; B elements
2677 //
2678 // Or translate following interleaved store group (factor = 3):
2679 //   for (i = 0; i < N; i+=3) {
2680 //     ... do something to R, G, B
2681 //     Pic[i]   = R;           // Member of index 0
2682 //     Pic[i+1] = G;           // Member of index 1
2683 //     Pic[i+2] = B;           // Member of index 2
2684 //   }
2685 // To:
2686 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2687 //   %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
2688 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2689 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2690 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2691 void InnerLoopVectorizer::vectorizeInterleaveGroup(
2692     const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
2693     VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
2694     VPValue *BlockInMask) {
2695   Instruction *Instr = Group->getInsertPos();
2696   const DataLayout &DL = Instr->getModule()->getDataLayout();
2697 
2698   // Prepare for the vector type of the interleaved load/store.
2699   Type *ScalarTy = getLoadStoreType(Instr);
2700   unsigned InterleaveFactor = Group->getFactor();
2701   assert(!VF.isScalable() && "scalable vectors not yet supported.");
2702   auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);
2703 
2704   // Prepare for the new pointers.
2705   SmallVector<Value *, 2> AddrParts;
2706   unsigned Index = Group->getIndex(Instr);
2707 
2708   // TODO: extend the masked interleaved-group support to reversed access.
2709   assert((!BlockInMask || !Group->isReverse()) &&
2710          "Reversed masked interleave-group not supported.");
2711 
2712   // If the group is reverse, adjust the index to refer to the last vector lane
2713   // instead of the first. We adjust the index from the first vector lane,
2714   // rather than directly getting the pointer for lane VF - 1, because the
2715   // pointer operand of the interleaved access is supposed to be uniform. For
2716   // uniform instructions, we're only required to generate a value for the
2717   // first vector lane in each unroll iteration.
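  // For example, with VF = 4 and an interleave factor of 3, the index grows by
  // (4 - 1) * 3 == 9 extra elements, so the address computed below refers to
  // the group accessed by the last vector lane rather than the first.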
2718   if (Group->isReverse())
2719     Index += (VF.getKnownMinValue() - 1) * Group->getFactor();
2720 
2721   for (unsigned Part = 0; Part < UF; Part++) {
2722     Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
2723     setDebugLocFromInst(Builder, AddrPart);
2724 
2725     // Note that the current instruction could be at any index in the group.
2726     // We need to adjust the address to that of the member at index 0.
2727     //
2728     // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
2729     //       b = A[i];       // Member of index 0
2730     // The current pointer points to A[i+1]; adjust it to A[i].
2731     //
2732     // E.g.  A[i+1] = a;     // Member of index 1
2733     //       A[i]   = b;     // Member of index 0
2734     //       A[i+2] = c;     // Member of index 2 (Current instruction)
2735     // The current pointer points to A[i+2]; adjust it to A[i].
2736 
2737     bool InBounds = false;
2738     if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
2739       InBounds = gep->isInBounds();
2740     AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2741     cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2742 
2743     // Cast to the vector pointer type.
2744     unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
2745     Type *PtrTy = VecTy->getPointerTo(AddressSpace);
2746     AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
2747   }
2748 
2749   setDebugLocFromInst(Builder, Instr);
2750   Value *PoisonVec = PoisonValue::get(VecTy);
2751 
2752   Value *MaskForGaps = nullptr;
2753   if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
2754     MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2755     assert(MaskForGaps && "Mask for Gaps is required but it is null");
2756   }
2757 
2758   // Vectorize the interleaved load group.
2759   if (isa<LoadInst>(Instr)) {
2760     // For each unroll part, create a wide load for the group.
2761     SmallVector<Value *, 2> NewLoads;
2762     for (unsigned Part = 0; Part < UF; Part++) {
2763       Instruction *NewLoad;
2764       if (BlockInMask || MaskForGaps) {
2765         assert(useMaskedInterleavedAccesses(*TTI) &&
2766                "masked interleaved groups are not allowed.");
2767         Value *GroupMask = MaskForGaps;
2768         if (BlockInMask) {
2769           Value *BlockInMaskPart = State.get(BlockInMask, Part);
2770           Value *ShuffledMask = Builder.CreateShuffleVector(
2771               BlockInMaskPart,
2772               createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2773               "interleaved.mask");
2774           GroupMask = MaskForGaps
2775                           ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2776                                                 MaskForGaps)
2777                           : ShuffledMask;
2778         }
2779         NewLoad =
2780             Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(),
2781                                      GroupMask, PoisonVec, "wide.masked.vec");
2782       } else
2784         NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
2785                                             Group->getAlign(), "wide.vec");
2786       Group->addMetadata(NewLoad);
2787       NewLoads.push_back(NewLoad);
2788     }
2789 
2790     // For each member in the group, shuffle out the appropriate data from the
2791     // wide loads.
2792     unsigned J = 0;
2793     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2794       Instruction *Member = Group->getMember(I);
2795 
2796       // Skip the gaps in the group.
2797       if (!Member)
2798         continue;
2799 
2800       auto StrideMask =
2801           createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
2802       for (unsigned Part = 0; Part < UF; Part++) {
2803         Value *StridedVec = Builder.CreateShuffleVector(
2804             NewLoads[Part], StrideMask, "strided.vec");
2805 
2806         // If this member has a different type, cast the result to its type.
2807         if (Member->getType() != ScalarTy) {
2808           assert(!VF.isScalable() && "VF is assumed to be non scalable.");
2809           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2810           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2811         }
2812 
2813         if (Group->isReverse())
2814           StridedVec = reverseVector(StridedVec);
2815 
2816         State.set(VPDefs[J], StridedVec, Part);
2817       }
2818       ++J;
2819     }
2820     return;
2821   }
2822 
2823   // The sub vector type for the current instruction.
2824   auto *SubVT = VectorType::get(ScalarTy, VF);
2825 
2826   // Vectorize the interleaved store group.
2827   for (unsigned Part = 0; Part < UF; Part++) {
2828     // Collect the stored vector from each member.
2829     SmallVector<Value *, 4> StoredVecs;
2830     for (unsigned i = 0; i < InterleaveFactor; i++) {
2831       // An interleaved store group has no gaps, so each index has a member.
2832       assert(Group->getMember(i) && "Fail to get a member from an interleaved store group");
2833 
2834       Value *StoredVec = State.get(StoredValues[i], Part);
2835 
2836       if (Group->isReverse())
2837         StoredVec = reverseVector(StoredVec);
2838 
2839       // If this member has a different type, cast it to a unified type.
2841       if (StoredVec->getType() != SubVT)
2842         StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2843 
2844       StoredVecs.push_back(StoredVec);
2845     }
2846 
2847     // Concatenate all vectors into a wide vector.
2848     Value *WideVec = concatenateVectors(Builder, StoredVecs);
2849 
2850     // Interleave the elements in the wide vector.
2851     Value *IVec = Builder.CreateShuffleVector(
2852         WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor),
2853         "interleaved.vec");
2854 
2855     Instruction *NewStoreInstr;
2856     if (BlockInMask) {
2857       Value *BlockInMaskPart = State.get(BlockInMask, Part);
2858       Value *ShuffledMask = Builder.CreateShuffleVector(
2859           BlockInMaskPart,
2860           createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2861           "interleaved.mask");
2862       NewStoreInstr = Builder.CreateMaskedStore(
2863           IVec, AddrParts[Part], Group->getAlign(), ShuffledMask);
2864     } else
2866       NewStoreInstr =
2867           Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());
2868 
2869     Group->addMetadata(NewStoreInstr);
2870   }
2871 }
2872 
2873 void InnerLoopVectorizer::vectorizeMemoryInstruction(
2874     Instruction *Instr, VPTransformState &State, VPValue *Def, VPValue *Addr,
2875     VPValue *StoredValue, VPValue *BlockInMask) {
2876   // Attempt to issue a wide load or store.
2877   LoadInst *LI = dyn_cast<LoadInst>(Instr);
2878   StoreInst *SI = dyn_cast<StoreInst>(Instr);
2879 
2880   assert((LI || SI) && "Invalid Load/Store instruction");
2881   assert((!SI || StoredValue) && "No stored value provided for widened store");
2882   assert((!LI || !StoredValue) && "Stored value provided for widened load");
2883 
2884   LoopVectorizationCostModel::InstWidening Decision =
2885       Cost->getWideningDecision(Instr, VF);
2886   assert((Decision == LoopVectorizationCostModel::CM_Widen ||
2887           Decision == LoopVectorizationCostModel::CM_Widen_Reverse ||
2888           Decision == LoopVectorizationCostModel::CM_GatherScatter) &&
2889          "CM decision is not to widen the memory instruction");
2890 
2891   Type *ScalarDataTy = getLoadStoreType(Instr);
2892 
2893   auto *DataTy = VectorType::get(ScalarDataTy, VF);
2894   const Align Alignment = getLoadStoreAlignment(Instr);
2895 
2896   // Determine if the pointer operand of the access is either consecutive or
2897   // reverse consecutive.
2898   bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse);
2899   bool ConsecutiveStride =
2900       Reverse || (Decision == LoopVectorizationCostModel::CM_Widen);
2901   bool CreateGatherScatter =
2902       (Decision == LoopVectorizationCostModel::CM_GatherScatter);
2903 
2904   // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector
2905   // gather/scatter. Otherwise Decision should have been to Scalarize.
2906   assert((ConsecutiveStride || CreateGatherScatter) &&
2907          "The instruction should be scalarized");
2908   (void)ConsecutiveStride;
2909 
2910   VectorParts BlockInMaskParts(UF);
2911   bool isMaskRequired = BlockInMask;
2912   if (isMaskRequired)
2913     for (unsigned Part = 0; Part < UF; ++Part)
2914       BlockInMaskParts[Part] = State.get(BlockInMask, Part);
2915 
2916   const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
2917     // Calculate the pointer for the specific unroll-part.
2918     GetElementPtrInst *PartPtr = nullptr;
2919 
2920     bool InBounds = false;
2921     if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
2922       InBounds = gep->isInBounds();
2923     if (Reverse) {
2924       // If the address is consecutive but reversed, then the
2925       // wide store needs to start at the last vector element.
2926       // RunTimeVF = VScale * VF.getKnownMinValue()
2927       // For fixed-width vectors, VScale is 1, so RunTimeVF = VF.getKnownMinValue()
2928       Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), VF);
2929       // NumElt = -Part * RunTimeVF
2930       Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF);
2931       // LastLane = 1 - RunTimeVF
2932       Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF);
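      // For example, with a fixed VF of 4 and Part == 1, NumElt == -4 and
      // LastLane == -3, so the wide access starts at Ptr - 7 and covers the
      // four elements of the second unroll part in reverse order.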
2933       PartPtr =
2934           cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt));
2935       PartPtr->setIsInBounds(InBounds);
2936       PartPtr = cast<GetElementPtrInst>(
2937           Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane));
2938       PartPtr->setIsInBounds(InBounds);
2939       if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
2940         BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]);
2941     } else {
2942       Value *Increment = createStepForVF(Builder, Builder.getInt32(Part), VF);
2943       PartPtr = cast<GetElementPtrInst>(
2944           Builder.CreateGEP(ScalarDataTy, Ptr, Increment));
2945       PartPtr->setIsInBounds(InBounds);
2946     }
2947 
2948     unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
2949     return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
2950   };
2951 
2952   // Handle Stores:
2953   if (SI) {
2954     setDebugLocFromInst(Builder, SI);
2955 
2956     for (unsigned Part = 0; Part < UF; ++Part) {
2957       Instruction *NewSI = nullptr;
2958       Value *StoredVal = State.get(StoredValue, Part);
2959       if (CreateGatherScatter) {
2960         Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2961         Value *VectorGep = State.get(Addr, Part);
2962         NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
2963                                             MaskPart);
2964       } else {
2965         if (Reverse) {
2966           // If we store to reverse consecutive memory locations, then we need
2967           // to reverse the order of elements in the stored value.
2968           StoredVal = reverseVector(StoredVal);
2969           // We don't want to update the value in the map as it might be used in
2970           // another expression. So don't call resetVectorValue(StoredVal).
2971         }
2972         auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0)));
2973         if (isMaskRequired)
2974           NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
2975                                             BlockInMaskParts[Part]);
2976         else
2977           NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
2978       }
2979       addMetadata(NewSI, SI);
2980     }
2981     return;
2982   }
2983 
2984   // Handle loads.
2985   assert(LI && "Must have a load instruction");
2986   setDebugLocFromInst(Builder, LI);
2987   for (unsigned Part = 0; Part < UF; ++Part) {
2988     Value *NewLI;
2989     if (CreateGatherScatter) {
2990       Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2991       Value *VectorGep = State.get(Addr, Part);
2992       NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart,
2993                                          nullptr, "wide.masked.gather");
2994       addMetadata(NewLI, LI);
2995     } else {
2996       auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0)));
2997       if (isMaskRequired)
2998         NewLI = Builder.CreateMaskedLoad(
2999             VecPtr, Alignment, BlockInMaskParts[Part], PoisonValue::get(DataTy),
3000             "wide.masked.load");
3001       else
3002         NewLI =
3003             Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
3004 
3005       // Add metadata to the load, but setVectorValue to the reverse shuffle.
3006       addMetadata(NewLI, LI);
3007       if (Reverse)
3008         NewLI = reverseVector(NewLI);
3009     }
3010 
3011     State.set(Def, NewLI, Part);
3012   }
3013 }
3014 
3015 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, VPValue *Def,
3016                                                VPUser &User,
3017                                                const VPIteration &Instance,
3018                                                bool IfPredicateInstr,
3019                                                VPTransformState &State) {
3020   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
3021 
3022   // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for
3023   // the first lane and part.
3024   if (isa<NoAliasScopeDeclInst>(Instr))
3025     if (!Instance.isFirstIteration())
3026       return;
3027 
3028   setDebugLocFromInst(Builder, Instr);
3029 
3030   // Does this instruction return a value?
3031   bool IsVoidRetTy = Instr->getType()->isVoidTy();
3032 
3033   Instruction *Cloned = Instr->clone();
3034   if (!IsVoidRetTy)
3035     Cloned->setName(Instr->getName() + ".cloned");
3036 
3037   State.Builder.SetInsertPoint(Builder.GetInsertBlock(),
3038                                Builder.GetInsertPoint());
3039   // Replace the operands of the cloned instructions with their scalar
3040   // equivalents in the new loop.
3041   for (unsigned op = 0, e = User.getNumOperands(); op != e; ++op) {
3042     auto *Operand = dyn_cast<Instruction>(Instr->getOperand(op));
3043     auto InputInstance = Instance;
3044     if (!Operand || !OrigLoop->contains(Operand) ||
3045         (Cost->isUniformAfterVectorization(Operand, State.VF)))
3046       InputInstance.Lane = VPLane::getFirstLane();
3047     auto *NewOp = State.get(User.getOperand(op), InputInstance);
3048     Cloned->setOperand(op, NewOp);
3049   }
3050   addNewMetadata(Cloned, Instr);
3051 
3052   // Place the cloned scalar in the new loop.
3053   Builder.Insert(Cloned);
3054 
3055   State.set(Def, Cloned, Instance);
3056 
3057   // If we just cloned a new assumption, add it to the assumption cache.
3058   if (auto *II = dyn_cast<AssumeInst>(Cloned))
3059     AC->registerAssumption(II);
3060 
3061   // End if-block.
3062   if (IfPredicateInstr)
3063     PredicatedInstructions.push_back(Cloned);
3064 }
3065 
3066 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
3067                                                       Value *End, Value *Step,
3068                                                       Instruction *DL) {
3069   BasicBlock *Header = L->getHeader();
3070   BasicBlock *Latch = L->getLoopLatch();
3071   // As we're just creating this loop, it's possible no latch exists
3072   // yet. If so, use the header as this will be a single block loop.
3073   if (!Latch)
3074     Latch = Header;
3075 
3076   IRBuilder<> Builder(&*Header->getFirstInsertionPt());
3077   Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
3078   setDebugLocFromInst(Builder, OldInst);
3079   auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
3080 
3081   Builder.SetInsertPoint(Latch->getTerminator());
3082   setDebugLocFromInst(Builder, OldInst);
3083 
3084   // Create i+1 and fill the PHINode.
3085   //
3086   // If the tail is not folded, we know that End - Start >= Step (either
3087   // statically or through the minimum iteration checks). We also know that both
3088   // Start % Step == 0 and End % Step == 0. We exit the vector loop if %IV +
3089   // %Step == %End. Hence we must exit the loop before %IV + %Step unsigned
3090   // overflows and we can mark the induction increment as NUW.
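  // For example, with Start == 0 and Step == 8, End is a multiple of 8 and the
  // increment executes at most End / 8 times, so index.next never wraps around
  // the unsigned range.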
3091   Value *Next =
3092       Builder.CreateAdd(Induction, Step, "index.next",
3093                         /*NUW=*/!Cost->foldTailByMasking(), /*NSW=*/false);
3094   Induction->addIncoming(Start, L->getLoopPreheader());
3095   Induction->addIncoming(Next, Latch);
3096   // Create the compare.
3097   Value *ICmp = Builder.CreateICmpEQ(Next, End);
3098   Builder.CreateCondBr(ICmp, L->getUniqueExitBlock(), Header);
3099 
3100   // Now we have two terminators. Remove the old one from the block.
3101   Latch->getTerminator()->eraseFromParent();
3102 
3103   return Induction;
3104 }
3105 
3106 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
3107   if (TripCount)
3108     return TripCount;
3109 
3110   assert(L && "Create Trip Count for null loop.");
3111   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3112   // Find the loop boundaries.
3113   ScalarEvolution *SE = PSE.getSE();
3114   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
3115   assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
3116          "Invalid loop count");
3117 
3118   Type *IdxTy = Legal->getWidestInductionType();
3119   assert(IdxTy && "No type for induction");
3120 
3121   // The exit count might have type i64 while the phi is i32. This can happen
3122   // if we have an induction variable that is sign extended before the
3123   // compare. The only way we get a backedge-taken count in that case is if
3124   // the induction variable was signed, and as such it will not overflow. In
3125   // such a case truncation is legal.
3126   if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
3127       IdxTy->getPrimitiveSizeInBits())
3128     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
3129   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
3130 
3131   // Get the total trip count from the count by adding 1.
3132   const SCEV *ExitCount = SE->getAddExpr(
3133       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
3134 
3135   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
3136 
3137   // Expand the trip count and place the new instructions in the preheader.
3138   // Notice that the pre-header does not change, only the loop body.
3139   SCEVExpander Exp(*SE, DL, "induction");
3140 
3141   // Count holds the overall loop count (N).
3142   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
3143                                 L->getLoopPreheader()->getTerminator());
3144 
3145   if (TripCount->getType()->isPointerTy())
3146     TripCount =
3147         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
3148                                     L->getLoopPreheader()->getTerminator());
3149 
3150   return TripCount;
3151 }
3152 
3153 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
3154   if (VectorTripCount)
3155     return VectorTripCount;
3156 
3157   Value *TC = getOrCreateTripCount(L);
3158   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3159 
3160   Type *Ty = TC->getType();
3161   // This is where we can make the step a runtime constant.
3162   Value *Step = createStepForVF(Builder, ConstantInt::get(Ty, UF), VF);
3163 
3164   // If the tail is to be folded by masking, round the number of iterations N
3165   // up to a multiple of Step instead of rounding down. This is done by first
3166   // adding Step-1 and then rounding down. Note that it's ok if this addition
3167   // overflows: the vector induction variable will eventually wrap to zero given
3168   // that it starts at zero and its Step is a power of two; the loop will then
3169   // exit, with the last early-exit vector comparison also producing all-true.
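  // For example, with VF * UF == 8 and a trip count of 13, TC becomes 20 here
  // and the vector trip count computed below is 20 - (20 % 8) == 16, i.e. the
  // original trip count rounded up to a multiple of 8.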
3170   if (Cost->foldTailByMasking()) {
3171     assert(isPowerOf2_32(VF.getKnownMinValue() * UF) &&
3172            "VF*UF must be a power of 2 when folding tail by masking");
3173     assert(!VF.isScalable() &&
3174            "Tail folding not yet supported for scalable vectors");
3175     TC = Builder.CreateAdd(
3176         TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up");
3177   }
3178 
3179   // Now we need to generate the expression for the part of the loop that the
3180   // vectorized body will execute. This is equal to N - (N % Step) if scalar
3181   // iterations are not required for correctness, or N - Step, otherwise. Step
3182   // is equal to the vectorization factor (number of SIMD elements) times the
3183   // unroll factor (number of SIMD instructions).
3184   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
3185 
3186   // There are two cases where we need to ensure (at least) the last iteration
3187   // runs in the scalar remainder loop. Thus, if the step evenly divides
3188   // the trip count, we set the remainder to be equal to the step. If the step
3189   // does not evenly divide the trip count, no adjustment is necessary since
3190   // there will already be scalar iterations. Note that the minimum iterations
3191   // check ensures that N >= Step. The cases are:
3192   // 1) If there is a non-reversed interleaved group that may speculatively
3193   //    access memory out-of-bounds.
3194   // 2) If any instruction may follow a conditionally taken exit. That is, if
3195   //    the loop contains multiple exiting blocks, or a single exiting block
3196   //    which is not the latch.
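  // For example, when a scalar epilogue is required, with Step == 8 and
  // N == 16, R would be 0, so we force R to 8 and the vector loop covers only
  // the first 8 iterations, leaving the last 8 for the scalar remainder loop.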
3197   if (VF.isVector() && Cost->requiresScalarEpilogue()) {
3198     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
3199     R = Builder.CreateSelect(IsZero, Step, R);
3200   }
3201 
3202   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
3203 
3204   return VectorTripCount;
3205 }
3206 
3207 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
3208                                                    const DataLayout &DL) {
3209   // Verify that V is a vector type with the same element count as DstVTy.
3210   auto *DstFVTy = cast<FixedVectorType>(DstVTy);
3211   unsigned VF = DstFVTy->getNumElements();
3212   auto *SrcVecTy = cast<FixedVectorType>(V->getType());
3213   assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match");
3214   Type *SrcElemTy = SrcVecTy->getElementType();
3215   Type *DstElemTy = DstFVTy->getElementType();
3216   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
3217          "Vector elements must have same size");
3218 
3219   // Do a direct cast if element types are castable.
3220   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
3221     return Builder.CreateBitOrPointerCast(V, DstFVTy);
3222   }
3223   // V cannot be directly cast to the desired vector type. This may happen
3224   // when V is a floating point vector but DstVTy is a vector of pointers, or
3225   // vice-versa. Handle this with a two-step bitcast through an intermediate
3226   // integer type, i.e. Ptr <-> Int <-> Float.
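  // For example, on a target with 64-bit pointers, casting <4 x double> to a
  // vector of pointers goes through an intermediate <4 x i64>.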
3227   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
3228          "Only one type should be a pointer type");
3229   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
3230          "Only one type should be a floating point type");
3231   Type *IntTy =
3232       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
3233   auto *VecIntTy = FixedVectorType::get(IntTy, VF);
3234   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
3235   return Builder.CreateBitOrPointerCast(CastVal, DstFVTy);
3236 }
3237 
3238 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
3239                                                          BasicBlock *Bypass) {
3240   Value *Count = getOrCreateTripCount(L);
3241   // Reuse existing vector loop preheader for TC checks.
3242   // Note that new preheader block is generated for vector loop.
3243   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
3244   IRBuilder<> Builder(TCCheckBlock->getTerminator());
3245 
3246   // Generate code to check if the loop's trip count is less than VF * UF, or
3247   // equal to it in case a scalar epilogue is required; this implies that the
3248   // vector trip count is zero. This check also covers the case where adding one
3249   // to the backedge-taken count overflowed leading to an incorrect trip count
3250   // of zero. In this case we will also jump to the scalar loop.
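  // For example, with VF * UF == 8 and a required scalar epilogue, we branch
  // to the scalar loop whenever the trip count is less than or equal to 8.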
3251   auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE
3252                                           : ICmpInst::ICMP_ULT;
3253 
3254   // If tail is to be folded, vector loop takes care of all iterations.
3255   Value *CheckMinIters = Builder.getFalse();
3256   if (!Cost->foldTailByMasking()) {
3257     Value *Step =
3258         createStepForVF(Builder, ConstantInt::get(Count->getType(), UF), VF);
3259     CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
3260   }
3261   // Create new preheader for vector loop.
3262   LoopVectorPreHeader =
3263       SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
3264                  "vector.ph");
3265 
3266   assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
3267                                DT->getNode(Bypass)->getIDom()) &&
3268          "TC check is expected to dominate Bypass");
3269 
3270   // Update dominator for Bypass & LoopExit.
3271   DT->changeImmediateDominator(Bypass, TCCheckBlock);
3272   DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
3273 
3274   ReplaceInstWithInst(
3275       TCCheckBlock->getTerminator(),
3276       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
3277   LoopBypassBlocks.push_back(TCCheckBlock);
3278 }
3279 
3280 BasicBlock *InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
3282   BasicBlock *const SCEVCheckBlock =
3283       RTChecks.emitSCEVChecks(L, Bypass, LoopVectorPreHeader, LoopExitBlock);
3284   if (!SCEVCheckBlock)
3285     return nullptr;
3286 
3287   assert(!(SCEVCheckBlock->getParent()->hasOptSize() ||
3288            (OptForSizeBasedOnProfile &&
3289             Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) &&
3290          "Cannot SCEV check stride or overflow when optimizing for size");
3291 
3293   // Update the dominator only if this is the first RT check.
3294   if (LoopBypassBlocks.empty()) {
3295     DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
3296     DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
3297   }
3298 
3299   LoopBypassBlocks.push_back(SCEVCheckBlock);
3300   AddedSafetyChecks = true;
3301   return SCEVCheckBlock;
3302 }
3303 
3304 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L,
3305                                                       BasicBlock *Bypass) {
3306   // VPlan-native path does not do any analysis for runtime checks currently.
3307   if (EnableVPlanNativePath)
3308     return nullptr;
3309 
3310   BasicBlock *const MemCheckBlock =
3311       RTChecks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader);
3312 
3313   // Check if we generated code that checks at runtime whether arrays overlap.
3314   // We put the checks into a separate block to make the more common case of
3315   // few elements faster.
3316   if (!MemCheckBlock)
3317     return nullptr;
3318 
3319   if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
3320     assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
3321            "Cannot emit memory checks when optimizing for size, unless forced "
3322            "to vectorize.");
3323     ORE->emit([&]() {
3324       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
3325                                         L->getStartLoc(), L->getHeader())
3326              << "Code-size may be reduced by not forcing "
3327                 "vectorization, or by source-code modifications "
3328                 "eliminating the need for runtime checks "
3329                 "(e.g., adding 'restrict').";
3330     });
3331   }
3332 
3333   LoopBypassBlocks.push_back(MemCheckBlock);
3334 
3335   AddedSafetyChecks = true;
3336 
3337   // We currently don't use LoopVersioning for the actual loop cloning but we
3338   // still use it to add the noalias metadata.
3339   LVer = std::make_unique<LoopVersioning>(
3340       *Legal->getLAI(),
3341       Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
3342       DT, PSE.getSE());
3343   LVer->prepareNoAliasMetadata();
3344   return MemCheckBlock;
3345 }
3346 
3347 Value *InnerLoopVectorizer::emitTransformedIndex(
3348     IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
3349     const InductionDescriptor &ID) const {
3350 
3351   SCEVExpander Exp(*SE, DL, "induction");
3352   auto Step = ID.getStep();
3353   auto StartValue = ID.getStartValue();
3354   assert(Index->getType()->getScalarType() == Step->getType() &&
3355          "Index scalar type does not match StepValue type");
3356 
3357   // Note: the IR at this point is broken. We cannot use SE to create any new
3358   // SCEV and then expand it, hoping that SCEV's simplification will give us
3359   // better code. Unfortunately, attempting to do so on invalid IR may lead to
3360   // various SCEV crashes. So all we can do is use the builder and rely on
3361   // InstCombine for future simplifications. Here we handle some trivial cases
3362   // only.
3363   auto CreateAdd = [&B](Value *X, Value *Y) {
3364     assert(X->getType() == Y->getType() && "Types don't match!");
3365     if (auto *CX = dyn_cast<ConstantInt>(X))
3366       if (CX->isZero())
3367         return Y;
3368     if (auto *CY = dyn_cast<ConstantInt>(Y))
3369       if (CY->isZero())
3370         return X;
3371     return B.CreateAdd(X, Y);
3372   };
3373 
3374   // We allow X to be a vector type, in which case Y will potentially be
3375   // splatted into a vector with the same element count.
3376   auto CreateMul = [&B](Value *X, Value *Y) {
3377     assert(X->getType()->getScalarType() == Y->getType() &&
3378            "Types don't match!");
3379     if (auto *CX = dyn_cast<ConstantInt>(X))
3380       if (CX->isOne())
3381         return Y;
3382     if (auto *CY = dyn_cast<ConstantInt>(Y))
3383       if (CY->isOne())
3384         return X;
3385     VectorType *XVTy = dyn_cast<VectorType>(X->getType());
3386     if (XVTy && !isa<VectorType>(Y->getType()))
3387       Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
3388     return B.CreateMul(X, Y);
3389   };
3390 
3391   // Get a suitable insert point for SCEV expansion. For blocks in the vector
3392   // loop, choose the end of the vector loop header (=LoopVectorBody), because
3393   // the DomTree is not kept up-to-date for additional blocks generated in the
3394   // vector loop. By using the header as insertion point, we guarantee that the
3395   // expanded instructions dominate all their uses.
3396   auto GetInsertPoint = [this, &B]() {
3397     BasicBlock *InsertBB = B.GetInsertPoint()->getParent();
3398     if (InsertBB != LoopVectorBody &&
3399         LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB))
3400       return LoopVectorBody->getTerminator();
3401     return &*B.GetInsertPoint();
3402   };
3403 
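  // The transformed index is StartValue + Index * Step for integer inductions,
  // StartValue advanced by Index * Step elements for pointer inductions, and
  // StartValue fadd/fsub Index * Step for floating-point inductions.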
3404   switch (ID.getKind()) {
3405   case InductionDescriptor::IK_IntInduction: {
3406     assert(!isa<VectorType>(Index->getType()) &&
3407            "Vector indices not supported for integer inductions yet");
3408     assert(Index->getType() == StartValue->getType() &&
3409            "Index type does not match StartValue type");
3410     if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
3411       return B.CreateSub(StartValue, Index);
3412     auto *Offset = CreateMul(
3413         Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()));
3414     return CreateAdd(StartValue, Offset);
3415   }
3416   case InductionDescriptor::IK_PtrInduction: {
3417     assert(isa<SCEVConstant>(Step) &&
3418            "Expected constant step for pointer induction");
3419     return B.CreateGEP(
3420         StartValue->getType()->getPointerElementType(), StartValue,
3421         CreateMul(Index,
3422                   Exp.expandCodeFor(Step, Index->getType()->getScalarType(),
3423                                     GetInsertPoint())));
3424   }
3425   case InductionDescriptor::IK_FpInduction: {
3426     assert(!isa<VectorType>(Index->getType()) &&
3427            "Vector indices not supported for FP inductions yet");
3428     assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
3429     auto InductionBinOp = ID.getInductionBinOp();
3430     assert(InductionBinOp &&
3431            (InductionBinOp->getOpcode() == Instruction::FAdd ||
3432             InductionBinOp->getOpcode() == Instruction::FSub) &&
3433            "Original bin op should be defined for FP induction");
3434 
3435     Value *StepValue = cast<SCEVUnknown>(Step)->getValue();
3436     Value *MulExp = B.CreateFMul(StepValue, Index);
3437     return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
3438                          "induction");
3439   }
3440   case InductionDescriptor::IK_NoInduction:
3441     return nullptr;
3442   }
3443   llvm_unreachable("invalid enum");
3444 }
3445 
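// Split the original preheader to create the middle block and the scalar
// preheader, split again to create the (initially empty) vector body, update
// the dominator tree, and register the new vector loop with LoopInfo.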
3446 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
3447   LoopScalarBody = OrigLoop->getHeader();
3448   LoopVectorPreHeader = OrigLoop->getLoopPreheader();
3449   LoopExitBlock = OrigLoop->getUniqueExitBlock();
3450   assert(LoopExitBlock && "Must have an exit block");
3451   assert(LoopVectorPreHeader && "Invalid loop structure");
3452 
3453   LoopMiddleBlock =
3454       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3455                  LI, nullptr, Twine(Prefix) + "middle.block");
3456   LoopScalarPreHeader =
3457       SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
3458                  nullptr, Twine(Prefix) + "scalar.ph");
3459 
3460   // Set up branch from middle block to the exit and scalar preheader blocks.
3461   // completeLoopSkeleton will update the condition to use an iteration check,
3462   // if required to decide whether to execute the remainder.
3463   BranchInst *BrInst =
3464       BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, Builder.getTrue());
3465   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3466   BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3467   ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
3468 
3469   // We intentionally don't let SplitBlock update LoopInfo, since
3470   // LoopVectorBody should belong to a different loop than LoopVectorPreHeader.
3471   // LoopVectorBody is explicitly added to the correct place a few lines later.
3472   LoopVectorBody =
3473       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3474                  nullptr, nullptr, Twine(Prefix) + "vector.body");
3475 
3476   // Update dominator for loop exit.
3477   DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
3478 
3479   // Create and register the new vector loop.
3480   Loop *Lp = LI->AllocateLoop();
3481   Loop *ParentLoop = OrigLoop->getParentLoop();
3482 
3483   // Insert the new loop into the loop nest and register the new basic blocks
3484   // before calling any utilities such as SCEV that require valid LoopInfo.
3485   if (ParentLoop) {
3486     ParentLoop->addChildLoop(Lp);
3487   } else {
3488     LI->addTopLevelLoop(Lp);
3489   }
3490   Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
3491   return Lp;
3492 }
3493 
3494 void InnerLoopVectorizer::createInductionResumeValues(
3495     Loop *L, Value *VectorTripCount,
3496     std::pair<BasicBlock *, Value *> AdditionalBypass) {
3497   assert(VectorTripCount && L && "Expected valid arguments");
3498   assert(((AdditionalBypass.first && AdditionalBypass.second) ||
3499           (!AdditionalBypass.first && !AdditionalBypass.second)) &&
3500          "Inconsistent information about additional bypass.");
3501   // We are going to resume the execution of the scalar loop.
3502   // Go over all of the induction variables that we found and fix the
3503   // PHIs that are left in the scalar version of the loop.
3504   // The starting values of PHI nodes depend on the counter of the last
3505   // iteration in the vectorized loop.
3506   // If we come from a bypass edge then we need to start from the original
3507   // start value.
3508   for (auto &InductionEntry : Legal->getInductionVars()) {
3509     PHINode *OrigPhi = InductionEntry.first;
3510     InductionDescriptor II = InductionEntry.second;
3511 
3512     // Create phi nodes to merge from the backedge-taken check block.
3513     PHINode *BCResumeVal =
3514         PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
3515                         LoopScalarPreHeader->getTerminator());
3516     // Copy original phi DL over to the new one.
3517     BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
3518     Value *&EndValue = IVEndValues[OrigPhi];
3519     Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
3520     if (OrigPhi == OldInduction) {
3521       // We know what the end value is.
3522       EndValue = VectorTripCount;
3523     } else {
3524       IRBuilder<> B(L->getLoopPreheader()->getTerminator());
3525 
3526       // Fast-math-flags propagate from the original induction instruction.
3527       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3528         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3529 
3530       Type *StepType = II.getStep()->getType();
3531       Instruction::CastOps CastOp =
3532           CastInst::getCastOpcode(VectorTripCount, true, StepType, true);
3533       Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd");
3534       const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout();
3535       EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
3536       EndValue->setName("ind.end");
3537 
3538       // Compute the end value for the additional bypass (if applicable).
3539       if (AdditionalBypass.first) {
3540         B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt()));
3541         CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true,
3542                                          StepType, true);
3543         CRD =
3544             B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd");
3545         EndValueFromAdditionalBypass =
3546             emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
3547         EndValueFromAdditionalBypass->setName("ind.end");
3548       }
3549     }
3550     // The new PHI merges the original incoming value, in case of a bypass,
3551     // or the value at the end of the vectorized loop.
3552     BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);
3553 
3554     // Fix the scalar body counter (PHI node).
3555     // The old induction's phi node in the scalar body needs the truncated
3556     // value.
3557     for (BasicBlock *BB : LoopBypassBlocks)
3558       BCResumeVal->addIncoming(II.getStartValue(), BB);
3559 
3560     if (AdditionalBypass.first)
3561       BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first,
3562                                             EndValueFromAdditionalBypass);
3563 
3564     OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
3565   }
3566 }
3567 
3568 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L,
3569                                                       MDNode *OrigLoopID) {
3570   assert(L && "Expected valid loop.");
3571 
3572   // The trip counts should be cached by now.
3573   Value *Count = getOrCreateTripCount(L);
3574   Value *VectorTripCount = getOrCreateVectorTripCount(L);
3575 
3576   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3577 
3578   // Add a check in the middle block to see if we have completed
3579   // all of the iterations in the first vector loop.
3580   // If (N - N%VF) == N, then we *don't* need to run the remainder.
3581   // If tail is to be folded, we know we don't need to run the remainder.
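  //
  // Illustrative sketch (hypothetical names, assuming VF * UF == 4): the
  // middle block then ends with roughly
  //
  //   %cmp.n = icmp eq i64 %n, %n.vec   ; %n.vec == %n - (%n urem 4)
  //   br i1 %cmp.n, label %exit.block, label %scalar.ph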
3582   if (!Cost->foldTailByMasking()) {
3583     Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
3584                                         Count, VectorTripCount, "cmp.n",
3585                                         LoopMiddleBlock->getTerminator());
3586 
3587     // Here we use the same DebugLoc as the scalar loop latch terminator instead
3588     // of the corresponding compare because they may have ended up with
3589     // different line numbers and we want to avoid awkward line stepping while
    // debugging. E.g., if the compare has a line number inside the loop.
3591     CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3592     cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN);
3593   }
3594 
3595   // Get ready to start creating new instructions into the vectorized body.
3596   assert(LoopVectorPreHeader == L->getLoopPreheader() &&
3597          "Inconsistent vector loop preheader");
3598   Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
3599 
3600   Optional<MDNode *> VectorizedLoopID =
3601       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
3602                                       LLVMLoopVectorizeFollowupVectorized});
3603   if (VectorizedLoopID.hasValue()) {
3604     L->setLoopID(VectorizedLoopID.getValue());
3605 
3606     // Do not setAlreadyVectorized if loop attributes have been defined
3607     // explicitly.
3608     return LoopVectorPreHeader;
3609   }
3610 
3611   // Keep all loop hints from the original loop on the vector loop (we'll
3612   // replace the vectorizer-specific hints below).
3613   if (MDNode *LID = OrigLoop->getLoopID())
3614     L->setLoopID(LID);
3615 
3616   LoopVectorizeHints Hints(L, true, *ORE);
3617   Hints.setAlreadyVectorized();
3618 
3619 #ifdef EXPENSIVE_CHECKS
3620   assert(DT->verify(DominatorTree::VerificationLevel::Fast));
3621   LI->verify(*DT);
3622 #endif
3623 
3624   return LoopVectorPreHeader;
3625 }
3626 
3627 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
3628   /*
3629    In this function we generate a new loop. The new loop will contain
3630    the vectorized instructions while the old loop will continue to run the
3631    scalar remainder.
3632 
3633        [ ] <-- loop iteration number check.
3634     /   |
3635    /    v
3636   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
3637   |  /  |
3638   | /   v
3639   ||   [ ]     <-- vector pre header.
3640   |/    |
3641   |     v
3642   |    [  ] \
3643   |    [  ]_|   <-- vector loop.
3644   |     |
3645   |     v
3646   |   -[ ]   <--- middle-block.
3647   |  /  |
3648   | /   v
3649   -|- >[ ]     <--- new preheader.
3650    |    |
3651    |    v
3652    |   [ ] \
3653    |   [ ]_|   <-- old scalar loop to handle remainder.
3654     \   |
3655      \  v
3656       >[ ]     <-- exit block.
3657    ...
3658    */
3659 
3660   // Get the metadata of the original loop before it gets modified.
3661   MDNode *OrigLoopID = OrigLoop->getLoopID();
3662 
3663   // Workaround!  Compute the trip count of the original loop and cache it
3664   // before we start modifying the CFG.  This code has a systemic problem
3665   // wherein it tries to run analysis over partially constructed IR; this is
3666   // wrong, and not simply for SCEV.  The trip count of the original loop
3667   // simply happens to be prone to hitting this in practice.  In theory, we
3668   // can hit the same issue for any SCEV, or ValueTracking query done during
3669   // mutation.  See PR49900.
3670   getOrCreateTripCount(OrigLoop);
3671 
3672   // Create an empty vector loop, and prepare basic blocks for the runtime
3673   // checks.
3674   Loop *Lp = createVectorLoopSkeleton("");
3675 
3676   // Now, compare the new count to zero. If it is zero skip the vector loop and
3677   // jump to the scalar loop. This check also covers the case where the
3678   // backedge-taken count is uint##_max: adding one to it will overflow leading
3679   // to an incorrect trip count of zero. In this (rare) case we will also jump
3680   // to the scalar loop.
3681   emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader);
3682 
3683   // Generate the code to check any assumptions that we've made for SCEV
3684   // expressions.
3685   emitSCEVChecks(Lp, LoopScalarPreHeader);
3686 
  // Generate the code that checks at runtime whether arrays overlap. We put
  // the checks into a separate block to make the more common case of few
  // elements faster.
3690   emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
3691 
  // Some loops have a single integer induction variable, while other loops
  // don't. One example is loops over C++ iterators, which often have
  // multiple pointer induction variables. The code below also supports the
  // case where we don't have a single integer induction variable.
3696   //
  // We try as hard as possible to reuse an induction variable from the
  // original loop. However, if we don't find one that:
3699   //   - is an integer
3700   //   - counts from zero, stepping by one
3701   //   - is the size of the widest induction variable type
3702   // then we create a new one.
3703   OldInduction = Legal->getPrimaryInduction();
3704   Type *IdxTy = Legal->getWidestInductionType();
3705   Value *StartIdx = ConstantInt::get(IdxTy, 0);
3706   // The loop step is equal to the vectorization factor (num of SIMD elements)
3707   // times the unroll factor (num of SIMD instructions).
3708   Builder.SetInsertPoint(&*Lp->getHeader()->getFirstInsertionPt());
3709   Value *Step = createStepForVF(Builder, ConstantInt::get(IdxTy, UF), VF);
3710   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
3711   Induction =
3712       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
3713                               getDebugLocFromInstOrOperands(OldInduction));
3714 
3715   // Emit phis for the new starting index of the scalar loop.
3716   createInductionResumeValues(Lp, CountRoundDown);
3717 
3718   return completeLoopSkeleton(Lp, OrigLoopID);
3719 }
3720 
3721 // Fix up external users of the induction variable. At this point, we are
3722 // in LCSSA form, with all external PHIs that use the IV having one input value,
3723 // coming from the remainder loop. We need those PHIs to also have a correct
3724 // value for the IV when arriving directly from the middle block.
3725 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3726                                        const InductionDescriptor &II,
3727                                        Value *CountRoundDown, Value *EndValue,
3728                                        BasicBlock *MiddleBlock) {
  // There are two kinds of external IV usages - those that use the value
  // computed in the last iteration (the value that feeds into the phi from
  // the loop latch) and those that use the penultimate value (the PHI).
3732   // We allow both, but they, obviously, have different values.
3733 
3734   assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");
3735 
3736   DenseMap<Value *, Value *> MissingVals;
3737 
3738   // An external user of the last iteration's value should see the value that
3739   // the remainder loop uses to initialize its own IV.
3740   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3741   for (User *U : PostInc->users()) {
3742     Instruction *UI = cast<Instruction>(U);
3743     if (!OrigLoop->contains(UI)) {
3744       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3745       MissingVals[UI] = EndValue;
3746     }
3747   }
3748 
  // An external user of the penultimate value needs to see EndValue - Step.
3750   // The simplest way to get this is to recompute it from the constituent SCEVs,
3751   // that is Start + (Step * (CRD - 1)).
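  //
  // Illustrative sketch with hypothetical names, for an induction with start
  // value %start and step %step:
  //
  //   middle.block:
  //     %cmo        = sub i64 %n.vec, 1      ; CRD - 1
  //     %mul        = mul i64 %step, %cmo
  //     %ind.escape = add i64 %start, %mul   ; Start + Step * (CRD - 1)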
3752   for (User *U : OrigPhi->users()) {
3753     auto *UI = cast<Instruction>(U);
3754     if (!OrigLoop->contains(UI)) {
3755       const DataLayout &DL =
3756           OrigLoop->getHeader()->getModule()->getDataLayout();
3757       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3758 
3759       IRBuilder<> B(MiddleBlock->getTerminator());
3760 
3761       // Fast-math-flags propagate from the original induction instruction.
3762       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3763         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3764 
3765       Value *CountMinusOne = B.CreateSub(
3766           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3767       Value *CMO =
3768           !II.getStep()->getType()->isIntegerTy()
3769               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3770                              II.getStep()->getType())
3771               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3772       CMO->setName("cast.cmo");
3773       Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II);
3774       Escape->setName("ind.escape");
3775       MissingVals[UI] = Escape;
3776     }
3777   }
3778 
3779   for (auto &I : MissingVals) {
3780     PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
3782     // that is %IV2 = phi [...], [ %IV1, %latch ]
3783     // In this case, if IV1 has an external use, we need to avoid adding both
3784     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3785     // don't already have an incoming value for the middle block.
3786     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3787       PHI->addIncoming(I.second, MiddleBlock);
3788   }
3789 }
3790 
3791 namespace {
3792 
3793 struct CSEDenseMapInfo {
3794   static bool canHandle(const Instruction *I) {
3795     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3796            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3797   }
3798 
3799   static inline Instruction *getEmptyKey() {
3800     return DenseMapInfo<Instruction *>::getEmptyKey();
3801   }
3802 
3803   static inline Instruction *getTombstoneKey() {
3804     return DenseMapInfo<Instruction *>::getTombstoneKey();
3805   }
3806 
3807   static unsigned getHashValue(const Instruction *I) {
3808     assert(canHandle(I) && "Unknown instruction!");
3809     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3810                                                            I->value_op_end()));
3811   }
3812 
3813   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3814     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3815         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3816       return LHS == RHS;
3817     return LHS->isIdenticalTo(RHS);
3818   }
3819 };
3820 
3821 } // end anonymous namespace
3822 
/// Perform CSE of induction variable instructions.
3824 static void cse(BasicBlock *BB) {
  // Perform simple CSE.
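  //
  // For example (illustrative only), two identical instructions such as
  //   %e1 = extractelement <4 x i32> %v, i32 0
  //   %e2 = extractelement <4 x i32> %v, i32 0
  // are collapsed: uses of %e2 are rewritten to use %e1 and %e2 is erased.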
3826   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3827   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
3828     Instruction *In = &*I++;
3829 
3830     if (!CSEDenseMapInfo::canHandle(In))
3831       continue;
3832 
3833     // Check if we can replace this instruction with any of the
3834     // visited instructions.
3835     if (Instruction *V = CSEMap.lookup(In)) {
3836       In->replaceAllUsesWith(V);
3837       In->eraseFromParent();
3838       continue;
3839     }
3840 
3841     CSEMap[In] = In;
3842   }
3843 }
3844 
3845 InstructionCost
3846 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF,
3847                                               bool &NeedToScalarize) const {
3848   Function *F = CI->getCalledFunction();
3849   Type *ScalarRetTy = CI->getType();
3850   SmallVector<Type *, 4> Tys, ScalarTys;
3851   for (auto &ArgOp : CI->arg_operands())
3852     ScalarTys.push_back(ArgOp->getType());
3853 
3854   // Estimate cost of scalarized vector call. The source operands are assumed
  // to be vectors, so we need to extract individual elements from them,
3856   // execute VF scalar calls, and then gather the result into the vector return
3857   // value.
3858   InstructionCost ScalarCallCost =
3859       TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput);
3860   if (VF.isScalar())
3861     return ScalarCallCost;
3862 
3863   // Compute corresponding vector type for return value and arguments.
3864   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3865   for (Type *ScalarTy : ScalarTys)
3866     Tys.push_back(ToVectorTy(ScalarTy, VF));
3867 
3868   // Compute costs of unpacking argument values for the scalar calls and
3869   // packing the return values to a vector.
3870   InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
3871 
3872   InstructionCost Cost =
3873       ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
3874 
3875   // If we can't emit a vector call for this function, then the currently found
3876   // cost is the cost we need to return.
3877   NeedToScalarize = true;
3878   VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
3879   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3880 
3881   if (!TLI || CI->isNoBuiltin() || !VecFunc)
3882     return Cost;
3883 
3884   // If the corresponding vector cost is cheaper, return its cost.
3885   InstructionCost VectorCallCost =
3886       TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput);
3887   if (VectorCallCost < Cost) {
3888     NeedToScalarize = false;
3889     Cost = VectorCallCost;
3890   }
3891   return Cost;
3892 }
3893 
3894 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) {
3895   if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy()))
3896     return Elt;
3897   return VectorType::get(Elt, VF);
3898 }
3899 
3900 InstructionCost
3901 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
3902                                                    ElementCount VF) const {
3903   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3904   assert(ID && "Expected intrinsic call!");
3905   Type *RetTy = MaybeVectorizeType(CI->getType(), VF);
3906   FastMathFlags FMF;
3907   if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
3908     FMF = FPMO->getFastMathFlags();
3909 
3910   SmallVector<const Value *> Arguments(CI->arg_begin(), CI->arg_end());
3911   FunctionType *FTy = CI->getCalledFunction()->getFunctionType();
3912   SmallVector<Type *> ParamTys;
3913   std::transform(FTy->param_begin(), FTy->param_end(),
3914                  std::back_inserter(ParamTys),
3915                  [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); });
3916 
3917   IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
3918                                     dyn_cast<IntrinsicInst>(CI));
3919   return TTI.getIntrinsicInstrCost(CostAttrs,
3920                                    TargetTransformInfo::TCK_RecipThroughput);
3921 }
3922 
3923 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3924   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3925   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3926   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3927 }
3928 
3929 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3930   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3931   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3932   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3933 }
3934 
3935 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) {
3936   // For every instruction `I` in MinBWs, truncate the operands, create a
3937   // truncated version of `I` and reextend its result. InstCombine runs
3938   // later and will remove any ext/trunc pairs.
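  //
  // Illustrative sketch (hypothetical names): if MinBWs records that an i32
  // operation only needs 8 bits, then with VF = 4
  //
  //   %a = add <4 x i32> %x, %y
  //
  // becomes roughly
  //
  //   %x.tr = trunc <4 x i32> %x to <4 x i8>
  //   %y.tr = trunc <4 x i32> %y to <4 x i8>
  //   %a.tr = add <4 x i8> %x.tr, %y.tr
  //   %a.ex = zext <4 x i8> %a.tr to <4 x i32>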
3939   SmallPtrSet<Value *, 4> Erased;
3940   for (const auto &KV : Cost->getMinimalBitwidths()) {
3941     // If the value wasn't vectorized, we must maintain the original scalar
3942     // type. The absence of the value from State indicates that it
3943     // wasn't vectorized.
3944     VPValue *Def = State.Plan->getVPValue(KV.first);
3945     if (!State.hasAnyVectorValue(Def))
3946       continue;
3947     for (unsigned Part = 0; Part < UF; ++Part) {
3948       Value *I = State.get(Def, Part);
3949       if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
3950         continue;
3951       Type *OriginalTy = I->getType();
3952       Type *ScalarTruncatedTy =
3953           IntegerType::get(OriginalTy->getContext(), KV.second);
3954       auto *TruncatedTy = FixedVectorType::get(
3955           ScalarTruncatedTy,
3956           cast<FixedVectorType>(OriginalTy)->getNumElements());
3957       if (TruncatedTy == OriginalTy)
3958         continue;
3959 
3960       IRBuilder<> B(cast<Instruction>(I));
3961       auto ShrinkOperand = [&](Value *V) -> Value * {
3962         if (auto *ZI = dyn_cast<ZExtInst>(V))
3963           if (ZI->getSrcTy() == TruncatedTy)
3964             return ZI->getOperand(0);
3965         return B.CreateZExtOrTrunc(V, TruncatedTy);
3966       };
3967 
3968       // The actual instruction modification depends on the instruction type,
3969       // unfortunately.
3970       Value *NewI = nullptr;
3971       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3972         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3973                              ShrinkOperand(BO->getOperand(1)));
3974 
3975         // Any wrapping introduced by shrinking this operation shouldn't be
3976         // considered undefined behavior. So, we can't unconditionally copy
3977         // arithmetic wrapping flags to NewI.
3978         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3979       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3980         NewI =
3981             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3982                          ShrinkOperand(CI->getOperand(1)));
3983       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3984         NewI = B.CreateSelect(SI->getCondition(),
3985                               ShrinkOperand(SI->getTrueValue()),
3986                               ShrinkOperand(SI->getFalseValue()));
3987       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3988         switch (CI->getOpcode()) {
3989         default:
3990           llvm_unreachable("Unhandled cast!");
3991         case Instruction::Trunc:
3992           NewI = ShrinkOperand(CI->getOperand(0));
3993           break;
3994         case Instruction::SExt:
3995           NewI = B.CreateSExtOrTrunc(
3996               CI->getOperand(0),
3997               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3998           break;
3999         case Instruction::ZExt:
4000           NewI = B.CreateZExtOrTrunc(
4001               CI->getOperand(0),
4002               smallestIntegerVectorType(OriginalTy, TruncatedTy));
4003           break;
4004         }
4005       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
4006         auto Elements0 = cast<FixedVectorType>(SI->getOperand(0)->getType())
4007                              ->getNumElements();
4008         auto *O0 = B.CreateZExtOrTrunc(
4009             SI->getOperand(0),
4010             FixedVectorType::get(ScalarTruncatedTy, Elements0));
4011         auto Elements1 = cast<FixedVectorType>(SI->getOperand(1)->getType())
4012                              ->getNumElements();
4013         auto *O1 = B.CreateZExtOrTrunc(
4014             SI->getOperand(1),
4015             FixedVectorType::get(ScalarTruncatedTy, Elements1));
4016 
4017         NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
4018       } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
4019         // Don't do anything with the operands, just extend the result.
4020         continue;
4021       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
4022         auto Elements = cast<FixedVectorType>(IE->getOperand(0)->getType())
4023                             ->getNumElements();
4024         auto *O0 = B.CreateZExtOrTrunc(
4025             IE->getOperand(0),
4026             FixedVectorType::get(ScalarTruncatedTy, Elements));
4027         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
4028         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
4029       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
4030         auto Elements = cast<FixedVectorType>(EE->getOperand(0)->getType())
4031                             ->getNumElements();
4032         auto *O0 = B.CreateZExtOrTrunc(
4033             EE->getOperand(0),
4034             FixedVectorType::get(ScalarTruncatedTy, Elements));
4035         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
4036       } else {
4037         // If we don't know what to do, be conservative and don't do anything.
4038         continue;
4039       }
4040 
4041       // Lastly, extend the result.
4042       NewI->takeName(cast<Instruction>(I));
4043       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
4044       I->replaceAllUsesWith(Res);
4045       cast<Instruction>(I)->eraseFromParent();
4046       Erased.insert(I);
4047       State.reset(Def, Res, Part);
4048     }
4049   }
4050 
  // We'll have created a bunch of ZExts that may now be unused. Clean up.
4052   for (const auto &KV : Cost->getMinimalBitwidths()) {
4053     // If the value wasn't vectorized, we must maintain the original scalar
4054     // type. The absence of the value from State indicates that it
4055     // wasn't vectorized.
4056     VPValue *Def = State.Plan->getVPValue(KV.first);
4057     if (!State.hasAnyVectorValue(Def))
4058       continue;
4059     for (unsigned Part = 0; Part < UF; ++Part) {
4060       Value *I = State.get(Def, Part);
4061       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
4062       if (Inst && Inst->use_empty()) {
4063         Value *NewI = Inst->getOperand(0);
4064         Inst->eraseFromParent();
4065         State.reset(Def, NewI, Part);
4066       }
4067     }
4068   }
4069 }
4070 
4071 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
4072   // Insert truncates and extends for any truncated instructions as hints to
4073   // InstCombine.
4074   if (VF.isVector())
4075     truncateToMinimalBitwidths(State);
4076 
4077   // Fix widened non-induction PHIs by setting up the PHI operands.
4078   if (OrigPHIsToFix.size()) {
4079     assert(EnableVPlanNativePath &&
4080            "Unexpected non-induction PHIs for fixup in non VPlan-native path");
4081     fixNonInductionPHIs(State);
4082   }
4083 
4084   // At this point every instruction in the original loop is widened to a
4085   // vector form. Now we need to fix the recurrences in the loop. These PHI
4086   // nodes are currently empty because we did not want to introduce cycles.
4087   // This is the second stage of vectorizing recurrences.
4088   fixCrossIterationPHIs(State);
4089 
4090   // Forget the original basic block.
4091   PSE.getSE()->forgetLoop(OrigLoop);
4092 
4093   // Fix-up external users of the induction variables.
4094   for (auto &Entry : Legal->getInductionVars())
4095     fixupIVUsers(Entry.first, Entry.second,
4096                  getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
4097                  IVEndValues[Entry.first], LoopMiddleBlock);
4098 
4099   fixLCSSAPHIs(State);
4100   for (Instruction *PI : PredicatedInstructions)
4101     sinkScalarOperands(&*PI);
4102 
4103   // Remove redundant induction instructions.
4104   cse(LoopVectorBody);
4105 
4106   // Set/update profile weights for the vector and remainder loops as original
4107   // loop iterations are now distributed among them. Note that original loop
4108   // represented by LoopScalarBody becomes remainder loop after vectorization.
4109   //
  // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
  // end up with a slightly less accurate result, but that should be OK since
  // profile info is not inherently precise anyway. Note also that a possible
  // bypass of the vector code caused by legality checks is ignored,
  // optimistically assigning all the weight to the vector loop.
4115   //
  // For scalable vectorization we can't know at compile time how many
  // iterations of the loop are handled in one vector iteration, so instead
  // assume a pessimistic vscale of '1'.
4119   setProfileInfoAfterUnrolling(
4120       LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody),
4121       LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF);
4122 }
4123 
4124 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
4125   // In order to support recurrences we need to be able to vectorize Phi nodes.
4126   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4127   // stage #2: We now need to fix the recurrences by adding incoming edges to
4128   // the currently empty PHI nodes. At this point every instruction in the
4129   // original loop is widened to a vector form so we can use them to construct
4130   // the incoming edges.
4131   VPBasicBlock *Header = State.Plan->getEntry()->getEntryBasicBlock();
4132   for (VPRecipeBase &R : Header->phis()) {
4133     auto *PhiR = dyn_cast<VPWidenPHIRecipe>(&R);
4134     if (!PhiR)
4135       continue;
4136     auto *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
4137     if (PhiR->getRecurrenceDescriptor()) {
4138       fixReduction(PhiR, State);
4139     } else if (Legal->isFirstOrderRecurrence(OrigPhi))
4140       fixFirstOrderRecurrence(OrigPhi, State);
4141   }
4142 }
4143 
4144 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi,
4145                                                   VPTransformState &State) {
4146   // This is the second phase of vectorizing first-order recurrences. An
4147   // overview of the transformation is described below. Suppose we have the
4148   // following loop.
4149   //
4150   //   for (int i = 0; i < n; ++i)
4151   //     b[i] = a[i] - a[i - 1];
4152   //
4153   // There is a first-order recurrence on "a". For this loop, the shorthand
4154   // scalar IR looks like:
4155   //
4156   //   scalar.ph:
4157   //     s_init = a[-1]
4158   //     br scalar.body
4159   //
4160   //   scalar.body:
4161   //     i = phi [0, scalar.ph], [i+1, scalar.body]
4162   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
4163   //     s2 = a[i]
4164   //     b[i] = s2 - s1
4165   //     br cond, scalar.body, ...
4166   //
  // In this example, s1 is a recurrence because its value depends on the
4168   // previous iteration. In the first phase of vectorization, we created a
4169   // temporary value for s1. We now complete the vectorization and produce the
4170   // shorthand vector IR shown below (for VF = 4, UF = 1).
4171   //
4172   //   vector.ph:
4173   //     v_init = vector(..., ..., ..., a[-1])
4174   //     br vector.body
4175   //
4176   //   vector.body
4177   //     i = phi [0, vector.ph], [i+4, vector.body]
4178   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
4179   //     v2 = a[i, i+1, i+2, i+3];
4180   //     v3 = vector(v1(3), v2(0, 1, 2))
4181   //     b[i, i+1, i+2, i+3] = v2 - v3
4182   //     br cond, vector.body, middle.block
4183   //
4184   //   middle.block:
4185   //     x = v2(3)
4186   //     br scalar.ph
4187   //
4188   //   scalar.ph:
4189   //     s_init = phi [x, middle.block], [a[-1], otherwise]
4190   //     br scalar.body
4191   //
4192   // After execution completes the vector loop, we extract the next value of
4193   // the recurrence (x) to use as the initial value in the scalar loop.
4194 
4195   // Get the original loop preheader and single loop latch.
4196   auto *Preheader = OrigLoop->getLoopPreheader();
4197   auto *Latch = OrigLoop->getLoopLatch();
4198 
4199   // Get the initial and previous values of the scalar recurrence.
4200   auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
4201   auto *Previous = Phi->getIncomingValueForBlock(Latch);
4202 
4203   auto *IdxTy = Builder.getInt32Ty();
4204   auto *One = ConstantInt::get(IdxTy, 1);
4205 
4206   // Create a vector from the initial value.
4207   auto *VectorInit = ScalarInit;
4208   if (VF.isVector()) {
4209     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4210     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4211     auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
4212     VectorInit = Builder.CreateInsertElement(
4213         PoisonValue::get(VectorType::get(VectorInit->getType(), VF)),
4214         VectorInit, LastIdx, "vector.recur.init");
4215   }
4216 
4217   VPValue *PhiDef = State.Plan->getVPValue(Phi);
4218   VPValue *PreviousDef = State.Plan->getVPValue(Previous);
4219   // We constructed a temporary phi node in the first phase of vectorization.
4220   // This phi node will eventually be deleted.
4221   Builder.SetInsertPoint(cast<Instruction>(State.get(PhiDef, 0)));
4222 
4223   // Create a phi node for the new recurrence. The current value will either be
4224   // the initial value inserted into a vector or loop-varying vector value.
4225   auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
4226   VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
4227 
4228   // Get the vectorized previous value of the last part UF - 1. It appears last
4229   // among all unrolled iterations, due to the order of their construction.
4230   Value *PreviousLastPart = State.get(PreviousDef, UF - 1);
4231 
4232   // Find and set the insertion point after the previous value if it is an
4233   // instruction.
4234   BasicBlock::iterator InsertPt;
4235   // Note that the previous value may have been constant-folded so it is not
4236   // guaranteed to be an instruction in the vector loop.
4237   // FIXME: Loop invariant values do not form recurrences. We should deal with
4238   //        them earlier.
4239   if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart))
4240     InsertPt = LoopVectorBody->getFirstInsertionPt();
4241   else {
4242     Instruction *PreviousInst = cast<Instruction>(PreviousLastPart);
4243     if (isa<PHINode>(PreviousLastPart))
4244       // If the previous value is a phi node, we should insert after all the phi
4245       // nodes in the block containing the PHI to avoid breaking basic block
4246       // verification. Note that the basic block may be different to
4247       // LoopVectorBody, in case we predicate the loop.
4248       InsertPt = PreviousInst->getParent()->getFirstInsertionPt();
4249     else
4250       InsertPt = ++PreviousInst->getIterator();
4251   }
4252   Builder.SetInsertPoint(&*InsertPt);
4253 
4254   // The vector from which to take the initial value for the current iteration
4255   // (actual or unrolled). Initially, this is the vector phi node.
4256   Value *Incoming = VecPhi;
4257 
4258   // Shuffle the current and previous vector and update the vector parts.
4259   for (unsigned Part = 0; Part < UF; ++Part) {
4260     Value *PreviousPart = State.get(PreviousDef, Part);
4261     Value *PhiPart = State.get(PhiDef, Part);
4262     auto *Shuffle = VF.isVector()
4263                         ? Builder.CreateVectorSplice(Incoming, PreviousPart, -1)
4264                         : Incoming;
4265     PhiPart->replaceAllUsesWith(Shuffle);
4266     cast<Instruction>(PhiPart)->eraseFromParent();
4267     State.reset(PhiDef, Shuffle, Part);
4268     Incoming = PreviousPart;
4269   }
4270 
4271   // Fix the latch value of the new recurrence in the vector loop.
4272   VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4273 
4274   // Extract the last vector element in the middle block. This will be the
4275   // initial value for the recurrence when jumping to the scalar loop.
4276   auto *ExtractForScalar = Incoming;
4277   if (VF.isVector()) {
4278     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4279     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4280     auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
4281     ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx,
4282                                                     "vector.recur.extract");
4283   }
  // Extract the second-to-last element in the middle block if the
4285   // Phi is used outside the loop. We need to extract the phi itself
4286   // and not the last element (the phi update in the current iteration). This
4287   // will be the value when jumping to the exit block from the LoopMiddleBlock,
4288   // when the scalar loop is not run at all.
4289   Value *ExtractForPhiUsedOutsideLoop = nullptr;
4290   if (VF.isVector()) {
4291     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4292     auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2));
4293     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
4294         Incoming, Idx, "vector.recur.extract.for.phi");
4295   } else if (UF > 1)
    // When the loop is unrolled without vectorizing, initialize
    // ExtractForPhiUsedOutsideLoop with the value just prior to the unrolled
    // value of `Incoming`. This is analogous to the vectorized case above:
    // extracting the second-to-last element when VF > 1.
4300     ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);
4301 
4302   // Fix the initial value of the original recurrence in the scalar loop.
4303   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
4304   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
4305   for (auto *BB : predecessors(LoopScalarPreHeader)) {
4306     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
4307     Start->addIncoming(Incoming, BB);
4308   }
4309 
4310   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
4311   Phi->setName("scalar.recur");
4312 
4313   // Finally, fix users of the recurrence outside the loop. The users will need
4314   // either the last value of the scalar recurrence or the last value of the
4315   // vector recurrence we extracted in the middle block. Since the loop is in
4316   // LCSSA form, we just need to find all the phi nodes for the original scalar
4317   // recurrence in the exit block, and then add an edge for the middle block.
4318   // Note that LCSSA does not imply single entry when the original scalar loop
4319   // had multiple exiting edges (as we always run the last iteration in the
4320   // scalar epilogue); in that case, the exiting path through middle will be
4321   // dynamically dead and the value picked for the phi doesn't matter.
4322   for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4323     if (any_of(LCSSAPhi.incoming_values(),
4324                [Phi](Value *V) { return V == Phi; }))
4325       LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
4326 }
4327 
4328 void InnerLoopVectorizer::fixReduction(VPWidenPHIRecipe *PhiR,
4329                                        VPTransformState &State) {
4330   PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
  // Get its reduction variable descriptor.
4332   assert(Legal->isReductionVariable(OrigPhi) &&
4333          "Unable to find the reduction variable");
4334   const RecurrenceDescriptor &RdxDesc = *PhiR->getRecurrenceDescriptor();
4335 
4336   RecurKind RK = RdxDesc.getRecurrenceKind();
4337   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
4338   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
4339   setDebugLocFromInst(Builder, ReductionStartValue);
4340   bool IsInLoopReductionPhi = Cost->isInLoopReduction(OrigPhi);
4341 
4342   VPValue *LoopExitInstDef = State.Plan->getVPValue(LoopExitInst);
4343   // This is the vector-clone of the value that leaves the loop.
4344   Type *VecTy = State.get(LoopExitInstDef, 0)->getType();
4345 
4346   // Wrap flags are in general invalid after vectorization, clear them.
4347   clearReductionWrapFlags(RdxDesc, State);
4348 
4349   // Fix the vector-loop phi.
4350 
4351   // Reductions do not have to start at zero. They can start with
  // any loop-invariant value.
4353   BasicBlock *VectorLoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
4354 
4355   bool IsOrdered = State.VF.isVector() && IsInLoopReductionPhi &&
4356                    Cost->useOrderedReductions(RdxDesc);
4357 
4358   for (unsigned Part = 0; Part < UF; ++Part) {
4359     if (IsOrdered && Part > 0)
4360       break;
4361     Value *VecRdxPhi = State.get(PhiR->getVPSingleValue(), Part);
4362     Value *Val = State.get(PhiR->getBackedgeValue(), Part);
4363     if (IsOrdered)
4364       Val = State.get(PhiR->getBackedgeValue(), UF - 1);
4365 
4366     cast<PHINode>(VecRdxPhi)->addIncoming(Val, VectorLoopLatch);
4367   }
4368 
4369   // Before each round, move the insertion point right between
4370   // the PHIs and the values we are going to write.
4371   // This allows us to write both PHINodes and the extractelement
4372   // instructions.
4373   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4374 
4375   setDebugLocFromInst(Builder, LoopExitInst);
4376 
4377   Type *PhiTy = OrigPhi->getType();
  // If the tail is folded by masking, the vector value leaving the loop
  // should be a select choosing between the vectorized LoopExitInst and the
  // vectorized Phi, rather than the LoopExitInst itself. For an inloop
  // reduction the reduction will already be predicated and does not need to
  // be handled here.
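  //
  // Illustrative sketch (hypothetical names) for a masked add reduction with
  // VF = 4:
  //
  //   %rdx.next = add <4 x i32> %rdx.phi, %val
  //   %rdx.sel  = select <4 x i1> %mask,
  //                      <4 x i32> %rdx.next, <4 x i32> %rdx.phi
  //
  // It is %rdx.sel, not %rdx.next, that must be used outside the loop.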
4382   if (Cost->foldTailByMasking() && !IsInLoopReductionPhi) {
4383     for (unsigned Part = 0; Part < UF; ++Part) {
4384       Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
4385       Value *Sel = nullptr;
4386       for (User *U : VecLoopExitInst->users()) {
4387         if (isa<SelectInst>(U)) {
4388           assert(!Sel && "Reduction exit feeding two selects");
4389           Sel = U;
4390         } else
4391           assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
4392       }
4393       assert(Sel && "Reduction exit feeds no select");
4394       State.reset(LoopExitInstDef, Sel, Part);
4395 
4396       // If the target can create a predicated operator for the reduction at no
4397       // extra cost in the loop (for example a predicated vadd), it can be
4398       // cheaper for the select to remain in the loop than be sunk out of it,
4399       // and so use the select value for the phi instead of the old
4400       // LoopExitValue.
4401       if (PreferPredicatedReductionSelect ||
4402           TTI->preferPredicatedReductionSelect(
4403               RdxDesc.getOpcode(), PhiTy,
4404               TargetTransformInfo::ReductionFlags())) {
4405         auto *VecRdxPhi =
4406             cast<PHINode>(State.get(PhiR->getVPSingleValue(), Part));
4407         VecRdxPhi->setIncomingValueForBlock(
4408             LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel);
4409       }
4410     }
4411   }
4412 
4413   // If the vector reduction can be performed in a smaller type, we truncate
4414   // then extend the loop exit value to enable InstCombine to evaluate the
4415   // entire expression in the smaller type.
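  //
  // Illustrative sketch (hypothetical names), assuming the recurrence type is
  // i8 while the phi type is i32 and VF = 4:
  //
  //   %rdx.trunc = trunc <4 x i32> %rdx to <4 x i8>
  //   %rdx.ext   = sext <4 x i8> %rdx.trunc to <4 x i32>
  //
  // Users of %rdx are redirected to %rdx.ext, and the final reduction in the
  // middle block is then performed on the truncated values.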
4416   if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) {
4417     assert(!IsInLoopReductionPhi && "Unexpected truncated inloop reduction!");
4418     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
4419     Builder.SetInsertPoint(
4420         LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
4421     VectorParts RdxParts(UF);
4422     for (unsigned Part = 0; Part < UF; ++Part) {
4423       RdxParts[Part] = State.get(LoopExitInstDef, Part);
4424       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4425       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
4426                                         : Builder.CreateZExt(Trunc, VecTy);
4427       for (Value::user_iterator UI = RdxParts[Part]->user_begin();
4428            UI != RdxParts[Part]->user_end();)
4429         if (*UI != Trunc) {
4430           (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd);
4431           RdxParts[Part] = Extnd;
4432         } else {
4433           ++UI;
4434         }
4435     }
4436     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4437     for (unsigned Part = 0; Part < UF; ++Part) {
4438       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4439       State.reset(LoopExitInstDef, RdxParts[Part], Part);
4440     }
4441   }
4442 
4443   // Reduce all of the unrolled parts into a single vector.
4444   Value *ReducedPartRdx = State.get(LoopExitInstDef, 0);
4445   unsigned Op = RecurrenceDescriptor::getOpcode(RK);
4446 
4447   // The middle block terminator has already been assigned a DebugLoc here (the
4448   // OrigLoop's single latch terminator). We want the whole middle block to
4449   // appear to execute on this line because: (a) it is all compiler generated,
4450   // (b) these instructions are always executed after evaluating the latch
4451   // conditional branch, and (c) other passes may add new predecessors which
4452   // terminate on this line. This is the easiest way to ensure we don't
4453   // accidentally cause an extra step back into the loop while debugging.
4454   setDebugLocFromInst(Builder, LoopMiddleBlock->getTerminator());
4455   if (IsOrdered)
4456     ReducedPartRdx = State.get(LoopExitInstDef, UF - 1);
4457   else {
4458     // Floating-point operations should have some FMF to enable the reduction.
4459     IRBuilderBase::FastMathFlagGuard FMFG(Builder);
4460     Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
4461     for (unsigned Part = 1; Part < UF; ++Part) {
4462       Value *RdxPart = State.get(LoopExitInstDef, Part);
4463       if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
4464         ReducedPartRdx = Builder.CreateBinOp(
4465             (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
4466       } else {
4467         ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
4468       }
4469     }
4470   }
4471 
4472   // Create the reduction after the loop. Note that inloop reductions create the
4473   // target reduction in the loop using a Reduction recipe.
4474   if (VF.isVector() && !IsInLoopReductionPhi) {
4475     ReducedPartRdx =
4476         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx);
4477     // If the reduction can be performed in a smaller type, we need to extend
4478     // the reduction to the wider type before we branch to the original loop.
4479     if (PhiTy != RdxDesc.getRecurrenceType())
4480       ReducedPartRdx = RdxDesc.isSigned()
4481                            ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
4482                            : Builder.CreateZExt(ReducedPartRdx, PhiTy);
4483   }
4484 
4485   // Create a phi node that merges control-flow from the backedge-taken check
4486   // block and the middle block.
4487   PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx",
4488                                         LoopScalarPreHeader->getTerminator());
4489   for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
4490     BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
4491   BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
4492 
4493   // Now, we need to fix the users of the reduction variable
4494   // inside and outside of the scalar remainder loop.
4495 
4496   // We know that the loop is in LCSSA form. We need to update the PHI nodes
4497   // in the exit blocks.  See comment on analogous loop in
  // fixFirstOrderRecurrence for a more complete explanation of the logic.
4499   for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4500     if (any_of(LCSSAPhi.incoming_values(),
4501                [LoopExitInst](Value *V) { return V == LoopExitInst; }))
4502       LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
4503 
4504   // Fix the scalar loop reduction variable with the incoming reduction sum
4505   // from the vector body and from the backedge value.
4506   int IncomingEdgeBlockIdx =
4507       OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4508   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4509   // Pick the other block.
4510   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4511   OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4512   OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4513 }
4514 
4515 void InnerLoopVectorizer::clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
4516                                                   VPTransformState &State) {
4517   RecurKind RK = RdxDesc.getRecurrenceKind();
4518   if (RK != RecurKind::Add && RK != RecurKind::Mul)
4519     return;
4520 
4521   Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
4522   assert(LoopExitInstr && "null loop exit instruction");
4523   SmallVector<Instruction *, 8> Worklist;
4524   SmallPtrSet<Instruction *, 8> Visited;
4525   Worklist.push_back(LoopExitInstr);
4526   Visited.insert(LoopExitInstr);
4527 
4528   while (!Worklist.empty()) {
4529     Instruction *Cur = Worklist.pop_back_val();
4530     if (isa<OverflowingBinaryOperator>(Cur))
4531       for (unsigned Part = 0; Part < UF; ++Part) {
4532         Value *V = State.get(State.Plan->getVPValue(Cur), Part);
4533         cast<Instruction>(V)->dropPoisonGeneratingFlags();
4534       }
4535 
4536     for (User *U : Cur->users()) {
4537       Instruction *UI = cast<Instruction>(U);
4538       if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
4539           Visited.insert(UI).second)
4540         Worklist.push_back(UI);
4541     }
4542   }
4543 }
4544 
4545 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) {
4546   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4547     if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1)
4548       // Some phis were already hand updated by the reduction and recurrence
4549       // code above, leave them alone.
4550       continue;
4551 
4552     auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
    // A non-instruction incoming value is loop-invariant and has only one
    // value.
4554 
4555     VPLane Lane = VPLane::getFirstLane();
4556     if (isa<Instruction>(IncomingValue) &&
4557         !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue),
4558                                            VF))
4559       Lane = VPLane::getLastLaneForVF(VF);
4560 
4561     // Can be a loop invariant incoming value or the last scalar value to be
4562     // extracted from the vectorized loop.
4563     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4564     Value *lastIncomingValue =
4565         OrigLoop->isLoopInvariant(IncomingValue)
4566             ? IncomingValue
4567             : State.get(State.Plan->getVPValue(IncomingValue),
4568                         VPIteration(UF - 1, Lane));
4569     LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
4570   }
4571 }
4572 
4573 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4574   // The basic block and loop containing the predicated instruction.
4575   auto *PredBB = PredInst->getParent();
4576   auto *VectorLoop = LI->getLoopFor(PredBB);
4577 
4578   // Initialize a worklist with the operands of the predicated instruction.
4579   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4580 
4581   // Holds instructions that we need to analyze again. An instruction may be
4582   // reanalyzed if we don't yet know if we can sink it or not.
4583   SmallVector<Instruction *, 8> InstsToReanalyze;
4584 
4585   // Returns true if a given use occurs in the predicated block. Phi nodes use
4586   // their operands in their corresponding predecessor blocks.
4587   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4588     auto *I = cast<Instruction>(U.getUser());
4589     BasicBlock *BB = I->getParent();
4590     if (auto *Phi = dyn_cast<PHINode>(I))
4591       BB = Phi->getIncomingBlock(
4592           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4593     return BB == PredBB;
4594   };
4595 
4596   // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
  // operands are then added to the worklist. The algorithm ends after one pass
4599   // through the worklist doesn't sink a single instruction.
4600   bool Changed;
4601   do {
4602     // Add the instructions that need to be reanalyzed to the worklist, and
4603     // reset the changed indicator.
4604     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4605     InstsToReanalyze.clear();
4606     Changed = false;
4607 
4608     while (!Worklist.empty()) {
4609       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4610 
4611       // We can't sink an instruction if it is a phi node, is not in the loop,
4612       // or may have side effects.
4613       if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) ||
4614           I->mayHaveSideEffects())
4615         continue;
4616 
4617       // If the instruction is already in PredBB, check if we can sink its
4618       // operands. In that case, VPlan's sinkScalarOperands() succeeded in
4619       // sinking the scalar instruction I, hence it appears in PredBB; but it
4620       // may have failed to sink I's operands (recursively), which we try
4621       // (again) here.
4622       if (I->getParent() == PredBB) {
4623         Worklist.insert(I->op_begin(), I->op_end());
4624         continue;
4625       }
4626 
4627       // It's legal to sink the instruction if all its uses occur in the
4628       // predicated block. Otherwise, there's nothing to do yet, and we may
4629       // need to reanalyze the instruction.
4630       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4631         InstsToReanalyze.push_back(I);
4632         continue;
4633       }
4634 
4635       // Move the instruction to the beginning of the predicated block, and add
      // its operands to the worklist.
4637       I->moveBefore(&*PredBB->getFirstInsertionPt());
4638       Worklist.insert(I->op_begin(), I->op_end());
4639 
4640       // The sinking may have enabled other instructions to be sunk, so we will
4641       // need to iterate.
4642       Changed = true;
4643     }
4644   } while (Changed);
4645 }
4646 
4647 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
4648   for (PHINode *OrigPhi : OrigPHIsToFix) {
4649     VPWidenPHIRecipe *VPPhi =
4650         cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi));
4651     PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
4652     // Make sure the builder has a valid insert point.
4653     Builder.SetInsertPoint(NewPhi);
4654     for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) {
4655       VPValue *Inc = VPPhi->getIncomingValue(i);
4656       VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i);
4657       NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]);
4658     }
4659   }
4660 }
4661 
4662 bool InnerLoopVectorizer::useOrderedReductions(RecurrenceDescriptor &RdxDesc) {
4663   return Cost->useOrderedReductions(RdxDesc);
4664 }
4665 
4666 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, VPValue *VPDef,
4667                                    VPUser &Operands, unsigned UF,
4668                                    ElementCount VF, bool IsPtrLoopInvariant,
4669                                    SmallBitVector &IsIndexLoopInvariant,
4670                                    VPTransformState &State) {
4671   // Construct a vector GEP by widening the operands of the scalar GEP as
4672   // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
4673   // results in a vector of pointers when at least one operand of the GEP
4674   // is vector-typed. Thus, to keep the representation compact, we only use
4675   // vector-typed operands for loop-varying values.
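  //
  // Illustrative sketch (hypothetical names): a scalar GEP
  //
  //   %gep = getelementptr inbounds i32, i32* %base, i64 %iv
  //
  // with a loop-invariant %base and loop-varying %iv is widened for VF = 4 to
  //
  //   %gep.vec = getelementptr inbounds i32, i32* %base, <4 x i64> %iv.vec
  //
  // i.e. a single GEP producing a vector of pointers.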
4676 
4677   if (VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
4678     // If we are vectorizing, but the GEP has only loop-invariant operands,
4679     // the GEP we build (by only using vector-typed operands for
4680     // loop-varying values) would be a scalar pointer. Thus, to ensure we
4681     // produce a vector of pointers, we need to either arbitrarily pick an
4682     // operand to broadcast, or broadcast a clone of the original GEP.
4683     // Here, we broadcast a clone of the original.
4684     //
4685     // TODO: If at some point we decide to scalarize instructions having
4686     //       loop-invariant operands, this special case will no longer be
4687     //       required. We would add the scalarization decision to
4688     //       collectLoopScalars() and teach getVectorValue() to broadcast
4689     //       the lane-zero scalar value.
4690     auto *Clone = Builder.Insert(GEP->clone());
4691     for (unsigned Part = 0; Part < UF; ++Part) {
4692       Value *EntryPart = Builder.CreateVectorSplat(VF, Clone);
4693       State.set(VPDef, EntryPart, Part);
4694       addMetadata(EntryPart, GEP);
4695     }
4696   } else {
4697     // If the GEP has at least one loop-varying operand, we are sure to
4698     // produce a vector of pointers. But if we are only unrolling, we want
4699     // to produce a scalar GEP for each unroll part. Thus, the GEP we
4700     // produce with the code below will be scalar (if VF == 1) or vector
4701     // (otherwise). Note that for the unroll-only case, we still maintain
4702     // values in the vector mapping with initVector, as we do for other
4703     // instructions.
4704     for (unsigned Part = 0; Part < UF; ++Part) {
4705       // The pointer operand of the new GEP. If it's loop-invariant, we
4706       // won't broadcast it.
4707       auto *Ptr = IsPtrLoopInvariant
4708                       ? State.get(Operands.getOperand(0), VPIteration(0, 0))
4709                       : State.get(Operands.getOperand(0), Part);
4710 
4711       // Collect all the indices for the new GEP. If any index is
4712       // loop-invariant, we won't broadcast it.
4713       SmallVector<Value *, 4> Indices;
4714       for (unsigned I = 1, E = Operands.getNumOperands(); I < E; I++) {
4715         VPValue *Operand = Operands.getOperand(I);
4716         if (IsIndexLoopInvariant[I - 1])
4717           Indices.push_back(State.get(Operand, VPIteration(0, 0)));
4718         else
4719           Indices.push_back(State.get(Operand, Part));
4720       }
4721 
4722       // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
4723       // but it should be a vector, otherwise.
4724       auto *NewGEP =
4725           GEP->isInBounds()
4726               ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr,
4727                                           Indices)
4728               : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices);
4729       assert((VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
4730              "NewGEP is not a pointer vector");
4731       State.set(VPDef, NewGEP, Part);
4732       addMetadata(NewGEP, GEP);
4733     }
4734   }
4735 }
4736 
4737 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
4738                                               RecurrenceDescriptor *RdxDesc,
4739                                               VPWidenPHIRecipe *PhiR,
4740                                               VPTransformState &State) {
4741   PHINode *P = cast<PHINode>(PN);
4742   if (EnableVPlanNativePath) {
4743     // Currently we enter here in the VPlan-native path for non-induction
4744     // PHIs where all control flow is uniform. We simply widen these PHIs.
4745     // Create a vector phi with no operands - the vector phi operands will be
4746     // set at the end of vector code generation.
4747     Type *VecTy = (State.VF.isScalar())
4748                       ? PN->getType()
4749                       : VectorType::get(PN->getType(), State.VF);
4750     Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4751     State.set(PhiR, VecPhi, 0);
4752     OrigPHIsToFix.push_back(P);
4753 
4754     return;
4755   }
4756 
4757   assert(PN->getParent() == OrigLoop->getHeader() &&
4758          "Non-header phis should have been handled elsewhere");
4759 
4760   VPValue *StartVPV = PhiR->getStartValue();
4761   Value *StartV = StartVPV ? StartVPV->getLiveInIRValue() : nullptr;
4762   // In order to support recurrences we need to be able to vectorize Phi nodes.
4763   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4764   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4765   // this value when we vectorize all of the instructions that use the PHI.
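  //
  // For example (a sketch, VF=4), an integer add reduction with scalar start
  // value %s initially gets only its preheader incoming value: a vector that
  // is conceptually <%s, 0, 0, 0>, i.e. the identity splat with the start
  // value inserted into lane 0 (see the reduction handling below). The
  // back-edge incoming value is filled in later, in stage #2.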
4766   if (RdxDesc || Legal->isFirstOrderRecurrence(P)) {
4767     Value *Iden = nullptr;
4768     bool ScalarPHI =
4769         (State.VF.isScalar()) || Cost->isInLoopReduction(cast<PHINode>(PN));
4770     Type *VecTy =
4771         ScalarPHI ? PN->getType() : VectorType::get(PN->getType(), State.VF);
4772 
4773     if (RdxDesc) {
4774       assert(Legal->isReductionVariable(P) && StartV &&
4775              "RdxDesc should only be set for reduction variables; in that case "
4776              "a StartV is also required");
4777       RecurKind RK = RdxDesc->getRecurrenceKind();
4778       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK)) {
        // MinMax reductions have the start value as their identity.
4780         if (ScalarPHI) {
4781           Iden = StartV;
4782         } else {
4783           IRBuilderBase::InsertPointGuard IPBuilder(Builder);
4784           Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4785           StartV = Iden =
4786               Builder.CreateVectorSplat(State.VF, StartV, "minmax.ident");
4787         }
4788       } else {
4789         Constant *IdenC = RecurrenceDescriptor::getRecurrenceIdentity(
4790             RK, VecTy->getScalarType(), RdxDesc->getFastMathFlags());
4791         Iden = IdenC;
4792 
4793         if (!ScalarPHI) {
4794           Iden = ConstantVector::getSplat(State.VF, IdenC);
4795           IRBuilderBase::InsertPointGuard IPBuilder(Builder);
4796           Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4797           Constant *Zero = Builder.getInt32(0);
4798           StartV = Builder.CreateInsertElement(Iden, StartV, Zero);
4799         }
4800       }
4801     }
4802 
4803     bool IsOrdered = State.VF.isVector() &&
4804                      Cost->isInLoopReduction(cast<PHINode>(PN)) &&
4805                      Cost->useOrderedReductions(*RdxDesc);
4806 
4807     for (unsigned Part = 0; Part < State.UF; ++Part) {
4808       // This is phase one of vectorizing PHIs.
4809       if (Part > 0 && IsOrdered)
4810         return;
4811       Value *EntryPart = PHINode::Create(
4812           VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
4813       State.set(PhiR, EntryPart, Part);
4814       if (StartV) {
4815         // Make sure to add the reduction start value only to the
4816         // first unroll part.
4817         Value *StartVal = (Part == 0) ? StartV : Iden;
4818         cast<PHINode>(EntryPart)->addIncoming(StartVal, LoopVectorPreHeader);
4819       }
4820     }
4821     return;
4822   }
4823 
4824   assert(!Legal->isReductionVariable(P) &&
4825          "reductions should be handled above");
4826 
4827   setDebugLocFromInst(Builder, P);
4828 
4829   // This PHINode must be an induction variable.
4830   // Make sure that we know about it.
4831   assert(Legal->getInductionVars().count(P) && "Not an induction variable");
4832 
4833   InductionDescriptor II = Legal->getInductionVars().lookup(P);
4834   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4835 
4836   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4837   // which can be found from the original scalar operations.
4838   switch (II.getKind()) {
4839   case InductionDescriptor::IK_NoInduction:
4840     llvm_unreachable("Unknown induction");
4841   case InductionDescriptor::IK_IntInduction:
4842   case InductionDescriptor::IK_FpInduction:
4843     llvm_unreachable("Integer/fp induction is handled elsewhere.");
4844   case InductionDescriptor::IK_PtrInduction: {
4845     // Handle the pointer induction variable case.
4846     assert(P->getType()->isPointerTy() && "Unexpected type.");
4847 
4848     if (Cost->isScalarAfterVectorization(P, State.VF)) {
4849       // This is the normalized GEP that starts counting at zero.
4850       Value *PtrInd =
4851           Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType());
4852       // Determine the number of scalars we need to generate for each unroll
4853       // iteration. If the instruction is uniform, we only need to generate the
4854       // first lane. Otherwise, we generate all VF values.
4855       bool IsUniform = Cost->isUniformAfterVectorization(P, State.VF);
4856       unsigned Lanes = IsUniform ? 1 : State.VF.getKnownMinValue();
4857 
4858       bool NeedsVectorIndex = !IsUniform && VF.isScalable();
4859       Value *UnitStepVec = nullptr, *PtrIndSplat = nullptr;
4860       if (NeedsVectorIndex) {
4861         Type *VecIVTy = VectorType::get(PtrInd->getType(), VF);
4862         UnitStepVec = Builder.CreateStepVector(VecIVTy);
4863         PtrIndSplat = Builder.CreateVectorSplat(VF, PtrInd);
4864       }
4865 
4866       for (unsigned Part = 0; Part < UF; ++Part) {
4867         Value *PartStart = createStepForVF(
4868             Builder, ConstantInt::get(PtrInd->getType(), Part), VF);
4869 
4870         if (NeedsVectorIndex) {
4871           Value *PartStartSplat = Builder.CreateVectorSplat(VF, PartStart);
4872           Value *Indices = Builder.CreateAdd(PartStartSplat, UnitStepVec);
4873           Value *GlobalIndices = Builder.CreateAdd(PtrIndSplat, Indices);
4874           Value *SclrGep =
4875               emitTransformedIndex(Builder, GlobalIndices, PSE.getSE(), DL, II);
4876           SclrGep->setName("next.gep");
4877           State.set(PhiR, SclrGep, Part);
4878           // We've cached the whole vector, which means we can support the
4879           // extraction of any lane.
4880           continue;
4881         }
4882 
4883         for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4884           Value *Idx = Builder.CreateAdd(
4885               PartStart, ConstantInt::get(PtrInd->getType(), Lane));
4886           Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4887           Value *SclrGep =
4888               emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II);
4889           SclrGep->setName("next.gep");
4890           State.set(PhiR, SclrGep, VPIteration(Part, Lane));
4891         }
4892       }
4893       return;
4894     }
4895     assert(isa<SCEVConstant>(II.getStep()) &&
4896            "Induction step not a SCEV constant!");
4897     Type *PhiType = II.getStep()->getType();
4898 
4899     // Build a pointer phi
4900     Value *ScalarStartValue = II.getStartValue();
4901     Type *ScStValueType = ScalarStartValue->getType();
4902     PHINode *NewPointerPhi =
4903         PHINode::Create(ScStValueType, 2, "pointer.phi", Induction);
4904     NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader);
4905 
4906     // A pointer induction, performed by using a gep
4907     BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
4908     Instruction *InductionLoc = LoopLatch->getTerminator();
4909     const SCEV *ScalarStep = II.getStep();
4910     SCEVExpander Exp(*PSE.getSE(), DL, "induction");
4911     Value *ScalarStepValue =
4912         Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc);
4913     Value *RuntimeVF = getRuntimeVF(Builder, PhiType, VF);
4914     Value *NumUnrolledElems =
4915         Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF));
4916     Value *InductionGEP = GetElementPtrInst::Create(
4917         ScStValueType->getPointerElementType(), NewPointerPhi,
4918         Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind",
4919         InductionLoc);
4920     NewPointerPhi->addIncoming(InductionGEP, LoopLatch);
4921 
4922     // Create UF many actual address geps that use the pointer
4923     // phi as base and a vectorized version of the step value
4924     // (<step*0, ..., step*N>) as offset.
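    // For example, with VF=4 and UF=2 the (unscaled) per-part offsets are
    // <0,1,2,3> for part 0 and <4,5,6,7> for part 1, i.e.
    // Part * VF + <0,...,VF-1>, which is then multiplied by the step and used
    // as the GEP index.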
4925     for (unsigned Part = 0; Part < State.UF; ++Part) {
4926       Type *VecPhiType = VectorType::get(PhiType, State.VF);
4927       Value *StartOffsetScalar =
4928           Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part));
4929       Value *StartOffset =
4930           Builder.CreateVectorSplat(State.VF, StartOffsetScalar);
4931       // Create a vector of consecutive numbers from zero to VF.
4932       StartOffset =
4933           Builder.CreateAdd(StartOffset, Builder.CreateStepVector(VecPhiType));
4934 
4935       Value *GEP = Builder.CreateGEP(
4936           ScStValueType->getPointerElementType(), NewPointerPhi,
4937           Builder.CreateMul(
4938               StartOffset, Builder.CreateVectorSplat(State.VF, ScalarStepValue),
4939               "vector.gep"));
4940       State.set(PhiR, GEP, Part);
4941     }
4942   }
4943   }
4944 }
4945 
4946 /// A helper function for checking whether an integer division-related
4947 /// instruction may divide by zero (in which case it must be predicated if
4948 /// executed conditionally in the scalar code).
4949 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
/// Non-zero divisors that are not compile-time constants will not be
/// converted into multiplication, so we will still end up scalarizing
/// the division, but can do so without predication.
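/// For example, in
///   for (i = 0; i < n; ++i)
///     if (b[i] != 0)
///       c[i] = a[i] / b[i];
/// the division only executes under the guard in the scalar loop, so a
/// scalarized copy in the vector loop must stay predicated; with a non-zero
/// constant divisor it could be executed unconditionally.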
4953 static bool mayDivideByZero(Instruction &I) {
4954   assert((I.getOpcode() == Instruction::UDiv ||
4955           I.getOpcode() == Instruction::SDiv ||
4956           I.getOpcode() == Instruction::URem ||
4957           I.getOpcode() == Instruction::SRem) &&
4958          "Unexpected instruction");
4959   Value *Divisor = I.getOperand(1);
4960   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4961   return !CInt || CInt->isZero();
4962 }
4963 
4964 void InnerLoopVectorizer::widenInstruction(Instruction &I, VPValue *Def,
4965                                            VPUser &User,
4966                                            VPTransformState &State) {
4967   switch (I.getOpcode()) {
4968   case Instruction::Call:
4969   case Instruction::Br:
4970   case Instruction::PHI:
4971   case Instruction::GetElementPtr:
4972   case Instruction::Select:
4973     llvm_unreachable("This instruction is handled by a different recipe.");
4974   case Instruction::UDiv:
4975   case Instruction::SDiv:
4976   case Instruction::SRem:
4977   case Instruction::URem:
4978   case Instruction::Add:
4979   case Instruction::FAdd:
4980   case Instruction::Sub:
4981   case Instruction::FSub:
4982   case Instruction::FNeg:
4983   case Instruction::Mul:
4984   case Instruction::FMul:
4985   case Instruction::FDiv:
4986   case Instruction::FRem:
4987   case Instruction::Shl:
4988   case Instruction::LShr:
4989   case Instruction::AShr:
4990   case Instruction::And:
4991   case Instruction::Or:
4992   case Instruction::Xor: {
4993     // Just widen unops and binops.
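    // For example (a sketch), a scalar "%r = add nsw i32 %a, %b" becomes, per
    // unroll part, "%r = add nsw <VF x i32> %a.vec, %b.vec", with the IR
    // flags copied from the original instruction below.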
4994     setDebugLocFromInst(Builder, &I);
4995 
4996     for (unsigned Part = 0; Part < UF; ++Part) {
4997       SmallVector<Value *, 2> Ops;
4998       for (VPValue *VPOp : User.operands())
4999         Ops.push_back(State.get(VPOp, Part));
5000 
5001       Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
5002 
5003       if (auto *VecOp = dyn_cast<Instruction>(V))
5004         VecOp->copyIRFlags(&I);
5005 
5006       // Use this vector value for all users of the original instruction.
5007       State.set(Def, V, Part);
5008       addMetadata(V, &I);
5009     }
5010 
5011     break;
5012   }
5013   case Instruction::ICmp:
5014   case Instruction::FCmp: {
5015     // Widen compares. Generate vector compares.
5016     bool FCmp = (I.getOpcode() == Instruction::FCmp);
5017     auto *Cmp = cast<CmpInst>(&I);
5018     setDebugLocFromInst(Builder, Cmp);
5019     for (unsigned Part = 0; Part < UF; ++Part) {
5020       Value *A = State.get(User.getOperand(0), Part);
5021       Value *B = State.get(User.getOperand(1), Part);
5022       Value *C = nullptr;
5023       if (FCmp) {
5024         // Propagate fast math flags.
5025         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
5026         Builder.setFastMathFlags(Cmp->getFastMathFlags());
5027         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
5028       } else {
5029         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
5030       }
5031       State.set(Def, C, Part);
5032       addMetadata(C, &I);
5033     }
5034 
5035     break;
5036   }
5037 
5038   case Instruction::ZExt:
5039   case Instruction::SExt:
5040   case Instruction::FPToUI:
5041   case Instruction::FPToSI:
5042   case Instruction::FPExt:
5043   case Instruction::PtrToInt:
5044   case Instruction::IntToPtr:
5045   case Instruction::SIToFP:
5046   case Instruction::UIToFP:
5047   case Instruction::Trunc:
5048   case Instruction::FPTrunc:
5049   case Instruction::BitCast: {
5050     auto *CI = cast<CastInst>(&I);
5051     setDebugLocFromInst(Builder, CI);
5052 
    // Vectorize casts.
5054     Type *DestTy =
5055         (VF.isScalar()) ? CI->getType() : VectorType::get(CI->getType(), VF);
5056 
5057     for (unsigned Part = 0; Part < UF; ++Part) {
5058       Value *A = State.get(User.getOperand(0), Part);
5059       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
5060       State.set(Def, Cast, Part);
5061       addMetadata(Cast, &I);
5062     }
5063     break;
5064   }
5065   default:
5066     // This instruction is not vectorized by simple widening.
5067     LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
5068     llvm_unreachable("Unhandled instruction!");
5069   } // end of switch.
5070 }
5071 
5072 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def,
5073                                                VPUser &ArgOperands,
5074                                                VPTransformState &State) {
5075   assert(!isa<DbgInfoIntrinsic>(I) &&
5076          "DbgInfoIntrinsic should have been dropped during VPlan construction");
5077   setDebugLocFromInst(Builder, &I);
5078 
5079   Module *M = I.getParent()->getParent()->getParent();
5080   auto *CI = cast<CallInst>(&I);
5081 
5082   SmallVector<Type *, 4> Tys;
5083   for (Value *ArgOperand : CI->arg_operands())
5084     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue()));
5085 
5086   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
5087 
  // Decide whether to widen the call as a vector intrinsic or as a call to a
  // vector library function, whichever the cost model rates as cheaper.
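  // For example, a call to sqrtf may be widened either to the
  // llvm.sqrt.v4f32 intrinsic or to a vector math-library routine known to
  // TLI (a sketch; the actual candidates depend on the target and the
  // configured vector library).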
5091   bool NeedToScalarize = false;
5092   InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
5093   InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0;
5094   bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
5095   assert((UseVectorIntrinsic || !NeedToScalarize) &&
5096          "Instruction should be scalarized elsewhere.");
5097   assert((IntrinsicCost.isValid() || CallCost.isValid()) &&
5098          "Either the intrinsic cost or vector call cost must be valid");
5099 
5100   for (unsigned Part = 0; Part < UF; ++Part) {
5101     SmallVector<Value *, 4> Args;
5102     for (auto &I : enumerate(ArgOperands.operands())) {
5103       // Some intrinsics have a scalar argument - don't replace it with a
5104       // vector.
5105       Value *Arg;
5106       if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
5107         Arg = State.get(I.value(), Part);
5108       else
5109         Arg = State.get(I.value(), VPIteration(0, 0));
5110       Args.push_back(Arg);
5111     }
5112 
5113     Function *VectorF;
5114     if (UseVectorIntrinsic) {
5115       // Use vector version of the intrinsic.
5116       Type *TysForDecl[] = {CI->getType()};
5117       if (VF.isVector())
5118         TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
5119       VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
5120       assert(VectorF && "Can't retrieve vector intrinsic.");
5121     } else {
5122       // Use vector version of the function call.
5123       const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
5124 #ifndef NDEBUG
5125       assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
5126              "Can't create vector function.");
5127 #endif
      VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
    }
    SmallVector<OperandBundleDef, 1> OpBundles;
    CI->getOperandBundlesAsDefs(OpBundles);
    CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);

    if (isa<FPMathOperator>(V))
      V->copyFastMathFlags(CI);

    State.set(Def, V, Part);
    addMetadata(V, &I);
5139   }
5140 }
5141 
5142 void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I, VPValue *VPDef,
5143                                                  VPUser &Operands,
5144                                                  bool InvariantCond,
5145                                                  VPTransformState &State) {
5146   setDebugLocFromInst(Builder, &I);
5147 
5148   // The condition can be loop invariant  but still defined inside the
5149   // loop. This means that we can't just use the original 'cond' value.
5150   // We have to take the 'vectorized' value and pick the first lane.
5151   // Instcombine will make this a no-op.
5152   auto *InvarCond = InvariantCond
5153                         ? State.get(Operands.getOperand(0), VPIteration(0, 0))
5154                         : nullptr;
5155 
5156   for (unsigned Part = 0; Part < UF; ++Part) {
5157     Value *Cond =
5158         InvarCond ? InvarCond : State.get(Operands.getOperand(0), Part);
5159     Value *Op0 = State.get(Operands.getOperand(1), Part);
5160     Value *Op1 = State.get(Operands.getOperand(2), Part);
5161     Value *Sel = Builder.CreateSelect(Cond, Op0, Op1);
5162     State.set(VPDef, Sel, Part);
5163     addMetadata(Sel, &I);
5164   }
5165 }
5166 
5167 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
5168   // We should not collect Scalars more than once per VF. Right now, this
5169   // function is called from collectUniformsAndScalars(), which already does
5170   // this check. Collecting Scalars for VF=1 does not make any sense.
5171   assert(VF.isVector() && Scalars.find(VF) == Scalars.end() &&
5172          "This function should not be visited twice for the same VF");
5173 
5174   SmallSetVector<Instruction *, 8> Worklist;
5175 
5176   // These sets are used to seed the analysis with pointers used by memory
5177   // accesses that will remain scalar.
5178   SmallSetVector<Instruction *, 8> ScalarPtrs;
5179   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
5180   auto *Latch = TheLoop->getLoopLatch();
5181 
5182   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
5183   // The pointer operands of loads and stores will be scalar as long as the
5184   // memory access is not a gather or scatter operation. The value operand of a
5185   // store will remain scalar if the store is scalarized.
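  // For example, the address of a consecutive (CM_Widen) load remains a
  // scalar use, since a single lane-0 pointer feeds the wide load, whereas a
  // CM_GatherScatter access needs a vector of pointers.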
5186   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
5187     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
5188     assert(WideningDecision != CM_Unknown &&
5189            "Widening decision should be ready at this moment");
5190     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
5191       if (Ptr == Store->getValueOperand())
5192         return WideningDecision == CM_Scalarize;
5193     assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
           "Ptr is neither a value nor a pointer operand");
5195     return WideningDecision != CM_GatherScatter;
5196   };
5197 
5198   // A helper that returns true if the given value is a bitcast or
5199   // getelementptr instruction contained in the loop.
5200   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
5201     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
5202             isa<GetElementPtrInst>(V)) &&
5203            !TheLoop->isLoopInvariant(V);
5204   };
5205 
5206   auto isScalarPtrInduction = [&](Instruction *MemAccess, Value *Ptr) {
5207     if (!isa<PHINode>(Ptr) ||
5208         !Legal->getInductionVars().count(cast<PHINode>(Ptr)))
5209       return false;
5210     auto &Induction = Legal->getInductionVars()[cast<PHINode>(Ptr)];
5211     if (Induction.getKind() != InductionDescriptor::IK_PtrInduction)
5212       return false;
5213     return isScalarUse(MemAccess, Ptr);
5214   };
5215 
5216   // A helper that evaluates a memory access's use of a pointer. If the
5217   // pointer is actually the pointer induction of a loop, it is being
5218   // inserted into Worklist. If the use will be a scalar use, and the
5219   // pointer is only used by memory accesses, we place the pointer in
5220   // ScalarPtrs. Otherwise, the pointer is placed in PossibleNonScalarPtrs.
5221   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
5222     if (isScalarPtrInduction(MemAccess, Ptr)) {
5223       Worklist.insert(cast<Instruction>(Ptr));
5224       Instruction *Update = cast<Instruction>(
5225           cast<PHINode>(Ptr)->getIncomingValueForBlock(Latch));
5226       Worklist.insert(Update);
5227       LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Ptr
5228                         << "\n");
5229       LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Update
5230                         << "\n");
5231       return;
5232     }
5233     // We only care about bitcast and getelementptr instructions contained in
5234     // the loop.
5235     if (!isLoopVaryingBitCastOrGEP(Ptr))
5236       return;
5237 
5238     // If the pointer has already been identified as scalar (e.g., if it was
5239     // also identified as uniform), there's nothing to do.
5240     auto *I = cast<Instruction>(Ptr);
5241     if (Worklist.count(I))
5242       return;
5243 
5244     // If the use of the pointer will be a scalar use, and all users of the
5245     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
5246     // place the pointer in PossibleNonScalarPtrs.
5247     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
5248           return isa<LoadInst>(U) || isa<StoreInst>(U);
5249         }))
5250       ScalarPtrs.insert(I);
5251     else
5252       PossibleNonScalarPtrs.insert(I);
5253   };
5254 
  // We seed the scalars analysis with two classes of instructions: (1)
  // instructions marked uniform-after-vectorization and (2) bitcast,
5257   // getelementptr and (pointer) phi instructions used by memory accesses
5258   // requiring a scalar use.
5259   //
5260   // (1) Add to the worklist all instructions that have been identified as
5261   // uniform-after-vectorization.
5262   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
5263 
5264   // (2) Add to the worklist all bitcast and getelementptr instructions used by
5265   // memory accesses requiring a scalar use. The pointer operands of loads and
  // stores will be scalar as long as the memory access is not a gather or
5267   // scatter operation. The value operand of a store will remain scalar if the
5268   // store is scalarized.
5269   for (auto *BB : TheLoop->blocks())
5270     for (auto &I : *BB) {
5271       if (auto *Load = dyn_cast<LoadInst>(&I)) {
5272         evaluatePtrUse(Load, Load->getPointerOperand());
5273       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
5274         evaluatePtrUse(Store, Store->getPointerOperand());
5275         evaluatePtrUse(Store, Store->getValueOperand());
5276       }
5277     }
5278   for (auto *I : ScalarPtrs)
5279     if (!PossibleNonScalarPtrs.count(I)) {
5280       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
5281       Worklist.insert(I);
5282     }
5283 
5284   // Insert the forced scalars.
5285   // FIXME: Currently widenPHIInstruction() often creates a dead vector
5286   // induction variable when the PHI user is scalarized.
5287   auto ForcedScalar = ForcedScalars.find(VF);
5288   if (ForcedScalar != ForcedScalars.end())
5289     for (auto *I : ForcedScalar->second)
5290       Worklist.insert(I);
5291 
5292   // Expand the worklist by looking through any bitcasts and getelementptr
5293   // instructions we've already identified as scalar. This is similar to the
5294   // expansion step in collectLoopUniforms(); however, here we're only
5295   // expanding to include additional bitcasts and getelementptr instructions.
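  // For example, if a scalarized load's address is "%p = bitcast %gep" and
  // every user of %gep is either already in the worklist (like %p) or a
  // memory access using %gep as a scalar address, then %gep becomes scalar
  // as well.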
5296   unsigned Idx = 0;
5297   while (Idx != Worklist.size()) {
5298     Instruction *Dst = Worklist[Idx++];
5299     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
5300       continue;
5301     auto *Src = cast<Instruction>(Dst->getOperand(0));
5302     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
5303           auto *J = cast<Instruction>(U);
5304           return !TheLoop->contains(J) || Worklist.count(J) ||
5305                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
5306                   isScalarUse(J, Src));
5307         })) {
5308       Worklist.insert(Src);
5309       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
5310     }
5311   }
5312 
5313   // An induction variable will remain scalar if all users of the induction
5314   // variable and induction variable update remain scalar.
5315   for (auto &Induction : Legal->getInductionVars()) {
5316     auto *Ind = Induction.first;
5317     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5318 
5319     // If tail-folding is applied, the primary induction variable will be used
5320     // to feed a vector compare.
5321     if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
5322       continue;
5323 
5324     // Determine if all users of the induction variable are scalar after
5325     // vectorization.
5326     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5327       auto *I = cast<Instruction>(U);
5328       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
5329     });
5330     if (!ScalarInd)
5331       continue;
5332 
5333     // Determine if all users of the induction variable update instruction are
5334     // scalar after vectorization.
5335     auto ScalarIndUpdate =
5336         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5337           auto *I = cast<Instruction>(U);
5338           return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
5339         });
5340     if (!ScalarIndUpdate)
5341       continue;
5342 
5343     // The induction variable and its update instruction will remain scalar.
5344     Worklist.insert(Ind);
5345     Worklist.insert(IndUpdate);
5346     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
5347     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
5348                       << "\n");
5349   }
5350 
5351   Scalars[VF].insert(Worklist.begin(), Worklist.end());
5352 }
5353 
5354 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I) const {
5355   if (!blockNeedsPredication(I->getParent()))
5356     return false;
5357   switch(I->getOpcode()) {
5358   default:
5359     break;
5360   case Instruction::Load:
5361   case Instruction::Store: {
5362     if (!Legal->isMaskRequired(I))
5363       return false;
5364     auto *Ptr = getLoadStorePointerOperand(I);
5365     auto *Ty = getLoadStoreType(I);
5366     const Align Alignment = getLoadStoreAlignment(I);
5367     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
5368                                 TTI.isLegalMaskedGather(Ty, Alignment))
5369                             : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
5370                                 TTI.isLegalMaskedScatter(Ty, Alignment));
5371   }
5372   case Instruction::UDiv:
5373   case Instruction::SDiv:
5374   case Instruction::SRem:
5375   case Instruction::URem:
5376     return mayDivideByZero(*I);
5377   }
5378   return false;
5379 }
5380 
5381 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
5382     Instruction *I, ElementCount VF) {
5383   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
5384   assert(getWideningDecision(I, VF) == CM_Unknown &&
5385          "Decision should not be set yet.");
5386   auto *Group = getInterleavedAccessGroup(I);
5387   assert(Group && "Must have a group.");
5388 
  // If the instruction's allocated size doesn't equal its type size, it
5390   // requires padding and will be scalarized.
5391   auto &DL = I->getModule()->getDataLayout();
5392   auto *ScalarTy = getLoadStoreType(I);
5393   if (hasIrregularType(ScalarTy, DL))
5394     return false;
5395 
5396   // Check if masking is required.
5397   // A Group may need masking for one of two reasons: it resides in a block that
5398   // needs predication, or it was decided to use masking to deal with gaps.
5399   bool PredicatedAccessRequiresMasking =
5400       Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I);
5401   bool AccessWithGapsRequiresMasking =
5402       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
5403   if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking)
5404     return true;
5405 
5406   // If masked interleaving is required, we expect that the user/target had
5407   // enabled it, because otherwise it either wouldn't have been created or
5408   // it should have been invalidated by the CostModel.
5409   assert(useMaskedInterleavedAccesses(TTI) &&
5410          "Masked interleave-groups for predicated accesses are not enabled.");
5411 
5412   auto *Ty = getLoadStoreType(I);
5413   const Align Alignment = getLoadStoreAlignment(I);
5414   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
5415                           : TTI.isLegalMaskedStore(Ty, Alignment);
5416 }
5417 
5418 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
5419     Instruction *I, ElementCount VF) {
5420   // Get and ensure we have a valid memory instruction.
5421   LoadInst *LI = dyn_cast<LoadInst>(I);
5422   StoreInst *SI = dyn_cast<StoreInst>(I);
5423   assert((LI || SI) && "Invalid memory instruction");
5424 
5425   auto *Ptr = getLoadStorePointerOperand(I);
5426 
5427   // In order to be widened, the pointer should be consecutive, first of all.
5428   if (!Legal->isConsecutivePtr(Ptr))
5429     return false;
5430 
5431   // If the instruction is a store located in a predicated block, it will be
5432   // scalarized.
5433   if (isScalarWithPredication(I))
5434     return false;
5435 
  // If the instruction's allocated size doesn't equal its type size, it
5437   // requires padding and will be scalarized.
5438   auto &DL = I->getModule()->getDataLayout();
5439   auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
5440   if (hasIrregularType(ScalarTy, DL))
5441     return false;
5442 
5443   return true;
5444 }
5445 
5446 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
5447   // We should not collect Uniforms more than once per VF. Right now,
5448   // this function is called from collectUniformsAndScalars(), which
5449   // already does this check. Collecting Uniforms for VF=1 does not make any
5450   // sense.
5451 
5452   assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
5453          "This function should not be visited twice for the same VF");
5454 
  // Initialize the entry for this VF. Even if no uniform value is found, the
  // (empty) entry ensures we do not analyze this VF again:
  // Uniforms.count(VF) will return 1.
5457   Uniforms[VF].clear();
5458 
5459   // We now know that the loop is vectorizable!
5460   // Collect instructions inside the loop that will remain uniform after
5461   // vectorization.
5462 
5463   // Global values, params and instructions outside of current loop are out of
5464   // scope.
5465   auto isOutOfScope = [&](Value *V) -> bool {
5466     Instruction *I = dyn_cast<Instruction>(V);
5467     return (!I || !TheLoop->contains(I));
5468   };
5469 
5470   SetVector<Instruction *> Worklist;
5471   BasicBlock *Latch = TheLoop->getLoopLatch();
5472 
5473   // Instructions that are scalar with predication must not be considered
5474   // uniform after vectorization, because that would create an erroneous
5475   // replicating region where only a single instance out of VF should be formed.
5476   // TODO: optimize such seldom cases if found important, see PR40816.
5477   auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
5478     if (isOutOfScope(I)) {
5479       LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
5480                         << *I << "\n");
5481       return;
5482     }
5483     if (isScalarWithPredication(I)) {
5484       LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
5485                         << *I << "\n");
5486       return;
5487     }
5488     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
5489     Worklist.insert(I);
5490   };
5491 
5492   // Start with the conditional branch. If the branch condition is an
5493   // instruction contained in the loop that is only used by the branch, it is
5494   // uniform.
5495   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
5496   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
5497     addToWorklistIfAllowed(Cmp);
5498 
5499   auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
5500     InstWidening WideningDecision = getWideningDecision(I, VF);
5501     assert(WideningDecision != CM_Unknown &&
5502            "Widening decision should be ready at this moment");
5503 
5504     // A uniform memory op is itself uniform.  We exclude uniform stores
5505     // here as they demand the last lane, not the first one.
5506     if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) {
5507       assert(WideningDecision == CM_Scalarize);
5508       return true;
5509     }
5510 
5511     return (WideningDecision == CM_Widen ||
5512             WideningDecision == CM_Widen_Reverse ||
5513             WideningDecision == CM_Interleave);
5514   };
5515 
5516 
5517   // Returns true if Ptr is the pointer operand of a memory access instruction
5518   // I, and I is known to not require scalarization.
5519   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
5520     return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
5521   };
5522 
5523   // Holds a list of values which are known to have at least one uniform use.
5524   // Note that there may be other uses which aren't uniform.  A "uniform use"
5525   // here is something which only demands lane 0 of the unrolled iterations;
  // it does not imply that all lanes produce the same value (i.e. this is
  // not the usual meaning of uniform).
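  // For example, the address feeding a consecutive (CM_Widen) load has a
  // uniform use: only its lane-0 value is needed to emit the wide load, even
  // though the address itself differs per lane.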
5528   SetVector<Value *> HasUniformUse;
5529 
5530   // Scan the loop for instructions which are either a) known to have only
5531   // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
5532   for (auto *BB : TheLoop->blocks())
5533     for (auto &I : *BB) {
5534       // If there's no pointer operand, there's nothing to do.
5535       auto *Ptr = getLoadStorePointerOperand(&I);
5536       if (!Ptr)
5537         continue;
5538 
5539       // A uniform memory op is itself uniform.  We exclude uniform stores
5540       // here as they demand the last lane, not the first one.
5541       if (isa<LoadInst>(I) && Legal->isUniformMemOp(I))
5542         addToWorklistIfAllowed(&I);
5543 
5544       if (isUniformDecision(&I, VF)) {
5545         assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check");
5546         HasUniformUse.insert(Ptr);
5547       }
5548     }
5549 
5550   // Add to the worklist any operands which have *only* uniform (e.g. lane 0
5551   // demanding) users.  Since loops are assumed to be in LCSSA form, this
5552   // disallows uses outside the loop as well.
5553   for (auto *V : HasUniformUse) {
5554     if (isOutOfScope(V))
5555       continue;
5556     auto *I = cast<Instruction>(V);
5557     auto UsersAreMemAccesses =
5558       llvm::all_of(I->users(), [&](User *U) -> bool {
5559         return isVectorizedMemAccessUse(cast<Instruction>(U), V);
5560       });
5561     if (UsersAreMemAccesses)
5562       addToWorklistIfAllowed(I);
5563   }
5564 
  // Expand Worklist in topological order: whenever a new instruction
  // is added, its users should already be inside Worklist. This ensures
  // that a uniform instruction will only be used by uniform instructions.
5568   unsigned idx = 0;
5569   while (idx != Worklist.size()) {
5570     Instruction *I = Worklist[idx++];
5571 
5572     for (auto OV : I->operand_values()) {
5573       // isOutOfScope operands cannot be uniform instructions.
5574       if (isOutOfScope(OV))
5575         continue;
5576       // First order recurrence Phi's should typically be considered
5577       // non-uniform.
5578       auto *OP = dyn_cast<PHINode>(OV);
5579       if (OP && Legal->isFirstOrderRecurrence(OP))
5580         continue;
5581       // If all the users of the operand are uniform, then add the
5582       // operand into the uniform worklist.
5583       auto *OI = cast<Instruction>(OV);
5584       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
5585             auto *J = cast<Instruction>(U);
5586             return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
5587           }))
5588         addToWorklistIfAllowed(OI);
5589     }
5590   }
5591 
5592   // For an instruction to be added into Worklist above, all its users inside
5593   // the loop should also be in Worklist. However, this condition cannot be
5594   // true for phi nodes that form a cyclic dependence. We must process phi
5595   // nodes separately. An induction variable will remain uniform if all users
5596   // of the induction variable and induction variable update remain uniform.
5597   // The code below handles both pointer and non-pointer induction variables.
5598   for (auto &Induction : Legal->getInductionVars()) {
5599     auto *Ind = Induction.first;
5600     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5601 
5602     // Determine if all users of the induction variable are uniform after
5603     // vectorization.
5604     auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5605       auto *I = cast<Instruction>(U);
5606       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
5607              isVectorizedMemAccessUse(I, Ind);
5608     });
5609     if (!UniformInd)
5610       continue;
5611 
5612     // Determine if all users of the induction variable update instruction are
5613     // uniform after vectorization.
5614     auto UniformIndUpdate =
5615         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5616           auto *I = cast<Instruction>(U);
5617           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
5618                  isVectorizedMemAccessUse(I, IndUpdate);
5619         });
5620     if (!UniformIndUpdate)
5621       continue;
5622 
5623     // The induction variable and its update instruction will remain uniform.
5624     addToWorklistIfAllowed(Ind);
5625     addToWorklistIfAllowed(IndUpdate);
5626   }
5627 
5628   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
5629 }
5630 
5631 bool LoopVectorizationCostModel::runtimeChecksRequired() {
5632   LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
5633 
5634   if (Legal->getRuntimePointerChecking()->Need) {
5635     reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
5636         "runtime pointer checks needed. Enable vectorization of this "
5637         "loop with '#pragma clang loop vectorize(enable)' when "
5638         "compiling with -Os/-Oz",
5639         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5640     return true;
5641   }
5642 
5643   if (!PSE.getUnionPredicate().getPredicates().empty()) {
5644     reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
5645         "runtime SCEV checks needed. Enable vectorization of this "
5646         "loop with '#pragma clang loop vectorize(enable)' when "
5647         "compiling with -Os/-Oz",
5648         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5649     return true;
5650   }
5651 
5652   // FIXME: Avoid specializing for stride==1 instead of bailing out.
5653   if (!Legal->getLAI()->getSymbolicStrides().empty()) {
5654     reportVectorizationFailure("Runtime stride check for small trip count",
5655         "runtime stride == 1 checks needed. Enable vectorization of "
5656         "this loop without such check by compiling with -Os/-Oz",
5657         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5658     return true;
5659   }
5660 
5661   return false;
5662 }
5663 
5664 ElementCount
5665 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
5666   if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) {
5667     reportVectorizationInfo(
5668         "Disabling scalable vectorization, because target does not "
5669         "support scalable vectors.",
5670         "ScalableVectorsUnsupported", ORE, TheLoop);
5671     return ElementCount::getScalable(0);
5672   }
5673 
5674   if (Hints->isScalableVectorizationDisabled()) {
5675     reportVectorizationInfo("Scalable vectorization is explicitly disabled",
5676                             "ScalableVectorizationDisabled", ORE, TheLoop);
5677     return ElementCount::getScalable(0);
5678   }
5679 
5680   auto MaxScalableVF = ElementCount::getScalable(
5681       std::numeric_limits<ElementCount::ScalarTy>::max());
5682 
5683   // Disable scalable vectorization if the loop contains unsupported reductions.
5684   // Test that the loop-vectorizer can legalize all operations for this MaxVF.
5685   // FIXME: While for scalable vectors this is currently sufficient, this should
5686   // be replaced by a more detailed mechanism that filters out specific VFs,
5687   // instead of invalidating vectorization for a whole set of VFs based on the
5688   // MaxVF.
5689   if (!canVectorizeReductions(MaxScalableVF)) {
5690     reportVectorizationInfo(
5691         "Scalable vectorization not supported for the reduction "
5692         "operations found in this loop.",
5693         "ScalableVFUnfeasible", ORE, TheLoop);
5694     return ElementCount::getScalable(0);
5695   }
5696 
5697   if (Legal->isSafeForAnyVectorWidth())
5698     return MaxScalableVF;
5699 
5700   // Limit MaxScalableVF by the maximum safe dependence distance.
5701   Optional<unsigned> MaxVScale = TTI.getMaxVScale();
5702   MaxScalableVF = ElementCount::getScalable(
5703       MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);
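  // For example, with MaxSafeElements = 32 and a maximum vscale of 16, the
  // largest safe scalable VF is vscale x 2, since 2 * vscale never exceeds
  // 32 elements for any vscale up to 16.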
5704   if (!MaxScalableVF)
5705     reportVectorizationInfo(
5706         "Max legal vector width too small, scalable vectorization "
5707         "unfeasible.",
5708         "ScalableVFUnfeasible", ORE, TheLoop);
5709 
5710   return MaxScalableVF;
5711 }
5712 
5713 FixedScalableVFPair
5714 LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount,
5715                                                  ElementCount UserVF) {
5716   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
5717   unsigned SmallestType, WidestType;
5718   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
5719 
5720   // Get the maximum safe dependence distance in bits computed by LAA.
5721   // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
5722   // the memory accesses that is most restrictive (involved in the smallest
5723   // dependence distance).
5724   unsigned MaxSafeElements =
5725       PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType);
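  // For example, if LAA reports a maximum safe width of 256 bits and the
  // widest type in the loop is i32, at most PowerOf2Floor(256 / 32) = 8
  // elements may be processed per vector iteration.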
5726 
5727   auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements);
5728   auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements);
5729 
5730   LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF
5731                     << ".\n");
5732   LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF
5733                     << ".\n");
5734 
5735   // First analyze the UserVF, fall back if the UserVF should be ignored.
5736   if (UserVF) {
5737     auto MaxSafeUserVF =
5738         UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;
5739 
5740     if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF))
5741       return UserVF;
5742 
5743     assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));
5744 
5745     // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it
5746     // is better to ignore the hint and let the compiler choose a suitable VF.
5747     if (!UserVF.isScalable()) {
5748       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5749                         << " is unsafe, clamping to max safe VF="
5750                         << MaxSafeFixedVF << ".\n");
5751       ORE->emit([&]() {
5752         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5753                                           TheLoop->getStartLoc(),
5754                                           TheLoop->getHeader())
5755                << "User-specified vectorization factor "
5756                << ore::NV("UserVectorizationFactor", UserVF)
5757                << " is unsafe, clamping to maximum safe vectorization factor "
5758                << ore::NV("VectorizationFactor", MaxSafeFixedVF);
5759       });
5760       return MaxSafeFixedVF;
5761     }
5762 
5763     LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5764                       << " is unsafe. Ignoring scalable UserVF.\n");
5765     ORE->emit([&]() {
5766       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5767                                         TheLoop->getStartLoc(),
5768                                         TheLoop->getHeader())
5769              << "User-specified vectorization factor "
5770              << ore::NV("UserVectorizationFactor", UserVF)
5771              << " is unsafe. Ignoring the hint to let the compiler pick a "
5772                 "suitable VF.";
5773     });
5774   }
5775 
5776   LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
5777                     << " / " << WidestType << " bits.\n");
5778 
5779   FixedScalableVFPair Result(ElementCount::getFixed(1),
5780                              ElementCount::getScalable(0));
5781   if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType,
5782                                            WidestType, MaxSafeFixedVF))
5783     Result.FixedVF = MaxVF;
5784 
5785   if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType,
5786                                            WidestType, MaxSafeScalableVF))
5787     if (MaxVF.isScalable()) {
5788       Result.ScalableVF = MaxVF;
5789       LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
5790                         << "\n");
5791     }
5792 
5793   return Result;
5794 }
5795 
5796 FixedScalableVFPair
5797 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
5798   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to do this anyway, since the check is still
    // likely to be dynamically uniform if the target can skip it.
5801     reportVectorizationFailure(
5802         "Not inserting runtime ptr check for divergent target",
5803         "runtime pointer checks needed. Not enabled for divergent target",
5804         "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
5805     return FixedScalableVFPair::getNone();
5806   }
5807 
5808   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5809   LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
5810   if (TC == 1) {
5811     reportVectorizationFailure("Single iteration (non) loop",
5812         "loop trip count is one, irrelevant for vectorization",
5813         "SingleIterationLoop", ORE, TheLoop);
5814     return FixedScalableVFPair::getNone();
5815   }
5816 
5817   switch (ScalarEpilogueStatus) {
5818   case CM_ScalarEpilogueAllowed:
5819     return computeFeasibleMaxVF(TC, UserVF);
5820   case CM_ScalarEpilogueNotAllowedUsePredicate:
5821     LLVM_FALLTHROUGH;
5822   case CM_ScalarEpilogueNotNeededUsePredicate:
5823     LLVM_DEBUG(
5824         dbgs() << "LV: vector predicate hint/switch found.\n"
5825                << "LV: Not allowing scalar epilogue, creating predicated "
5826                << "vector loop.\n");
5827     break;
5828   case CM_ScalarEpilogueNotAllowedLowTripLoop:
5829     // fallthrough as a special case of OptForSize
5830   case CM_ScalarEpilogueNotAllowedOptSize:
5831     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
5832       LLVM_DEBUG(
5833           dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5834     else
5835       LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5836                         << "count.\n");
5837 
    // Bail out if runtime checks are required; they are undesirable when
    // optimizing for size.
5840     if (runtimeChecksRequired())
5841       return FixedScalableVFPair::getNone();
5842 
5843     break;
5844   }
5845 
5846   // The only loops we can vectorize without a scalar epilogue, are loops with
5847   // a bottom-test and a single exiting block. We'd have to handle the fact
5848   // that not every instruction executes on the last iteration.  This will
5849   // require a lane mask which varies through the vector loop body.  (TODO)
5850   if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5851     // If there was a tail-folding hint/switch, but we can't fold the tail by
5852     // masking, fallback to a vectorization with a scalar epilogue.
5853     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5854       LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5855                            "scalar epilogue instead.\n");
5856       ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5857       return computeFeasibleMaxVF(TC, UserVF);
5858     }
5859     return FixedScalableVFPair::getNone();
5860   }
5861 
5862   // Now try the tail folding
5863 
5864   // Invalidate interleave groups that require an epilogue if we can't mask
5865   // the interleave-group.
5866   if (!useMaskedInterleavedAccesses(TTI)) {
5867     assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5868            "No decisions should have been taken at this point");
    // Note: There is no need to invalidate any cost modeling decisions here,
    // as none were taken so far.
5871     InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5872   }
5873 
5874   FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF);
5875   // Avoid tail folding if the trip count is known to be a multiple of any VF
5876   // we chose.
  // FIXME: The condition below pessimizes the case for fixed-width vectors,
5878   // when scalable VFs are also candidates for vectorization.
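  // For example, with MaxFixedVF = 8 and UserIC = 0, a trip count known to
  // be 96 leaves no remainder (96 % 8 == 0) and tail folding is skipped,
  // whereas a trip count of 100 leaves a remainder of 4 and we fall through
  // to the tail-folding logic below.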
5879   if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) {
5880     ElementCount MaxFixedVF = MaxFactors.FixedVF;
5881     assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) &&
5882            "MaxFixedVF must be a power of 2");
5883     unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC
5884                                    : MaxFixedVF.getFixedValue();
5885     ScalarEvolution *SE = PSE.getSE();
5886     const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
5887     const SCEV *ExitCount = SE->getAddExpr(
5888         BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
5889     const SCEV *Rem = SE->getURemExpr(
5890         SE->applyLoopGuards(ExitCount, TheLoop),
5891         SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
5892     if (Rem->isZero()) {
5893       // Accept MaxFixedVF if we do not have a tail.
5894       LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
5895       return MaxFactors;
5896     }
5897   }
5898 
5899   // If we don't know the precise trip count, or if the trip count that we
5900   // found modulo the vectorization factor is not zero, try to fold the tail
5901   // by masking.
5902   // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
5903   if (Legal->prepareToFoldTailByMasking()) {
5904     FoldTailByMasking = true;
5905     return MaxFactors;
5906   }
5907 
5908   // If there was a tail-folding hint/switch, but we can't fold the tail by
5909   // masking, fallback to a vectorization with a scalar epilogue.
5910   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5911     LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5912                          "scalar epilogue instead.\n");
5913     ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5914     return MaxFactors;
5915   }
5916 
5917   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
5918     LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
5919     return FixedScalableVFPair::getNone();
5920   }
5921 
5922   if (TC == 0) {
5923     reportVectorizationFailure(
5924         "Unable to calculate the loop count due to complex control flow",
5925         "unable to calculate the loop count due to complex control flow",
5926         "UnknownLoopCountComplexCFG", ORE, TheLoop);
5927     return FixedScalableVFPair::getNone();
5928   }
5929 
5930   reportVectorizationFailure(
5931       "Cannot optimize for size and vectorize at the same time.",
5932       "cannot optimize for size and vectorize at the same time. "
5933       "Enable vectorization of this loop with '#pragma clang loop "
5934       "vectorize(enable)' when compiling with -Os/-Oz",
5935       "NoTailLoopWithOptForSize", ORE, TheLoop);
5936   return FixedScalableVFPair::getNone();
5937 }
5938 
5939 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
5940     unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType,
5941     const ElementCount &MaxSafeVF) {
5942   bool ComputeScalableMaxVF = MaxSafeVF.isScalable();
5943   TypeSize WidestRegister = TTI.getRegisterBitWidth(
5944       ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
5945                            : TargetTransformInfo::RGK_FixedWidthVector);
5946 
5947   // Convenience function to return the minimum of two ElementCounts.
5948   auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) {
5949     assert((LHS.isScalable() == RHS.isScalable()) &&
5950            "Scalable flags must match");
5951     return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS;
5952   };
5953 
  // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
  // Note that both WidestRegister and WidestType may not be powers of 2.
5956   auto MaxVectorElementCount = ElementCount::get(
5957       PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType),
5958       ComputeScalableMaxVF);
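  // For example, a 256-bit fixed-width register and a widest type of i32
  // give MaxVectorElementCount = PowerOf2Floor(256 / 32) = 8; for scalable
  // vectors the same computation uses the known minimum register size.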
5959   MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
5960   LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
5961                     << (MaxVectorElementCount * WidestType) << " bits.\n");
5962 
5963   if (!MaxVectorElementCount) {
5964     LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n");
5965     return ElementCount::getFixed(1);
5966   }
5967 
5968   const auto TripCountEC = ElementCount::getFixed(ConstTripCount);
5969   if (ConstTripCount &&
5970       ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) &&
5971       isPowerOf2_32(ConstTripCount)) {
5972     // We need to clamp the VF to be the ConstTripCount. There is no point in
5973     // choosing a higher viable VF as done in the loop below. If
5974     // MaxVectorElementCount is scalable, we only fall back on a fixed VF when
5975     // the TC is less than or equal to the known number of lanes.
5976     LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
5977                       << ConstTripCount << "\n");
5978     return TripCountEC;
5979   }
5980 
5981   ElementCount MaxVF = MaxVectorElementCount;
5982   if (TTI.shouldMaximizeVectorBandwidth() ||
5983       (MaximizeBandwidth && isScalarEpilogueAllowed())) {
5984     auto MaxVectorElementCountMaxBW = ElementCount::get(
5985         PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType),
5986         ComputeScalableMaxVF);
5987     MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
5988 
5989     // Collect all viable vectorization factors larger than the default MaxVF
5990     // (i.e. MaxVectorElementCount).
5991     SmallVector<ElementCount, 8> VFs;
5992     for (ElementCount VS = MaxVectorElementCount * 2;
5993          ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2)
5994       VFs.push_back(VS);
5995 
5996     // For each VF calculate its register usage.
5997     auto RUs = calculateRegisterUsage(VFs);
5998 
5999     // Select the largest VF which doesn't require more registers than existing
6000     // ones.
6001     for (int i = RUs.size() - 1; i >= 0; --i) {
6002       bool Selected = true;
6003       for (auto &pair : RUs[i].MaxLocalUsers) {
6004         unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
6005         if (pair.second > TargetNumRegisters)
6006           Selected = false;
6007       }
6008       if (Selected) {
6009         MaxVF = VFs[i];
6010         break;
6011       }
6012     }
6013     if (ElementCount MinVF =
6014             TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) {
6015       if (ElementCount::isKnownLT(MaxVF, MinVF)) {
6016         LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
6017                           << ") with target's minimum: " << MinVF << '\n');
6018         MaxVF = MinVF;
6019       }
6020     }
6021   }
6022   return MaxVF;
6023 }
6024 
6025 bool LoopVectorizationCostModel::isMoreProfitable(
6026     const VectorizationFactor &A, const VectorizationFactor &B) const {
6027   InstructionCost::CostType CostA = *A.Cost.getValue();
6028   InstructionCost::CostType CostB = *B.Cost.getValue();
6029 
6030   unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop);
6031 
6032   if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking &&
6033       MaxTripCount) {
6034     // If we are folding the tail and the trip count is a known (possibly small)
6035     // constant, the trip count will be rounded up to an integer number of
6036     // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF),
6037     // which we compare directly. When not folding the tail, the total cost will
6038     // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is
    // approximated with the per-lane cost below instead of using the trip
    // count as here.
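    // For example (illustrative numbers only): with MaxTripCount = 10, a
    // per-iteration cost of 8 at VF 4 gives 8 * ceil(10/4) = 24, while a
    // per-iteration cost of 14 at VF 8 gives 14 * ceil(10/8) = 28, so VF 4
    // wins here even though its per-lane cost (8/4 = 2.0) is higher than
    // VF 8's (14/8 = 1.75).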
6041     int64_t RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue());
6042     int64_t RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue());
6043     return RTCostA < RTCostB;
6044   }
6045 
6046   // When set to preferred, for now assume vscale may be larger than 1, so
  // that scalable vectorization is slightly favored over fixed-width
6048   // vectorization.
6049   if (Hints->isScalableVectorizationPreferred())
6050     if (A.Width.isScalable() && !B.Width.isScalable())
6051       return (CostA * B.Width.getKnownMinValue()) <=
6052              (CostB * A.Width.getKnownMinValue());
6053 
6054   // To avoid the need for FP division:
6055   //      (CostA / A.Width) < (CostB / B.Width)
6056   // <=>  (CostA * B.Width) < (CostB * A.Width)
6057   return (CostA * B.Width.getKnownMinValue()) <
6058          (CostB * A.Width.getKnownMinValue());
6059 }
6060 
6061 VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor(
6062     const ElementCountSet &VFCandidates) {
6063   InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first;
6064   LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
6065   assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
6066   assert(VFCandidates.count(ElementCount::getFixed(1)) &&
6067          "Expected Scalar VF to be a candidate");
6068 
6069   const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost);
6070   VectorizationFactor ChosenFactor = ScalarCost;
6071 
6072   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
6073   if (ForceVectorization && VFCandidates.size() > 1) {
6074     // Ignore scalar width, because the user explicitly wants vectorization.
6075     // Initialize cost to max so that VF = 2 is, at least, chosen during cost
6076     // evaluation.
6077     ChosenFactor.Cost = std::numeric_limits<InstructionCost::CostType>::max();
6078   }
6079 
6080   for (const auto &i : VFCandidates) {
6081     // The cost for scalar VF=1 is already calculated, so ignore it.
6082     if (i.isScalar())
6083       continue;
6084 
    // Notice that the vector loop needs to be executed fewer times, so
    // we need to divide the cost of the vector loop by the width of
    // the vector elements.
6088     VectorizationCostTy C = expectedCost(i);
6089 
6090     assert(C.first.isValid() && "Unexpected invalid cost for vector loop");
6091     VectorizationFactor Candidate(i, C.first);
6092     LLVM_DEBUG(
6093         dbgs() << "LV: Vector loop of width " << i << " costs: "
6094                << (*Candidate.Cost.getValue() /
6095                    Candidate.Width.getKnownMinValue())
6096                << (i.isScalable() ? " (assuming a minimum vscale of 1)" : "")
6097                << ".\n");
6098 
6099     if (!C.second && !ForceVectorization) {
6100       LLVM_DEBUG(
6101           dbgs() << "LV: Not considering vector loop of width " << i
6102                  << " because it will not generate any vector instructions.\n");
6103       continue;
6104     }
6105 
    // If profitable, add it to the ProfitableVFs list.
6107     if (isMoreProfitable(Candidate, ScalarCost))
6108       ProfitableVFs.push_back(Candidate);
6109 
6110     if (isMoreProfitable(Candidate, ChosenFactor))
6111       ChosenFactor = Candidate;
6112   }
6113 
6114   if (!EnableCondStoresVectorization && NumPredStores) {
6115     reportVectorizationFailure("There are conditional stores.",
6116         "store that is conditionally executed prevents vectorization",
6117         "ConditionalStore", ORE, TheLoop);
6118     ChosenFactor = ScalarCost;
6119   }
6120 
6121   LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() &&
6122                  *ChosenFactor.Cost.getValue() >= *ScalarCost.Cost.getValue())
6123                  dbgs()
6124              << "LV: Vectorization seems to be not beneficial, "
6125              << "but was forced by a user.\n");
6126   LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n");
6127   return ChosenFactor;
6128 }
6129 
6130 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization(
6131     const Loop &L, ElementCount VF) const {
6132   // Cross iteration phis such as reductions need special handling and are
6133   // currently unsupported.
6134   if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) {
6135         return Legal->isFirstOrderRecurrence(&Phi) ||
6136                Legal->isReductionVariable(&Phi);
6137       }))
6138     return false;
6139 
6140   // Phis with uses outside of the loop require special handling and are
6141   // currently unsupported.
6142   for (auto &Entry : Legal->getInductionVars()) {
6143     // Look for uses of the value of the induction at the last iteration.
6144     Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch());
6145     for (User *U : PostInc->users())
6146       if (!L.contains(cast<Instruction>(U)))
6147         return false;
    // Look for uses of the penultimate value of the induction.
6149     for (User *U : Entry.first->users())
6150       if (!L.contains(cast<Instruction>(U)))
6151         return false;
6152   }
6153 
6154   // Induction variables that are widened require special handling that is
6155   // currently not supported.
6156   if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
6157         return !(this->isScalarAfterVectorization(Entry.first, VF) ||
6158                  this->isProfitableToScalarize(Entry.first, VF));
6159       }))
6160     return false;
6161 
6162   return true;
6163 }
6164 
6165 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
6166     const ElementCount VF) const {
6167   // FIXME: We need a much better cost-model to take different parameters such
6168   // as register pressure, code size increase and cost of extra branches into
6169   // account. For now we apply a very crude heuristic and only consider loops
6170   // with vectorization factors larger than a certain value.
6171   // We also consider epilogue vectorization unprofitable for targets that don't
  // consider interleaving beneficial (e.g. MVE).
6173   if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
6174     return false;
6175   if (VF.getFixedValue() >= EpilogueVectorizationMinVF)
6176     return true;
6177   return false;
6178 }
6179 
6180 VectorizationFactor
6181 LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
6182     const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
6183   VectorizationFactor Result = VectorizationFactor::Disabled();
6184   if (!EnableEpilogueVectorization) {
6185     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
6186     return Result;
6187   }
6188 
6189   if (!isScalarEpilogueAllowed()) {
6190     LLVM_DEBUG(
6191         dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
6192                   "allowed.\n";);
6193     return Result;
6194   }
6195 
6196   // FIXME: This can be fixed for scalable vectors later, because at this stage
6197   // the LoopVectorizer will only consider vectorizing a loop with scalable
6198   // vectors when the loop has a hint to enable vectorization for a given VF.
6199   if (MainLoopVF.isScalable()) {
6200     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization for scalable vectors not "
6201                          "yet supported.\n");
6202     return Result;
6203   }
6204 
6205   // Not really a cost consideration, but check for unsupported cases here to
6206   // simplify the logic.
6207   if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
6208     LLVM_DEBUG(
6209         dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
6210                   "not a supported candidate.\n";);
6211     return Result;
6212   }
6213 
6214   if (EpilogueVectorizationForceVF > 1) {
6215     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
6216     if (LVP.hasPlanWithVFs(
6217             {MainLoopVF, ElementCount::getFixed(EpilogueVectorizationForceVF)}))
6218       return {ElementCount::getFixed(EpilogueVectorizationForceVF), 0};
6219     else {
6220       LLVM_DEBUG(
6221           dbgs()
6222               << "LEV: Epilogue vectorization forced factor is not viable.\n";);
6223       return Result;
6224     }
6225   }
6226 
6227   if (TheLoop->getHeader()->getParent()->hasOptSize() ||
6228       TheLoop->getHeader()->getParent()->hasMinSize()) {
6229     LLVM_DEBUG(
6230         dbgs()
6231             << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
6232     return Result;
6233   }
6234 
6235   if (!isEpilogueVectorizationProfitable(MainLoopVF))
6236     return Result;
6237 
6238   for (auto &NextVF : ProfitableVFs)
6239     if (ElementCount::isKnownLT(NextVF.Width, MainLoopVF) &&
6240         (Result.Width.getFixedValue() == 1 ||
6241          isMoreProfitable(NextVF, Result)) &&
6242         LVP.hasPlanWithVFs({MainLoopVF, NextVF.Width}))
6243       Result = NextVF;
6244 
6245   if (Result != VectorizationFactor::Disabled())
6246     LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
6247                       << Result.Width.getFixedValue() << "\n";);
6248   return Result;
6249 }
6250 
6251 std::pair<unsigned, unsigned>
6252 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
6253   unsigned MinWidth = -1U;
6254   unsigned MaxWidth = 8;
6255   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
6256 
6257   // For each block.
6258   for (BasicBlock *BB : TheLoop->blocks()) {
6259     // For each instruction in the loop.
6260     for (Instruction &I : BB->instructionsWithoutDebug()) {
6261       Type *T = I.getType();
6262 
6263       // Skip ignored values.
6264       if (ValuesToIgnore.count(&I))
6265         continue;
6266 
6267       // Only examine Loads, Stores and PHINodes.
6268       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
6269         continue;
6270 
6271       // Examine PHI nodes that are reduction variables. Update the type to
6272       // account for the recurrence type.
6273       if (auto *PN = dyn_cast<PHINode>(&I)) {
6274         if (!Legal->isReductionVariable(PN))
6275           continue;
6276         const RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[PN];
6277         if (PreferInLoopReductions || useOrderedReductions(RdxDesc) ||
6278             TTI.preferInLoopReduction(RdxDesc.getOpcode(),
6279                                       RdxDesc.getRecurrenceType(),
6280                                       TargetTransformInfo::ReductionFlags()))
6281           continue;
6282         T = RdxDesc.getRecurrenceType();
6283       }
6284 
6285       // Examine the stored values.
6286       if (auto *ST = dyn_cast<StoreInst>(&I))
6287         T = ST->getValueOperand()->getType();
6288 
6289       // Ignore loaded pointer types and stored pointer types that are not
6290       // vectorizable.
6291       //
6292       // FIXME: The check here attempts to predict whether a load or store will
6293       //        be vectorized. We only know this for certain after a VF has
6294       //        been selected. Here, we assume that if an access can be
6295       //        vectorized, it will be. We should also look at extending this
6296       //        optimization to non-pointer types.
6297       //
6298       if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
6299           !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
6300         continue;
6301 
6302       MinWidth = std::min(MinWidth,
6303                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
6304       MaxWidth = std::max(MaxWidth,
6305                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
6306     }
6307   }
6308 
6309   return {MinWidth, MaxWidth};
6310 }
6311 
6312 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
6313                                                            unsigned LoopCost) {
6314   // -- The interleave heuristics --
6315   // We interleave the loop in order to expose ILP and reduce the loop overhead.
6316   // There are many micro-architectural considerations that we can't predict
6317   // at this level. For example, frontend pressure (on decode or fetch) due to
6318   // code size, or the number and capabilities of the execution ports.
6319   //
6320   // We use the following heuristics to select the interleave count:
6321   // 1. If the code has reductions, then we interleave to break the cross
6322   // iteration dependency.
6323   // 2. If the loop is really small, then we interleave to reduce the loop
6324   // overhead.
6325   // 3. We don't interleave if we think that we will spill registers to memory
6326   // due to the increased register pressure.
6327 
6328   if (!isScalarEpilogueAllowed())
6329     return 1;
6330 
  // If the maximum safe dependence distance already limited the chosen VF,
  // do not interleave: interleaving would widen the range of concurrently
  // executed iterations beyond what is known to be safe.
6332   if (Legal->getMaxSafeDepDistBytes() != -1U)
6333     return 1;
6334 
6335   auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
6336   const bool HasReductions = !Legal->getReductionVars().empty();
6337   // Do not interleave loops with a relatively small known or estimated trip
6338   // count. But we will interleave when InterleaveSmallLoopScalarReduction is
  // enabled and the code has scalar reductions (HasReductions && VF == 1),
  // because under those conditions interleaving can expose ILP and break
  // cross-iteration dependences for reductions.
6342   if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
6343       !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
6344     return 1;
6345 
6346   RegisterUsage R = calculateRegisterUsage({VF})[0];
6347   // We divide by these constants so assume that we have at least one
6348   // instruction that uses at least one register.
6349   for (auto& pair : R.MaxLocalUsers) {
6350     pair.second = std::max(pair.second, 1U);
6351   }
6352 
6353   // We calculate the interleave count using the following formula.
6354   // Subtract the number of loop invariants from the number of available
6355   // registers. These registers are used by all of the interleaved instances.
6356   // Next, divide the remaining registers by the number of registers that is
6357   // required by the loop, in order to estimate how many parallel instances
6358   // fit without causing spills. All of this is rounded down if necessary to be
6359   // a power of two. We want power of two interleave count to simplify any
6360   // addressing operations or alignment considerations.
6361   // We also want power of two interleave counts to ensure that the induction
6362   // variable of the vector loop wraps to zero, when tail is folded by masking;
6363   // this currently happens when OptForSize, in which case IC is set to 1 above.
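  // As a rough illustration (hypothetical numbers, not taken from any
  // particular target): with 16 registers in a class, 2 of them consumed by
  // loop-invariant values and a maximum local usage of 3, the computation
  // below yields PowerOf2Floor((16 - 2) / 3) = 4 interleaved instances.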
6364   unsigned IC = UINT_MAX;
6365 
6366   for (auto& pair : R.MaxLocalUsers) {
6367     unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
6368     LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
6369                       << " registers of "
                      << TTI.getRegisterClassName(pair.first)
                      << " register class\n");
6371     if (VF.isScalar()) {
6372       if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
6373         TargetNumRegisters = ForceTargetNumScalarRegs;
6374     } else {
6375       if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
6376         TargetNumRegisters = ForceTargetNumVectorRegs;
6377     }
6378     unsigned MaxLocalUsers = pair.second;
6379     unsigned LoopInvariantRegs = 0;
6380     if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
6381       LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
6382 
    unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) /
                                   MaxLocalUsers);
6384     // Don't count the induction variable as interleaved.
6385     if (EnableIndVarRegisterHeur) {
6386       TmpIC =
6387           PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
6388                         std::max(1U, (MaxLocalUsers - 1)));
6389     }
6390 
6391     IC = std::min(IC, TmpIC);
6392   }
6393 
6394   // Clamp the interleave ranges to reasonable counts.
6395   unsigned MaxInterleaveCount =
6396       TTI.getMaxInterleaveFactor(VF.getKnownMinValue());
6397 
6398   // Check if the user has overridden the max.
6399   if (VF.isScalar()) {
6400     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
6401       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
6402   } else {
6403     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
6404       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
6405   }
6406 
  // If the trip count is a known or estimated compile-time constant, limit
  // the interleave count to at most the trip count divided by VF, provided it
  // is at least 1.
6410   //
6411   // For scalable vectors we can't know if interleaving is beneficial. It may
6412   // not be beneficial for small loops if none of the lanes in the second vector
  // iteration is enabled. However, for larger loops, there is likely to be a
6414   // similar benefit as for fixed-width vectors. For now, we choose to leave
6415   // the InterleaveCount as if vscale is '1', although if some information about
6416   // the vector is known (e.g. min vector size), we can make a better decision.
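  // For example (illustrative only): with an estimated trip count of 24 and
  // VF = 8, the interleave count is capped at 24 / 8 = 3 below.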
6417   if (BestKnownTC) {
6418     MaxInterleaveCount =
6419         std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
6420     // Make sure MaxInterleaveCount is greater than 0.
6421     MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
6422   }
6423 
6424   assert(MaxInterleaveCount > 0 &&
6425          "Maximum interleave count must be greater than 0");
6426 
  // Clamp the calculated IC to be between 1 and the max interleave count that
  // the target and trip count allow.
6429   if (IC > MaxInterleaveCount)
6430     IC = MaxInterleaveCount;
6431   else
6432     // Make sure IC is greater than 0.
6433     IC = std::max(1u, IC);
6434 
6435   assert(IC > 0 && "Interleave count must be greater than 0.");
6436 
6437   // If we did not calculate the cost for VF (because the user selected the VF)
6438   // then we calculate the cost of VF here.
6439   if (LoopCost == 0) {
6440     assert(expectedCost(VF).first.isValid() && "Expected a valid cost");
6441     LoopCost = *expectedCost(VF).first.getValue();
6442   }
6443 
6444   assert(LoopCost && "Non-zero loop cost expected");
6445 
6446   // Interleave if we vectorized this loop and there is a reduction that could
6447   // benefit from interleaving.
6448   if (VF.isVector() && HasReductions) {
6449     LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
6450     return IC;
6451   }
6452 
6453   // Note that if we've already vectorized the loop we will have done the
6454   // runtime check and so interleaving won't require further checks.
6455   bool InterleavingRequiresRuntimePointerCheck =
6456       (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
6457 
6458   // We want to interleave small loops in order to reduce the loop overhead and
6459   // potentially expose ILP opportunities.
6460   LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
6461                     << "LV: IC is " << IC << '\n'
6462                     << "LV: VF is " << VF << '\n');
6463   const bool AggressivelyInterleaveReductions =
6464       TTI.enableAggressiveInterleaving(HasReductions);
6465   if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
6466     // We assume that the cost overhead is 1 and we use the cost model
6467     // to estimate the cost of the loop and interleave until the cost of the
6468     // loop overhead is about 5% of the cost of the loop.
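    // For instance (assuming, purely for illustration, SmallLoopCost = 20):
    // a loop body costing 6 allows at most PowerOf2Floor(20 / 6) = 2 copies
    // before the overhead budget is exceeded.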
6469     unsigned SmallIC =
6470         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
6471 
6472     // Interleave until store/load ports (estimated by max interleave count) are
6473     // saturated.
6474     unsigned NumStores = Legal->getNumStores();
6475     unsigned NumLoads = Legal->getNumLoads();
6476     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
6477     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
6478 
6479     // If we have a scalar reduction (vector reductions are already dealt with
6480     // by this point), we can increase the critical path length if the loop
    // we're interleaving is inside another loop. Limit, by default, to 2, so the
6482     // critical path only gets increased by one reduction operation.
6483     if (HasReductions && TheLoop->getLoopDepth() > 1) {
6484       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
6485       SmallIC = std::min(SmallIC, F);
6486       StoresIC = std::min(StoresIC, F);
6487       LoadsIC = std::min(LoadsIC, F);
6488     }
6489 
6490     if (EnableLoadStoreRuntimeInterleave &&
6491         std::max(StoresIC, LoadsIC) > SmallIC) {
6492       LLVM_DEBUG(
6493           dbgs() << "LV: Interleaving to saturate store or load ports.\n");
6494       return std::max(StoresIC, LoadsIC);
6495     }
6496 
6497     // If there are scalar reductions and TTI has enabled aggressive
6498     // interleaving for reductions, we will interleave to expose ILP.
6499     if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
6500         AggressivelyInterleaveReductions) {
6501       LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
      // Interleave no less than SmallIC but not as aggressively as the normal
      // IC to satisfy the rare situation when resources are too limited.
6504       return std::max(IC / 2, SmallIC);
6505     } else {
6506       LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
6507       return SmallIC;
6508     }
6509   }
6510 
6511   // Interleave if this is a large loop (small loops are already dealt with by
6512   // this point) that could benefit from interleaving.
6513   if (AggressivelyInterleaveReductions) {
6514     LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6515     return IC;
6516   }
6517 
6518   LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
6519   return 1;
6520 }
6521 
6522 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
6523 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is
  // a very rough estimation. We scan the loop in topological order and assign
  // a number to each instruction. We use RPO to ensure that defs are met
  // before their users. We assume that each instruction that has in-loop
6529   // users starts an interval. We record every time that an in-loop value is
6530   // used, so we have a list of the first and last occurrences of each
6531   // instruction. Next, we transpose this data structure into a multi map that
6532   // holds the list of intervals that *end* at a specific location. This multi
6533   // map allows us to perform a linear search. We scan the instructions linearly
6534   // and record each time that a new interval starts, by placing it in a set.
6535   // If we find this value in the multi-map then we remove it from the set.
6536   // The max register usage is the maximum size of the set.
6537   // We also search for instructions that are defined outside the loop, but are
6538   // used inside the loop. We need this number separately from the max-interval
6539   // usage number because when we unroll, loop-invariant values do not take
  // more registers.
6541   LoopBlocksDFS DFS(TheLoop);
6542   DFS.perform(LI);
6543 
6544   RegisterUsage RU;
6545 
6546   // Each 'key' in the map opens a new interval. The values
6547   // of the map are the index of the 'last seen' usage of the
6548   // instruction that is the key.
6549   using IntervalMap = DenseMap<Instruction *, unsigned>;
6550 
6551   // Maps instruction to its index.
6552   SmallVector<Instruction *, 64> IdxToInstr;
6553   // Marks the end of each interval.
6554   IntervalMap EndPoint;
6555   // Saves the list of instruction indices that are used in the loop.
6556   SmallPtrSet<Instruction *, 8> Ends;
6557   // Saves the list of values that are used in the loop but are
6558   // defined outside the loop, such as arguments and constants.
6559   SmallPtrSet<Value *, 8> LoopInvariants;
6560 
6561   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
6562     for (Instruction &I : BB->instructionsWithoutDebug()) {
6563       IdxToInstr.push_back(&I);
6564 
6565       // Save the end location of each USE.
6566       for (Value *U : I.operands()) {
6567         auto *Instr = dyn_cast<Instruction>(U);
6568 
6569         // Ignore non-instruction values such as arguments, constants, etc.
6570         if (!Instr)
6571           continue;
6572 
6573         // If this instruction is outside the loop then record it and continue.
6574         if (!TheLoop->contains(Instr)) {
6575           LoopInvariants.insert(Instr);
6576           continue;
6577         }
6578 
6579         // Overwrite previous end points.
6580         EndPoint[Instr] = IdxToInstr.size();
6581         Ends.insert(Instr);
6582       }
6583     }
6584   }
6585 
6586   // Saves the list of intervals that end with the index in 'key'.
6587   using InstrList = SmallVector<Instruction *, 2>;
6588   DenseMap<unsigned, InstrList> TransposeEnds;
6589 
6590   // Transpose the EndPoints to a list of values that end at each index.
6591   for (auto &Interval : EndPoint)
6592     TransposeEnds[Interval.second].push_back(Interval.first);
6593 
6594   SmallPtrSet<Instruction *, 8> OpenIntervals;
6595   SmallVector<RegisterUsage, 8> RUs(VFs.size());
6596   SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());
6597 
6598   LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
6599 
6600   // A lambda that gets the register usage for the given type and VF.
6601   const auto &TTICapture = TTI;
6602   auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) {
6603     if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty))
6604       return 0;
6605     return *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue();
6606   };
6607 
6608   for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
6609     Instruction *I = IdxToInstr[i];
6610 
6611     // Remove all of the instructions that end at this location.
6612     InstrList &List = TransposeEnds[i];
6613     for (Instruction *ToRemove : List)
6614       OpenIntervals.erase(ToRemove);
6615 
6616     // Ignore instructions that are never used within the loop.
6617     if (!Ends.count(I))
6618       continue;
6619 
6620     // Skip ignored values.
6621     if (ValuesToIgnore.count(I))
6622       continue;
6623 
6624     // For each VF find the maximum usage of registers.
6625     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
6626       // Count the number of live intervals.
6627       SmallMapVector<unsigned, unsigned, 4> RegUsage;
6628 
6629       if (VFs[j].isScalar()) {
6630         for (auto Inst : OpenIntervals) {
          unsigned ClassID =
              TTI.getRegisterClassForType(false, Inst->getType());
6632           if (RegUsage.find(ClassID) == RegUsage.end())
6633             RegUsage[ClassID] = 1;
6634           else
6635             RegUsage[ClassID] += 1;
6636         }
6637       } else {
6638         collectUniformsAndScalars(VFs[j]);
6639         for (auto Inst : OpenIntervals) {
6640           // Skip ignored values for VF > 1.
6641           if (VecValuesToIgnore.count(Inst))
6642             continue;
6643           if (isScalarAfterVectorization(Inst, VFs[j])) {
            unsigned ClassID =
                TTI.getRegisterClassForType(false, Inst->getType());
6645             if (RegUsage.find(ClassID) == RegUsage.end())
6646               RegUsage[ClassID] = 1;
6647             else
6648               RegUsage[ClassID] += 1;
6649           } else {
            unsigned ClassID =
                TTI.getRegisterClassForType(true, Inst->getType());
6651             if (RegUsage.find(ClassID) == RegUsage.end())
6652               RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]);
6653             else
6654               RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
6655           }
6656         }
6657       }
6658 
6659       for (auto& pair : RegUsage) {
6660         if (MaxUsages[j].find(pair.first) != MaxUsages[j].end())
          MaxUsages[j][pair.first] =
              std::max(MaxUsages[j][pair.first], pair.second);
6662         else
6663           MaxUsages[j][pair.first] = pair.second;
6664       }
6665     }
6666 
6667     LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
6668                       << OpenIntervals.size() << '\n');
6669 
6670     // Add the current instruction to the list of open intervals.
6671     OpenIntervals.insert(I);
6672   }
6673 
6674   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
6675     SmallMapVector<unsigned, unsigned, 4> Invariant;
6676 
6677     for (auto Inst : LoopInvariants) {
6678       unsigned Usage =
6679           VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
6680       unsigned ClassID =
6681           TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
6682       if (Invariant.find(ClassID) == Invariant.end())
6683         Invariant[ClassID] = Usage;
6684       else
6685         Invariant[ClassID] += Usage;
6686     }
6687 
6688     LLVM_DEBUG({
6689       dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
6690       dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
6691              << " item\n";
6692       for (const auto &pair : MaxUsages[i]) {
6693         dbgs() << "LV(REG): RegisterClass: "
6694                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6695                << " registers\n";
6696       }
6697       dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
6698              << " item\n";
6699       for (const auto &pair : Invariant) {
6700         dbgs() << "LV(REG): RegisterClass: "
6701                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6702                << " registers\n";
6703       }
6704     });
6705 
6706     RU.LoopInvariantRegs = Invariant;
6707     RU.MaxLocalUsers = MaxUsages[i];
6708     RUs[i] = RU;
6709   }
6710 
6711   return RUs;
6712 }
6713 
bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I) {
6715   // TODO: Cost model for emulated masked load/store is completely
6716   // broken. This hack guides the cost model to use an artificially
6717   // high enough value to practically disable vectorization with such
6718   // operations, except where previously deployed legality hack allowed
6719   // using very low cost values. This is to avoid regressions coming simply
6720   // from moving "masked load/store" check from legality to cost model.
6721   // Masked Load/Gather emulation was previously never allowed.
6722   // Limited number of Masked Store/Scatter emulation was allowed.
6723   assert(isPredicatedInst(I) &&
6724          "Expecting a scalar emulated instruction");
6725   return isa<LoadInst>(I) ||
6726          (isa<StoreInst>(I) &&
6727           NumPredStores > NumberOfStoresToPredicate);
6728 }
6729 
6730 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
6731   // If we aren't vectorizing the loop, or if we've already collected the
6732   // instructions to scalarize, there's nothing to do. Collection may already
6733   // have occurred if we have a user-selected VF and are now computing the
6734   // expected cost for interleaving.
6735   if (VF.isScalar() || VF.isZero() ||
6736       InstsToScalarize.find(VF) != InstsToScalarize.end())
6737     return;
6738 
  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
6740   // not profitable to scalarize any instructions, the presence of VF in the
6741   // map will indicate that we've analyzed it already.
6742   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
6743 
6744   // Find all the instructions that are scalar with predication in the loop and
  // determine whether it would be better not to if-convert the blocks they
  // are in. If so, we also record the instructions to scalarize.
6747   for (BasicBlock *BB : TheLoop->blocks()) {
6748     if (!blockNeedsPredication(BB))
6749       continue;
6750     for (Instruction &I : *BB)
6751       if (isScalarWithPredication(&I)) {
6752         ScalarCostsTy ScalarCosts;
6753         // Do not apply discount logic if hacked cost is needed
6754         // for emulated masked memrefs.
6755         if (!useEmulatedMaskMemRefHack(&I) &&
6756             computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
6757           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
6758         // Remember that BB will remain after vectorization.
6759         PredicatedBBsAfterVectorization.insert(BB);
6760       }
6761   }
6762 }
6763 
6764 int LoopVectorizationCostModel::computePredInstDiscount(
6765     Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
6766   assert(!isUniformAfterVectorization(PredInst, VF) &&
6767          "Instruction marked uniform-after-vectorization will be predicated");
6768 
6769   // Initialize the discount to zero, meaning that the scalar version and the
6770   // vector version cost the same.
6771   InstructionCost Discount = 0;
6772 
6773   // Holds instructions to analyze. The instructions we visit are mapped in
6774   // ScalarCosts. Those instructions are the ones that would be scalarized if
6775   // we find that the scalar version costs less.
6776   SmallVector<Instruction *, 8> Worklist;
6777 
6778   // Returns true if the given instruction can be scalarized.
6779   auto canBeScalarized = [&](Instruction *I) -> bool {
6780     // We only attempt to scalarize instructions forming a single-use chain
6781     // from the original predicated block that would otherwise be vectorized.
6782     // Although not strictly necessary, we give up on instructions we know will
6783     // already be scalar to avoid traversing chains that are unlikely to be
6784     // beneficial.
6785     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
6786         isScalarAfterVectorization(I, VF))
6787       return false;
6788 
6789     // If the instruction is scalar with predication, it will be analyzed
6790     // separately. We ignore it within the context of PredInst.
6791     if (isScalarWithPredication(I))
6792       return false;
6793 
6794     // If any of the instruction's operands are uniform after vectorization,
6795     // the instruction cannot be scalarized. This prevents, for example, a
6796     // masked load from being scalarized.
6797     //
6798     // We assume we will only emit a value for lane zero of an instruction
6799     // marked uniform after vectorization, rather than VF identical values.
6800     // Thus, if we scalarize an instruction that uses a uniform, we would
6801     // create uses of values corresponding to the lanes we aren't emitting code
6802     // for. This behavior can be changed by allowing getScalarValue to clone
6803     // the lane zero values for uniforms rather than asserting.
6804     for (Use &U : I->operands())
6805       if (auto *J = dyn_cast<Instruction>(U.get()))
6806         if (isUniformAfterVectorization(J, VF))
6807           return false;
6808 
6809     // Otherwise, we can scalarize the instruction.
6810     return true;
6811   };
6812 
6813   // Compute the expected cost discount from scalarizing the entire expression
6814   // feeding the predicated instruction. We currently only consider expressions
6815   // that are single-use instruction chains.
6816   Worklist.push_back(PredInst);
6817   while (!Worklist.empty()) {
6818     Instruction *I = Worklist.pop_back_val();
6819 
6820     // If we've already analyzed the instruction, there's nothing to do.
6821     if (ScalarCosts.find(I) != ScalarCosts.end())
6822       continue;
6823 
6824     // Compute the cost of the vector instruction. Note that this cost already
6825     // includes the scalarization overhead of the predicated instruction.
6826     InstructionCost VectorCost = getInstructionCost(I, VF).first;
6827 
6828     // Compute the cost of the scalarized instruction. This cost is the cost of
6829     // the instruction as if it wasn't if-converted and instead remained in the
6830     // predicated block. We will scale this cost by block probability after
6831     // computing the scalarization overhead.
6832     assert(!VF.isScalable() && "scalable vectors not yet supported.");
6833     InstructionCost ScalarCost =
6834         VF.getKnownMinValue() *
6835         getInstructionCost(I, ElementCount::getFixed(1)).first;
6836 
6837     // Compute the scalarization overhead of needed insertelement instructions
6838     // and phi nodes.
6839     if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
6840       ScalarCost += TTI.getScalarizationOverhead(
6841           cast<VectorType>(ToVectorTy(I->getType(), VF)),
6842           APInt::getAllOnesValue(VF.getKnownMinValue()), true, false);
6843       assert(!VF.isScalable() && "scalable vectors not yet supported.");
6844       ScalarCost +=
6845           VF.getKnownMinValue() *
6846           TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput);
6847     }
6848 
6849     // Compute the scalarization overhead of needed extractelement
6850     // instructions. For each of the instruction's operands, if the operand can
6851     // be scalarized, add it to the worklist; otherwise, account for the
6852     // overhead.
6853     for (Use &U : I->operands())
6854       if (auto *J = dyn_cast<Instruction>(U.get())) {
6855         assert(VectorType::isValidElementType(J->getType()) &&
6856                "Instruction has non-scalar type");
6857         if (canBeScalarized(J))
6858           Worklist.push_back(J);
6859         else if (needsExtract(J, VF)) {
6860           assert(!VF.isScalable() && "scalable vectors not yet supported.");
6861           ScalarCost += TTI.getScalarizationOverhead(
6862               cast<VectorType>(ToVectorTy(J->getType(), VF)),
6863               APInt::getAllOnesValue(VF.getKnownMinValue()), false, true);
6864         }
6865       }
6866 
6867     // Scale the total scalar cost by block probability.
6868     ScalarCost /= getReciprocalPredBlockProb();
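    // E.g. if getReciprocalPredBlockProb() assumes the predicated block runs
    // every other iteration (a reciprocal of 2), a raw scalar cost of 10 is
    // scaled down to 5 here.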
6869 
6870     // Compute the discount. A non-negative discount means the vector version
6871     // of the instruction costs more, and scalarizing would be beneficial.
6872     Discount += VectorCost - ScalarCost;
6873     ScalarCosts[I] = ScalarCost;
6874   }
6875 
6876   return *Discount.getValue();
6877 }
6878 
6879 LoopVectorizationCostModel::VectorizationCostTy
6880 LoopVectorizationCostModel::expectedCost(ElementCount VF) {
6881   VectorizationCostTy Cost;
6882 
6883   // For each block.
6884   for (BasicBlock *BB : TheLoop->blocks()) {
6885     VectorizationCostTy BlockCost;
6886 
6887     // For each instruction in the old loop.
6888     for (Instruction &I : BB->instructionsWithoutDebug()) {
6889       // Skip ignored values.
6890       if (ValuesToIgnore.count(&I) ||
6891           (VF.isVector() && VecValuesToIgnore.count(&I)))
6892         continue;
6893 
6894       VectorizationCostTy C = getInstructionCost(&I, VF);
6895 
6896       // Check if we should override the cost.
6897       if (ForceTargetInstructionCost.getNumOccurrences() > 0)
6898         C.first = InstructionCost(ForceTargetInstructionCost);
6899 
6900       BlockCost.first += C.first;
6901       BlockCost.second |= C.second;
6902       LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
6903                         << " for VF " << VF << " For instruction: " << I
6904                         << '\n');
6905     }
6906 
6907     // If we are vectorizing a predicated block, it will have been
6908     // if-converted. This means that the block's instructions (aside from
6909     // stores and instructions that may divide by zero) will now be
6910     // unconditionally executed. For the scalar case, we may not always execute
6911     // the predicated block, if it is an if-else block. Thus, scale the block's
6912     // cost by the probability of executing it. blockNeedsPredication from
6913     // Legal is used so as to not include all blocks in tail folded loops.
6914     if (VF.isScalar() && Legal->blockNeedsPredication(BB))
6915       BlockCost.first /= getReciprocalPredBlockProb();
6916 
6917     Cost.first += BlockCost.first;
6918     Cost.second |= BlockCost.second;
6919   }
6920 
6921   return Cost;
6922 }
6923 
6924 /// Gets Address Access SCEV after verifying that the access pattern
6925 /// is loop invariant except the induction variable dependence.
6926 ///
6927 /// This SCEV can be sent to the Target in order to estimate the address
6928 /// calculation cost.
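///
/// For example (illustrative only): a GEP such as
///   getelementptr %base, i64 0, i64 %inv, i64 %iv
/// qualifies when %inv is loop invariant and %iv is an induction variable,
/// whereas a GEP with a loop-varying, non-induction index does not and
/// nullptr is returned for it.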
static const SCEV *getAddressAccessSCEV(Value *Ptr,
                                        LoopVectorizationLegality *Legal,
                                        PredicatedScalarEvolution &PSE,
                                        const Loop *TheLoop) {
6935   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
6936   if (!Gep)
6937     return nullptr;
6938 
6939   // We are looking for a gep with all loop invariant indices except for one
6940   // which should be an induction variable.
6941   auto SE = PSE.getSE();
6942   unsigned NumOperands = Gep->getNumOperands();
6943   for (unsigned i = 1; i < NumOperands; ++i) {
6944     Value *Opd = Gep->getOperand(i);
6945     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
6946         !Legal->isInductionVariable(Opd))
6947       return nullptr;
6948   }
6949 
  // Now we know we have a GEP ptr, %inv, %ind, %inv. Return the Ptr SCEV.
6951   return PSE.getSCEV(Ptr);
6952 }
6953 
6954 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
6955   return Legal->hasStride(I->getOperand(0)) ||
6956          Legal->hasStride(I->getOperand(1));
6957 }
6958 
6959 InstructionCost
6960 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
6961                                                         ElementCount VF) {
6962   assert(VF.isVector() &&
6963          "Scalarization cost of instruction implies vectorization.");
6964   if (VF.isScalable())
6965     return InstructionCost::getInvalid();
6966 
6967   Type *ValTy = getLoadStoreType(I);
6968   auto SE = PSE.getSE();
6969 
6970   unsigned AS = getLoadStoreAddressSpace(I);
6971   Value *Ptr = getLoadStorePointerOperand(I);
6972   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
6973 
6974   // Figure out whether the access is strided and get the stride value
  // if it's known at compile time.
6976   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
6977 
6978   // Get the cost of the scalar memory instruction and address computation.
6979   InstructionCost Cost =
6980       VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
6981 
6982   // Don't pass *I here, since it is scalar but will actually be part of a
6983   // vectorized loop where the user of it is a vectorized instruction.
6984   const Align Alignment = getLoadStoreAlignment(I);
6985   Cost += VF.getKnownMinValue() *
6986           TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
6987                               AS, TTI::TCK_RecipThroughput);
6988 
6989   // Get the overhead of the extractelement and insertelement instructions
6990   // we might create due to scalarization.
6991   Cost += getScalarizationOverhead(I, VF);
6992 
6993   // If we have a predicated load/store, it will need extra i1 extracts and
6994   // conditional branches, but may not be executed for each vector lane. Scale
6995   // the cost by the probability of executing the predicated block.
6996   if (isPredicatedInst(I)) {
6997     Cost /= getReciprocalPredBlockProb();
6998 
6999     // Add the cost of an i1 extract and a branch
7000     auto *Vec_i1Ty =
7001         VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF);
7002     Cost += TTI.getScalarizationOverhead(
7003         Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()),
7004         /*Insert=*/false, /*Extract=*/true);
7005     Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput);
7006 
7007     if (useEmulatedMaskMemRefHack(I))
7008       // Artificially setting to a high enough value to practically disable
7009       // vectorization with such operations.
7010       Cost = 3000000;
7011   }
7012 
7013   return Cost;
7014 }
7015 
7016 InstructionCost
7017 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
7018                                                     ElementCount VF) {
7019   Type *ValTy = getLoadStoreType(I);
7020   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
7021   Value *Ptr = getLoadStorePointerOperand(I);
7022   unsigned AS = getLoadStoreAddressSpace(I);
7023   int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
7024   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
7025 
7026   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
7027          "Stride should be 1 or -1 for consecutive memory access");
7028   const Align Alignment = getLoadStoreAlignment(I);
7029   InstructionCost Cost = 0;
7030   if (Legal->isMaskRequired(I))
7031     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
7032                                       CostKind);
7033   else
7034     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
7035                                 CostKind, I);
7036 
7037   bool Reverse = ConsecutiveStride < 0;
7038   if (Reverse)
7039     Cost +=
7040         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
7041   return Cost;
7042 }
7043 
7044 InstructionCost
7045 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
7046                                                 ElementCount VF) {
7047   assert(Legal->isUniformMemOp(*I));
7048 
7049   Type *ValTy = getLoadStoreType(I);
7050   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
7051   const Align Alignment = getLoadStoreAlignment(I);
7052   unsigned AS = getLoadStoreAddressSpace(I);
7053   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
7054   if (isa<LoadInst>(I)) {
7055     return TTI.getAddressComputationCost(ValTy) +
7056            TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
7057                                CostKind) +
7058            TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
7059   }
7060   StoreInst *SI = cast<StoreInst>(I);
7061 
7062   bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
7063   return TTI.getAddressComputationCost(ValTy) +
7064          TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
7065                              CostKind) +
7066          (isLoopInvariantStoreValue
7067               ? 0
7068               : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
7069                                        VF.getKnownMinValue() - 1));
7070 }
7071 
7072 InstructionCost
7073 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
7074                                                  ElementCount VF) {
7075   Type *ValTy = getLoadStoreType(I);
7076   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
7077   const Align Alignment = getLoadStoreAlignment(I);
7078   const Value *Ptr = getLoadStorePointerOperand(I);
7079 
7080   return TTI.getAddressComputationCost(VectorTy) +
7081          TTI.getGatherScatterOpCost(
7082              I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
7083              TargetTransformInfo::TCK_RecipThroughput, I);
7084 }
7085 
7086 InstructionCost
7087 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
7088                                                    ElementCount VF) {
7089   // TODO: Once we have support for interleaving with scalable vectors
7090   // we can calculate the cost properly here.
7091   if (VF.isScalable())
7092     return InstructionCost::getInvalid();
7093 
7094   Type *ValTy = getLoadStoreType(I);
7095   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
7096   unsigned AS = getLoadStoreAddressSpace(I);
7097 
7098   auto Group = getInterleavedAccessGroup(I);
7099   assert(Group && "Fail to get an interleaved access group.");
7100 
7101   unsigned InterleaveFactor = Group->getFactor();
7102   auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
7103 
7104   // Holds the indices of existing members in an interleaved load group.
7105   // An interleaved store group doesn't need this as it doesn't allow gaps.
7106   SmallVector<unsigned, 4> Indices;
7107   if (isa<LoadInst>(I)) {
7108     for (unsigned i = 0; i < InterleaveFactor; i++)
7109       if (Group->getMember(i))
7110         Indices.push_back(i);
7111   }
7112 
7113   // Calculate the cost of the whole interleaved group.
7114   bool UseMaskForGaps =
7115       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
7116   InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
7117       I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
7118       AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);
7119 
7120   if (Group->isReverse()) {
7121     // TODO: Add support for reversed masked interleaved access.
7122     assert(!Legal->isMaskRequired(I) &&
7123            "Reverse masked interleaved access not supported.");
7124     Cost +=
7125         Group->getNumMembers() *
7126         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
7127   }
7128   return Cost;
7129 }
7130 
7131 InstructionCost LoopVectorizationCostModel::getReductionPatternCost(
7132     Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
  // Early exit for no in-loop reductions.
7134   if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
7135     return InstructionCost::getInvalid();
7136   auto *VectorTy = cast<VectorType>(Ty);
7137 
7138   // We are looking for a pattern of, and finding the minimal acceptable cost:
7139   //  reduce(mul(ext(A), ext(B))) or
7140   //  reduce(mul(A, B)) or
7141   //  reduce(ext(A)) or
7142   //  reduce(A).
  // The basic idea is that we walk down the tree, finding the root reduction
  // instruction in InLoopReductionImmediateChains. From there we find the
  // pattern of mul/ext and test the cost of the entire pattern vs the cost of
  // the components. If the reduction cost is lower, then we return it for the
  // reduction instruction and 0 for the other instructions in the pattern. If
  // it is not, we return an invalid cost specifying that the original cost
  // method should be used.
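  // For example (purely illustrative): an i32 accumulation of the product of
  // two sign-extended i16 values, i.e. reduce.add(mul(sext(A), sext(B))), is
  // costed as a whole via the extended-add reduction query below; if that is
  // cheaper, the reduction instruction carries the cost and the mul/ext
  // instructions in the pattern report 0.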
7150   Instruction *RetI = I;
7151   if ((RetI->getOpcode() == Instruction::SExt ||
7152        RetI->getOpcode() == Instruction::ZExt)) {
7153     if (!RetI->hasOneUser())
7154       return InstructionCost::getInvalid();
7155     RetI = RetI->user_back();
7156   }
7157   if (RetI->getOpcode() == Instruction::Mul &&
7158       RetI->user_back()->getOpcode() == Instruction::Add) {
7159     if (!RetI->hasOneUser())
7160       return InstructionCost::getInvalid();
7161     RetI = RetI->user_back();
7162   }
7163 
  // Test if the found instruction is a reduction, and if not return an invalid
  // cost specifying that the caller should use the original cost modelling.
7166   if (!InLoopReductionImmediateChains.count(RetI))
7167     return InstructionCost::getInvalid();
7168 
7169   // Find the reduction this chain is a part of and calculate the basic cost of
7170   // the reduction on its own.
7171   Instruction *LastChain = InLoopReductionImmediateChains[RetI];
7172   Instruction *ReductionPhi = LastChain;
7173   while (!isa<PHINode>(ReductionPhi))
7174     ReductionPhi = InLoopReductionImmediateChains[ReductionPhi];
7175 
7176   const RecurrenceDescriptor &RdxDesc =
7177       Legal->getReductionVars()[cast<PHINode>(ReductionPhi)];
7178   InstructionCost BaseCost = TTI.getArithmeticReductionCost(
7179       RdxDesc.getOpcode(), VectorTy, false, CostKind);
7180 
  // Get the operand that was not the reduction chain and match it to one of
  // the patterns, returning the better cost if it is found.
7183   Instruction *RedOp = RetI->getOperand(1) == LastChain
7184                            ? dyn_cast<Instruction>(RetI->getOperand(0))
7185                            : dyn_cast<Instruction>(RetI->getOperand(1));
7186 
7187   VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
7188 
7189   if (RedOp && (isa<SExtInst>(RedOp) || isa<ZExtInst>(RedOp)) &&
7190       !TheLoop->isLoopInvariant(RedOp)) {
7191     bool IsUnsigned = isa<ZExtInst>(RedOp);
7192     auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
7193     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
7194         /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
7195         CostKind);
7196 
7197     InstructionCost ExtCost =
7198         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
7199                              TTI::CastContextHint::None, CostKind, RedOp);
7200     if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
7201       return I == RetI ? *RedCost.getValue() : 0;
7202   } else if (RedOp && RedOp->getOpcode() == Instruction::Mul) {
7203     Instruction *Mul = RedOp;
7204     Instruction *Op0 = dyn_cast<Instruction>(Mul->getOperand(0));
7205     Instruction *Op1 = dyn_cast<Instruction>(Mul->getOperand(1));
7206     if (Op0 && Op1 && (isa<SExtInst>(Op0) || isa<ZExtInst>(Op0)) &&
7207         Op0->getOpcode() == Op1->getOpcode() &&
7208         Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
7209         !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
7210       bool IsUnsigned = isa<ZExtInst>(Op0);
7211       auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
7212       // reduce(mul(ext, ext))
7213       InstructionCost ExtCost =
7214           TTI.getCastInstrCost(Op0->getOpcode(), VectorTy, ExtType,
7215                                TTI::CastContextHint::None, CostKind, Op0);
7216       InstructionCost MulCost =
7217           TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind);
7218 
7219       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
7220           /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
7221           CostKind);
7222 
7223       if (RedCost.isValid() && RedCost < ExtCost * 2 + MulCost + BaseCost)
7224         return I == RetI ? *RedCost.getValue() : 0;
7225     } else {
7226       InstructionCost MulCost =
7227           TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind);
7228 
7229       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
7230           /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy,
7231           CostKind);
7232 
7233       if (RedCost.isValid() && RedCost < MulCost + BaseCost)
7234         return I == RetI ? *RedCost.getValue() : 0;
7235     }
7236   }
7237 
7238   return I == RetI ? BaseCost : InstructionCost::getInvalid();
7239 }
7240 
7241 InstructionCost
7242 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
7243                                                      ElementCount VF) {
7244   // Calculate scalar cost only. Vectorization cost should be ready at this
7245   // moment.
7246   if (VF.isScalar()) {
7247     Type *ValTy = getLoadStoreType(I);
7248     const Align Alignment = getLoadStoreAlignment(I);
7249     unsigned AS = getLoadStoreAddressSpace(I);
7250 
7251     return TTI.getAddressComputationCost(ValTy) +
7252            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
7253                                TTI::TCK_RecipThroughput, I);
7254   }
7255   return getWideningCost(I, VF);
7256 }
7257 
7258 LoopVectorizationCostModel::VectorizationCostTy
7259 LoopVectorizationCostModel::getInstructionCost(Instruction *I,
7260                                                ElementCount VF) {
7261   // If we know that this instruction will remain uniform, check the cost of
7262   // the scalar version.
7263   if (isUniformAfterVectorization(I, VF))
7264     VF = ElementCount::getFixed(1);
7265 
7266   if (VF.isVector() && isProfitableToScalarize(I, VF))
7267     return VectorizationCostTy(InstsToScalarize[VF][I], false);
7268 
7269   // Forced scalars do not have any scalarization overhead.
7270   auto ForcedScalar = ForcedScalars.find(VF);
7271   if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
7272     auto InstSet = ForcedScalar->second;
7273     if (InstSet.count(I))
7274       return VectorizationCostTy(
7275           (getInstructionCost(I, ElementCount::getFixed(1)).first *
7276            VF.getKnownMinValue()),
7277           false);
7278   }
7279 
7280   Type *VectorTy;
7281   InstructionCost C = getInstructionCost(I, VF, VectorTy);
7282 
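  // A widened instruction is considered "not scalarized" if legalization
  // splits its vector type into fewer parts than there are lanes; if it needs
  // at least one part per lane it is effectively scalarized.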
7283   bool TypeNotScalarized =
7284       VF.isVector() && VectorTy->isVectorTy() &&
7285       TTI.getNumberOfParts(VectorTy) < VF.getKnownMinValue();
7286   return VectorizationCostTy(C, TypeNotScalarized);
7287 }
7288 
7289 InstructionCost
7290 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
7291                                                      ElementCount VF) const {
7292 
7293   if (VF.isScalable())
7294     return InstructionCost::getInvalid();
7295 
7296   if (VF.isScalar())
7297     return 0;
7298 
7299   InstructionCost Cost = 0;
7300   Type *RetTy = ToVectorTy(I->getType(), VF);
7301   if (!RetTy->isVoidTy() &&
7302       (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
7303     Cost += TTI.getScalarizationOverhead(
7304         cast<VectorType>(RetTy), APInt::getAllOnesValue(VF.getKnownMinValue()),
7305         true, false);
7306 
7307   // Some targets keep addresses scalar.
7308   if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
7309     return Cost;
7310 
7311   // Some targets support efficient element stores.
7312   if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
7313     return Cost;
7314 
7315   // Collect operands to consider.
7316   CallInst *CI = dyn_cast<CallInst>(I);
7317   Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands();
7318 
7319   // Skip operands that do not require extraction/scalarization and do not incur
7320   // any overhead.
7321   SmallVector<Type *> Tys;
7322   for (auto *V : filterExtractingOperands(Ops, VF))
7323     Tys.push_back(MaybeVectorizeType(V->getType(), VF));
7324   return Cost + TTI.getOperandsScalarizationOverhead(
7325                     filterExtractingOperands(Ops, VF), Tys);
7326 }
7327 
7328 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
7329   if (VF.isScalar())
7330     return;
7331   NumPredStores = 0;
7332   for (BasicBlock *BB : TheLoop->blocks()) {
7333     // For each instruction in the old loop.
7334     for (Instruction &I : *BB) {
7335       Value *Ptr =  getLoadStorePointerOperand(&I);
7336       if (!Ptr)
7337         continue;
7338 
7339       // TODO: We should generate better code and update the cost model for
7340       // predicated uniform stores. Today they are treated as any other
7341       // predicated store (see added test cases in
7342       // invariant-store-vectorization.ll).
7343       if (isa<StoreInst>(&I) && isScalarWithPredication(&I))
7344         NumPredStores++;
7345 
7346       if (Legal->isUniformMemOp(I)) {
7347         // TODO: Avoid replicating loads and stores instead of
7348         // relying on instcombine to remove them.
7349         // Load: Scalar load + broadcast
7350         // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
7351         InstructionCost Cost = getUniformMemOpCost(&I, VF);
7352         setWideningDecision(&I, VF, CM_Scalarize, Cost);
7353         continue;
7354       }
7355 
7356       // We assume that widening is the best solution when possible.
7357       if (memoryInstructionCanBeWidened(&I, VF)) {
7358         InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
7359         int ConsecutiveStride =
7360                Legal->isConsecutivePtr(getLoadStorePointerOperand(&I));
7361         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
7362                "Expected consecutive stride.");
7363         InstWidening Decision =
7364             ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
7365         setWideningDecision(&I, VF, Decision, Cost);
7366         continue;
7367       }
7368 
7369       // Choose between Interleaving, Gather/Scatter or Scalarization.
7370       InstructionCost InterleaveCost = InstructionCost::getInvalid();
7371       unsigned NumAccesses = 1;
7372       if (isAccessInterleaved(&I)) {
7373         auto Group = getInterleavedAccessGroup(&I);
        assert(Group && "Failed to get an interleaved access group.");
7375 
7376         // Make one decision for the whole group.
7377         if (getWideningDecision(&I, VF) != CM_Unknown)
7378           continue;
7379 
7380         NumAccesses = Group->getNumMembers();
7381         if (interleavedAccessCanBeWidened(&I, VF))
7382           InterleaveCost = getInterleaveGroupCost(&I, VF);
7383       }
7384 
7385       InstructionCost GatherScatterCost =
7386           isLegalGatherOrScatter(&I)
7387               ? getGatherScatterCost(&I, VF) * NumAccesses
7388               : InstructionCost::getInvalid();
7389 
7390       InstructionCost ScalarizationCost =
7391           getMemInstScalarizationCost(&I, VF) * NumAccesses;
7392 
      // Choose the better solution for the current VF, record this decision,
      // and use it during vectorization.
7395       InstructionCost Cost;
7396       InstWidening Decision;
7397       if (InterleaveCost <= GatherScatterCost &&
7398           InterleaveCost < ScalarizationCost) {
7399         Decision = CM_Interleave;
7400         Cost = InterleaveCost;
7401       } else if (GatherScatterCost < ScalarizationCost) {
7402         Decision = CM_GatherScatter;
7403         Cost = GatherScatterCost;
7404       } else {
7405         assert(!VF.isScalable() &&
7406                "We cannot yet scalarise for scalable vectors");
7407         Decision = CM_Scalarize;
7408         Cost = ScalarizationCost;
7409       }
      // If the instruction belongs to an interleave group, the whole group
      // receives the same decision. The cost is computed for the whole group,
      // but it is actually assigned to just one of its member instructions.
7413       if (auto Group = getInterleavedAccessGroup(&I))
7414         setWideningDecision(Group, VF, Decision, Cost);
7415       else
7416         setWideningDecision(&I, VF, Decision, Cost);
7417     }
7418   }
7419 
7420   // Make sure that any load of address and any other address computation
7421   // remains scalar unless there is gather/scatter support. This avoids
7422   // inevitable extracts into address registers, and also has the benefit of
7423   // activating LSR more, since that pass can't optimize vectorized
7424   // addresses.
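  // For example, a load whose result is only used as the pointer operand of
  // another memory instruction would otherwise be widened and then require an
  // extract for every lane to form each scalar address.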
7425   if (TTI.prefersVectorizedAddressing())
7426     return;
7427 
7428   // Start with all scalar pointer uses.
7429   SmallPtrSet<Instruction *, 8> AddrDefs;
7430   for (BasicBlock *BB : TheLoop->blocks())
7431     for (Instruction &I : *BB) {
7432       Instruction *PtrDef =
7433         dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
7434       if (PtrDef && TheLoop->contains(PtrDef) &&
7435           getWideningDecision(&I, VF) != CM_GatherScatter)
7436         AddrDefs.insert(PtrDef);
7437     }
7438 
7439   // Add all instructions used to generate the addresses.
7440   SmallVector<Instruction *, 4> Worklist;
7441   append_range(Worklist, AddrDefs);
7442   while (!Worklist.empty()) {
7443     Instruction *I = Worklist.pop_back_val();
7444     for (auto &Op : I->operands())
7445       if (auto *InstOp = dyn_cast<Instruction>(Op))
7446         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
7447             AddrDefs.insert(InstOp).second)
7448           Worklist.push_back(InstOp);
7449   }
7450 
7451   for (auto *I : AddrDefs) {
7452     if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by
      // the cost functions, but since that requires finding out whether the
      // loaded register is involved in an address computation, the decision
      // is instead changed here, where we know this is the case.
7457       InstWidening Decision = getWideningDecision(I, VF);
7458       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
7459         // Scalarize a widened load of address.
7460         setWideningDecision(
7461             I, VF, CM_Scalarize,
7462             (VF.getKnownMinValue() *
7463              getMemoryInstructionCost(I, ElementCount::getFixed(1))));
7464       else if (auto Group = getInterleavedAccessGroup(I)) {
7465         // Scalarize an interleave group of address loads.
7466         for (unsigned I = 0; I < Group->getFactor(); ++I) {
7467           if (Instruction *Member = Group->getMember(I))
7468             setWideningDecision(
7469                 Member, VF, CM_Scalarize,
7470                 (VF.getKnownMinValue() *
7471                  getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
7472         }
7473       }
7474     } else
7475       // Make sure I gets scalarized and a cost estimate without
7476       // scalarization overhead.
7477       ForcedScalars[VF].insert(I);
7478   }
7479 }
7480 
7481 InstructionCost
7482 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
7483                                                Type *&VectorTy) {
7484   Type *RetTy = I->getType();
7485   if (canTruncateToMinimalBitwidth(I, VF))
7486     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
7487   auto SE = PSE.getSE();
7488   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
7489 
7490   auto hasSingleCopyAfterVectorization = [this](Instruction *I,
7491                                                 ElementCount VF) -> bool {
7492     if (VF.isScalar())
7493       return true;
7494 
7495     auto Scalarized = InstsToScalarize.find(VF);
7496     assert(Scalarized != InstsToScalarize.end() &&
7497            "VF not yet analyzed for scalarization profitability");
7498     return !Scalarized->second.count(I) &&
7499            llvm::all_of(I->users(), [&](User *U) {
7500              auto *UI = cast<Instruction>(U);
7501              return !Scalarized->second.count(UI);
7502            });
7503   };
7504   (void) hasSingleCopyAfterVectorization;
7505 
7506   if (isScalarAfterVectorization(I, VF)) {
    // With the exception of GEPs and PHIs, after scalarization there should
    // only be one copy of the instruction generated in the loop. This is
    // because the VF is either 1, or any instructions that need scalarizing
    // have already been dealt with by the time we get here. As a result, we
    // don't have to multiply the instruction cost by VF.
7512     assert(I->getOpcode() == Instruction::GetElementPtr ||
7513            I->getOpcode() == Instruction::PHI ||
7514            (I->getOpcode() == Instruction::BitCast &&
7515             I->getType()->isPointerTy()) ||
7516            hasSingleCopyAfterVectorization(I, VF));
7517     VectorTy = RetTy;
7518   } else
7519     VectorTy = ToVectorTy(RetTy, VF);
7520 
7521   // TODO: We need to estimate the cost of intrinsic calls.
7522   switch (I->getOpcode()) {
7523   case Instruction::GetElementPtr:
7524     // We mark this instruction as zero-cost because the cost of GEPs in
7525     // vectorized code depends on whether the corresponding memory instruction
7526     // is scalarized or not. Therefore, we handle GEPs with the memory
7527     // instruction cost.
7528     return 0;
7529   case Instruction::Br: {
    // In cases of scalarized and predicated instructions, there will be VF
    // predicated blocks in the vectorized loop. Each branch around these
    // blocks also requires an extract of the corresponding i1 element of the
    // vector compare.
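    // For example, with VF = 4 this accounts for four scalar branches plus the
    // extraction of the four i1 mask elements they branch on.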
7533     bool ScalarPredicatedBB = false;
7534     BranchInst *BI = cast<BranchInst>(I);
7535     if (VF.isVector() && BI->isConditional() &&
7536         (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
7537          PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
7538       ScalarPredicatedBB = true;
7539 
7540     if (ScalarPredicatedBB) {
7541       // Return cost for branches around scalarized and predicated blocks.
7542       assert(!VF.isScalable() && "scalable vectors not yet supported.");
7543       auto *Vec_i1Ty =
7544           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
7545       return (TTI.getScalarizationOverhead(
7546                   Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()),
7547                   false, true) +
7548               (TTI.getCFInstrCost(Instruction::Br, CostKind) *
7549                VF.getKnownMinValue()));
7550     } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
7551       // The back-edge branch will remain, as will all scalar branches.
7552       return TTI.getCFInstrCost(Instruction::Br, CostKind);
7553     else
7554       // This branch will be eliminated by if-conversion.
7555       return 0;
7556     // Note: We currently assume zero cost for an unconditional branch inside
7557     // a predicated block since it will become a fall-through, although we
7558     // may decide in the future to call TTI for all branches.
7559   }
7560   case Instruction::PHI: {
7561     auto *Phi = cast<PHINode>(I);
7562 
7563     // First-order recurrences are replaced by vector shuffles inside the loop.
7564     // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
7565     if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi))
7566       return TTI.getShuffleCost(
7567           TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy),
7568           None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1));
7569 
7570     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
7571     // converted into select instructions. We require N - 1 selects per phi
7572     // node, where N is the number of incoming values.
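    // For example, a phi merging values from three predecessors is lowered to
    // two vector selects.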
7573     if (VF.isVector() && Phi->getParent() != TheLoop->getHeader())
7574       return (Phi->getNumIncomingValues() - 1) *
7575              TTI.getCmpSelInstrCost(
7576                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
7577                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
7578                  CmpInst::BAD_ICMP_PREDICATE, CostKind);
7579 
7580     return TTI.getCFInstrCost(Instruction::PHI, CostKind);
7581   }
7582   case Instruction::UDiv:
7583   case Instruction::SDiv:
7584   case Instruction::URem:
7585   case Instruction::SRem:
7586     // If we have a predicated instruction, it may not be executed for each
7587     // vector lane. Get the scalarization cost and scale this amount by the
7588     // probability of executing the predicated block. If the instruction is not
7589     // predicated, we fall through to the next case.
7590     if (VF.isVector() && isScalarWithPredication(I)) {
7591       InstructionCost Cost = 0;
7592 
7593       // These instructions have a non-void type, so account for the phi nodes
7594       // that we will create. This cost is likely to be zero. The phi node
7595       // cost, if any, should be scaled by the block probability because it
7596       // models a copy at the end of each predicated block.
7597       Cost += VF.getKnownMinValue() *
7598               TTI.getCFInstrCost(Instruction::PHI, CostKind);
7599 
7600       // The cost of the non-predicated instruction.
7601       Cost += VF.getKnownMinValue() *
7602               TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);
7603 
7604       // The cost of insertelement and extractelement instructions needed for
7605       // scalarization.
7606       Cost += getScalarizationOverhead(I, VF);
7607 
7608       // Scale the cost by the probability of executing the predicated blocks.
7609       // This assumes the predicated block for each vector lane is equally
7610       // likely.
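      // For example, assuming getReciprocalPredBlockProb() models a block
      // probability of 1/2, the scalarized cost computed above is halved.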
7611       return Cost / getReciprocalPredBlockProb();
7612     }
7613     LLVM_FALLTHROUGH;
7614   case Instruction::Add:
7615   case Instruction::FAdd:
7616   case Instruction::Sub:
7617   case Instruction::FSub:
7618   case Instruction::Mul:
7619   case Instruction::FMul:
7620   case Instruction::FDiv:
7621   case Instruction::FRem:
7622   case Instruction::Shl:
7623   case Instruction::LShr:
7624   case Instruction::AShr:
7625   case Instruction::And:
7626   case Instruction::Or:
7627   case Instruction::Xor: {
    // Since we will replace the stride by 1, the multiplication should go
    // away.
7629     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
7630       return 0;
7631 
7632     // Detect reduction patterns
7633     InstructionCost RedCost;
7634     if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7635             .isValid())
7636       return RedCost;
7637 
7638     // Certain instructions can be cheaper to vectorize if they have a constant
7639     // second vector operand. One example of this are shifts on x86.
7640     Value *Op2 = I->getOperand(1);
7641     TargetTransformInfo::OperandValueProperties Op2VP;
7642     TargetTransformInfo::OperandValueKind Op2VK =
7643         TTI.getOperandInfo(Op2, Op2VP);
7644     if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
7645       Op2VK = TargetTransformInfo::OK_UniformValue;
7646 
7647     SmallVector<const Value *, 4> Operands(I->operand_values());
7648     return TTI.getArithmeticInstrCost(
7649         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7650         Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
7651   }
7652   case Instruction::FNeg: {
7653     return TTI.getArithmeticInstrCost(
7654         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7655         TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None,
7656         TargetTransformInfo::OP_None, I->getOperand(0), I);
7657   }
7658   case Instruction::Select: {
7659     SelectInst *SI = cast<SelectInst>(I);
7660     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
7661     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
7662 
7663     const Value *Op0, *Op1;
7664     using namespace llvm::PatternMatch;
7665     if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
7666                         match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
7667       // select x, y, false --> x & y
7668       // select x, true, y --> x | y
7669       TTI::OperandValueProperties Op1VP = TTI::OP_None;
7670       TTI::OperandValueProperties Op2VP = TTI::OP_None;
7671       TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP);
7672       TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP);
7673       assert(Op0->getType()->getScalarSizeInBits() == 1 &&
7674               Op1->getType()->getScalarSizeInBits() == 1);
7675 
7676       SmallVector<const Value *, 2> Operands{Op0, Op1};
7677       return TTI.getArithmeticInstrCost(
7678           match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And, VectorTy,
7679           CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I);
7680     }
7681 
7682     Type *CondTy = SI->getCondition()->getType();
7683     if (!ScalarCond)
7684       CondTy = VectorType::get(CondTy, VF);
7685     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy,
7686                                   CmpInst::BAD_ICMP_PREDICATE, CostKind, I);
7687   }
7688   case Instruction::ICmp:
7689   case Instruction::FCmp: {
7690     Type *ValTy = I->getOperand(0)->getType();
7691     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
7692     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
7693       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
7694     VectorTy = ToVectorTy(ValTy, VF);
7695     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr,
7696                                   CmpInst::BAD_ICMP_PREDICATE, CostKind, I);
7697   }
7698   case Instruction::Store:
7699   case Instruction::Load: {
7700     ElementCount Width = VF;
7701     if (Width.isVector()) {
7702       InstWidening Decision = getWideningDecision(I, Width);
7703       assert(Decision != CM_Unknown &&
7704              "CM decision should be taken at this point");
7705       if (Decision == CM_Scalarize)
7706         Width = ElementCount::getFixed(1);
7707     }
7708     VectorTy = ToVectorTy(getLoadStoreType(I), Width);
7709     return getMemoryInstructionCost(I, VF);
7710   }
7711   case Instruction::BitCast:
7712     if (I->getType()->isPointerTy())
7713       return 0;
7714     LLVM_FALLTHROUGH;
7715   case Instruction::ZExt:
7716   case Instruction::SExt:
7717   case Instruction::FPToUI:
7718   case Instruction::FPToSI:
7719   case Instruction::FPExt:
7720   case Instruction::PtrToInt:
7721   case Instruction::IntToPtr:
7722   case Instruction::SIToFP:
7723   case Instruction::UIToFP:
7724   case Instruction::Trunc:
7725   case Instruction::FPTrunc: {
7726     // Computes the CastContextHint from a Load/Store instruction.
7727     auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
7728       assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
7729              "Expected a load or a store!");
7730 
7731       if (VF.isScalar() || !TheLoop->contains(I))
7732         return TTI::CastContextHint::Normal;
7733 
7734       switch (getWideningDecision(I, VF)) {
7735       case LoopVectorizationCostModel::CM_GatherScatter:
7736         return TTI::CastContextHint::GatherScatter;
7737       case LoopVectorizationCostModel::CM_Interleave:
7738         return TTI::CastContextHint::Interleave;
7739       case LoopVectorizationCostModel::CM_Scalarize:
7740       case LoopVectorizationCostModel::CM_Widen:
7741         return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked
7742                                         : TTI::CastContextHint::Normal;
7743       case LoopVectorizationCostModel::CM_Widen_Reverse:
7744         return TTI::CastContextHint::Reversed;
7745       case LoopVectorizationCostModel::CM_Unknown:
7746         llvm_unreachable("Instr did not go through cost modelling?");
7747       }
7748 
7749       llvm_unreachable("Unhandled case!");
7750     };
7751 
7752     unsigned Opcode = I->getOpcode();
7753     TTI::CastContextHint CCH = TTI::CastContextHint::None;
7754     // For Trunc, the context is the only user, which must be a StoreInst.
7755     if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
7756       if (I->hasOneUse())
7757         if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
7758           CCH = ComputeCCH(Store);
7759     }
7760     // For Z/Sext, the context is the operand, which must be a LoadInst.
7761     else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
7762              Opcode == Instruction::FPExt) {
7763       if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
7764         CCH = ComputeCCH(Load);
7765     }
7766 
7767     // We optimize the truncation of induction variables having constant
7768     // integer steps. The cost of these truncations is the same as the scalar
7769     // operation.
7770     if (isOptimizableIVTruncate(I, VF)) {
7771       auto *Trunc = cast<TruncInst>(I);
7772       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
7773                                   Trunc->getSrcTy(), CCH, CostKind, Trunc);
7774     }
7775 
7776     // Detect reduction patterns
7777     InstructionCost RedCost;
7778     if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7779             .isValid())
7780       return RedCost;
7781 
7782     Type *SrcScalarTy = I->getOperand(0)->getType();
7783     Type *SrcVecTy =
7784         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
7785     if (canTruncateToMinimalBitwidth(I, VF)) {
      // This cast is going to be shrunk. This may remove the cast or it might
      // turn it into a slightly different cast. For example, if MinBW == 16,
      // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
7789       //
7790       // Calculate the modified src and dest types.
7791       Type *MinVecTy = VectorTy;
7792       if (Opcode == Instruction::Trunc) {
7793         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
7794         VectorTy =
7795             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7796       } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
7797         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
7798         VectorTy =
7799             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7800       }
7801     }
7802 
7803     return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
7804   }
7805   case Instruction::Call: {
7806     bool NeedToScalarize;
7807     CallInst *CI = cast<CallInst>(I);
7808     InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
7809     if (getVectorIntrinsicIDForCall(CI, TLI)) {
7810       InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF);
7811       return std::min(CallCost, IntrinsicCost);
7812     }
7813     return CallCost;
7814   }
7815   case Instruction::ExtractValue:
7816     return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput);
7817   default:
7818     // This opcode is unknown. Assume that it is the same as 'mul'.
7819     return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
7820   } // end of switch.
7821 }
7822 
7823 char LoopVectorize::ID = 0;
7824 
7825 static const char lv_name[] = "Loop Vectorization";
7826 
7827 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
7828 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
7829 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
7830 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
7831 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
7832 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
7833 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
7834 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
7835 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
7836 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
7837 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
7838 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
7839 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
7840 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
7841 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
7842 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
7843 
7844 namespace llvm {
7845 
7846 Pass *createLoopVectorizePass() { return new LoopVectorize(); }
7847 
7848 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
7849                               bool VectorizeOnlyWhenForced) {
7850   return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
7851 }
7852 
7853 } // end namespace llvm
7854 
7855 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
7856   // Check if the pointer operand of a load or store instruction is
7857   // consecutive.
7858   if (auto *Ptr = getLoadStorePointerOperand(Inst))
7859     return Legal->isConsecutivePtr(Ptr);
7860   return false;
7861 }
7862 
7863 void LoopVectorizationCostModel::collectValuesToIgnore() {
7864   // Ignore ephemeral values.
7865   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
7866 
7867   // Ignore type-promoting instructions we identified during reduction
7868   // detection.
7869   for (auto &Reduction : Legal->getReductionVars()) {
7870     RecurrenceDescriptor &RedDes = Reduction.second;
7871     const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
7872     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7873   }
7874   // Ignore type-casting instructions we identified during induction
7875   // detection.
7876   for (auto &Induction : Legal->getInductionVars()) {
7877     InductionDescriptor &IndDes = Induction.second;
7878     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
7879     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7880   }
7881 }
7882 
7883 void LoopVectorizationCostModel::collectInLoopReductions() {
7884   for (auto &Reduction : Legal->getReductionVars()) {
7885     PHINode *Phi = Reduction.first;
7886     RecurrenceDescriptor &RdxDesc = Reduction.second;
7887 
7888     // We don't collect reductions that are type promoted (yet).
7889     if (RdxDesc.getRecurrenceType() != Phi->getType())
7890       continue;
7891 
7892     // If the target would prefer this reduction to happen "in-loop", then we
7893     // want to record it as such.
7894     unsigned Opcode = RdxDesc.getOpcode();
7895     if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) &&
7896         !TTI.preferInLoopReduction(Opcode, Phi->getType(),
7897                                    TargetTransformInfo::ReductionFlags()))
7898       continue;
7899 
7900     // Check that we can correctly put the reductions into the loop, by
7901     // finding the chain of operations that leads from the phi to the loop
7902     // exit value.
7903     SmallVector<Instruction *, 4> ReductionOperations =
7904         RdxDesc.getReductionOpChain(Phi, TheLoop);
7905     bool InLoop = !ReductionOperations.empty();
7906     if (InLoop) {
7907       InLoopReductionChains[Phi] = ReductionOperations;
7908       // Add the elements to InLoopReductionImmediateChains for cost modelling.
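      // Each operation maps to the instruction that feeds it in the chain;
      // the first operation maps to the reduction phi itself.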
7909       Instruction *LastChain = Phi;
7910       for (auto *I : ReductionOperations) {
7911         InLoopReductionImmediateChains[I] = LastChain;
7912         LastChain = I;
7913       }
7914     }
7915     LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
7916                       << " reduction for phi: " << *Phi << "\n");
7917   }
7918 }
7919 
// TODO: we could return a pair of values that specify the max VF and
// min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
// `buildVPlans(VF, VF)`. We cannot do this yet because VPlan does not
// currently have a cost model that can choose which plan to execute if
// more than one is generated.
7925 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
7926                                  LoopVectorizationCostModel &CM) {
7927   unsigned WidestType;
7928   std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
7929   return WidestVectorRegBits / WidestType;
7930 }
7931 
7932 VectorizationFactor
7933 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
7934   assert(!UserVF.isScalable() && "scalable vectors not yet supported");
7935   ElementCount VF = UserVF;
  // Outer loop handling: outer loops may require CFG and instruction-level
  // transformations before we can even evaluate whether vectorization is
  // profitable. Since we cannot modify the incoming IR, we need to build the
  // VPlan upfront in the vectorization pipeline.
7940   if (!OrigLoop->isInnermost()) {
7941     // If the user doesn't provide a vectorization factor, determine a
7942     // reasonable one.
7943     if (UserVF.isZero()) {
7944       VF = ElementCount::getFixed(determineVPlanVF(
7945           TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
7946               .getFixedSize(),
7947           CM));
7948       LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
7949 
7950       // Make sure we have a VF > 1 for stress testing.
7951       if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
7952         LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
7953                           << "overriding computed VF.\n");
7954         VF = ElementCount::getFixed(4);
7955       }
7956     }
7957     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7958     assert(isPowerOf2_32(VF.getKnownMinValue()) &&
7959            "VF needs to be a power of two");
7960     LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
7961                       << "VF " << VF << " to build VPlans.\n");
7962     buildVPlans(VF, VF);
7963 
7964     // For VPlan build stress testing, we bail out after VPlan construction.
7965     if (VPlanBuildStressTest)
7966       return VectorizationFactor::Disabled();
7967 
7968     return {VF, 0 /*Cost*/};
7969   }
7970 
7971   LLVM_DEBUG(
7972       dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
7973                 "VPlan-native path.\n");
7974   return VectorizationFactor::Disabled();
7975 }
7976 
7977 Optional<VectorizationFactor>
7978 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
7979   assert(OrigLoop->isInnermost() && "Inner loop expected.");
7980   FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
  if (!MaxFactors) // Cases that should not be vectorized or interleaved.
7982     return None;
7983 
7984   // Invalidate interleave groups if all blocks of loop will be predicated.
7985   if (CM.blockNeedsPredication(OrigLoop->getHeader()) &&
7986       !useMaskedInterleavedAccesses(*TTI)) {
7987     LLVM_DEBUG(
7988         dbgs()
7989         << "LV: Invalidate all interleaved groups due to fold-tail by masking "
7990            "which requires masked-interleaved support.\n");
7991     if (CM.InterleaveInfo.invalidateGroups())
7992       // Invalidating interleave groups also requires invalidating all decisions
7993       // based on them, which includes widening decisions and uniform and scalar
7994       // values.
7995       CM.invalidateCostModelingDecisions();
7996   }
7997 
7998   ElementCount MaxUserVF =
7999       UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF;
8000   bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF);
8001   if (!UserVF.isZero() && UserVFIsLegal) {
8002     LLVM_DEBUG(dbgs() << "LV: Using " << (UserVFIsLegal ? "user" : "max")
8003                       << " VF " << UserVF << ".\n");
8004     assert(isPowerOf2_32(UserVF.getKnownMinValue()) &&
8005            "VF needs to be a power of two");
8006     // Collect the instructions (and their associated costs) that will be more
8007     // profitable to scalarize.
8008     CM.selectUserVectorizationFactor(UserVF);
8009     CM.collectInLoopReductions();
8010     buildVPlansWithVPRecipes(UserVF, UserVF);
8011     LLVM_DEBUG(printPlans(dbgs()));
8012     return {{UserVF, 0}};
8013   }
8014 
8015   // Populate the set of Vectorization Factor Candidates.
8016   ElementCountSet VFCandidates;
8017   for (auto VF = ElementCount::getFixed(1);
8018        ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2)
8019     VFCandidates.insert(VF);
8020   for (auto VF = ElementCount::getScalable(1);
8021        ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2)
8022     VFCandidates.insert(VF);
8023 
8024   for (const auto &VF : VFCandidates) {
8025     // Collect Uniform and Scalar instructions after vectorization with VF.
8026     CM.collectUniformsAndScalars(VF);
8027 
8028     // Collect the instructions (and their associated costs) that will be more
8029     // profitable to scalarize.
8030     if (VF.isVector())
8031       CM.collectInstsToScalarize(VF);
8032   }
8033 
8034   CM.collectInLoopReductions();
8035   buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF);
8036   buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF);
8037 
8038   LLVM_DEBUG(printPlans(dbgs()));
8039   if (!MaxFactors.hasVector())
8040     return VectorizationFactor::Disabled();
8041 
8042   // Select the optimal vectorization factor.
8043   auto SelectedVF = CM.selectVectorizationFactor(VFCandidates);
8044 
8045   // Check if it is profitable to vectorize with runtime checks.
8046   unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks();
8047   if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) {
8048     bool PragmaThresholdReached =
8049         NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
8050     bool ThresholdReached =
8051         NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
8052     if ((ThresholdReached && !Hints.allowReordering()) ||
8053         PragmaThresholdReached) {
8054       ORE->emit([&]() {
8055         return OptimizationRemarkAnalysisAliasing(
8056                    DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(),
8057                    OrigLoop->getHeader())
8058                << "loop not vectorized: cannot prove it is safe to reorder "
8059                   "memory operations";
8060       });
8061       LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
8062       Hints.emitRemarkWithHints();
8063       return VectorizationFactor::Disabled();
8064     }
8065   }
8066   return SelectedVF;
8067 }
8068 
8069 void LoopVectorizationPlanner::setBestPlan(ElementCount VF, unsigned UF) {
8070   LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF
8071                     << '\n');
8072   BestVF = VF;
8073   BestUF = UF;
8074 
8075   erase_if(VPlans, [VF](const VPlanPtr &Plan) {
8076     return !Plan->hasVF(VF);
8077   });
  assert(VPlans.size() == 1 && "Best VF does not have a single VPlan.");
8079 }
8080 
8081 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV,
8082                                            DominatorTree *DT) {
8083   // Perform the actual loop transformation.
8084 
8085   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
8086   assert(BestVF.hasValue() && "Vectorization Factor is missing");
8087   assert(VPlans.size() == 1 && "Not a single VPlan to execute.");
8088 
8089   VPTransformState State{
8090       *BestVF, BestUF, LI, DT, ILV.Builder, &ILV, VPlans.front().get()};
8091   State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
8092   State.TripCount = ILV.getOrCreateTripCount(nullptr);
8093   State.CanonicalIV = ILV.Induction;
8094 
8095   ILV.printDebugTracesAtStart();
8096 
8097   //===------------------------------------------------===//
8098   //
  // Notice: any optimization or new instruction that goes
8100   // into the code below should also be implemented in
8101   // the cost-model.
8102   //
8103   //===------------------------------------------------===//
8104 
8105   // 2. Copy and widen instructions from the old loop into the new loop.
8106   VPlans.front()->execute(&State);
8107 
8108   // 3. Fix the vectorized code: take care of header phi's, live-outs,
8109   //    predication, updating analyses.
8110   ILV.fixVectorizedLoop(State);
8111 
8112   ILV.printDebugTracesAtEnd();
8113 }
8114 
8115 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
8116 void LoopVectorizationPlanner::printPlans(raw_ostream &O) {
8117   for (const auto &Plan : VPlans)
8118     if (PrintVPlansInDotFormat)
8119       Plan->printDOT(O);
8120     else
8121       Plan->print(O);
8122 }
8123 #endif
8124 
8125 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
8126     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
8127 
  // We create new control-flow for the vectorized loop, so the original exit
  // condition will be dead after vectorization if it is only used by the
  // terminator.
8131   SmallVector<BasicBlock*> ExitingBlocks;
8132   OrigLoop->getExitingBlocks(ExitingBlocks);
8133   for (auto *BB : ExitingBlocks) {
8134     auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
8135     if (!Cmp || !Cmp->hasOneUse())
8136       continue;
8137 
8138     // TODO: we should introduce a getUniqueExitingBlocks on Loop
8139     if (!DeadInstructions.insert(Cmp).second)
8140       continue;
8141 
    // An operand of the icmp is often a dead trunc, used by IndUpdate.
8143     // TODO: can recurse through operands in general
8144     for (Value *Op : Cmp->operands()) {
8145       if (isa<TruncInst>(Op) && Op->hasOneUse())
8146           DeadInstructions.insert(cast<Instruction>(Op));
8147     }
8148   }
8149 
8150   // We create new "steps" for induction variable updates to which the original
8151   // induction variables map. An original update instruction will be dead if
8152   // all its users except the induction variable are dead.
8153   auto *Latch = OrigLoop->getLoopLatch();
8154   for (auto &Induction : Legal->getInductionVars()) {
8155     PHINode *Ind = Induction.first;
8156     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
8157 
    // If the tail is to be folded by masking, the primary induction variable,
    // if it exists, isn't dead: it will be used for masking. Don't kill it.
8160     if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction())
8161       continue;
8162 
8163     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
8164           return U == Ind || DeadInstructions.count(cast<Instruction>(U));
8165         }))
8166       DeadInstructions.insert(IndUpdate);
8167 
    // We also record as "Dead" the type-casting instructions we had identified
    // during induction analysis. We don't need any handling for them in the
    // vectorized loop because we have proven that, under a proper runtime test
    // guarding the vectorized loop, the value of the phi and the casted value
    // of the phi are the same. The last instruction in this casting chain will
    // get its scalar/vector/widened def from the scalar/vector/widened def of
    // the respective phi node. Any other casts in the induction def-use chain
    // have no other uses outside the phi update chain, and will be ignored.
8176     InductionDescriptor &IndDes = Induction.second;
8177     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
8178     DeadInstructions.insert(Casts.begin(), Casts.end());
8179   }
8180 }
8181 
8182 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }
8183 
8184 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
8185 
8186 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
8187                                         Instruction::BinaryOps BinOp) {
8188   // When unrolling and the VF is 1, we only need to add a simple scalar.
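  // For example, with StartIdx == 2 and Step == 1 this computes
  // Val + 2 * Step, i.e. effectively "add %val, 2".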
8189   Type *Ty = Val->getType();
8190   assert(!Ty->isVectorTy() && "Val must be a scalar");
8191 
8192   if (Ty->isFloatingPointTy()) {
8193     Constant *C = ConstantFP::get(Ty, (double)StartIdx);
8194 
8195     // Floating-point operations inherit FMF via the builder's flags.
8196     Value *MulOp = Builder.CreateFMul(C, Step);
8197     return Builder.CreateBinOp(BinOp, Val, MulOp);
8198   }
8199   Constant *C = ConstantInt::get(Ty, StartIdx);
8200   return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
8201 }
8202 
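/// Add a "llvm.loop.unroll.runtime.disable" hint to the loop metadata of
/// \p L, unless the loop already carries an "llvm.loop.unroll.disable" hint.
/// The resulting loop ID roughly looks like:
///   !0 = distinct !{!0, ..., !1}
///   !1 = !{!"llvm.loop.unroll.runtime.disable"}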
8203 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
8204   SmallVector<Metadata *, 4> MDs;
8205   // Reserve first location for self reference to the LoopID metadata node.
8206   MDs.push_back(nullptr);
8207   bool IsUnrollMetadata = false;
8208   MDNode *LoopID = L->getLoopID();
8209   if (LoopID) {
8210     // First find existing loop unrolling disable metadata.
8211     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
8212       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
8213       if (MD) {
8214         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
8215         IsUnrollMetadata =
8216             S && S->getString().startswith("llvm.loop.unroll.disable");
8217       }
8218       MDs.push_back(LoopID->getOperand(i));
8219     }
8220   }
8221 
8222   if (!IsUnrollMetadata) {
8223     // Add runtime unroll disable metadata.
8224     LLVMContext &Context = L->getHeader()->getContext();
8225     SmallVector<Metadata *, 1> DisableOperands;
8226     DisableOperands.push_back(
8227         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
8228     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
8229     MDs.push_back(DisableNode);
8230     MDNode *NewLoopID = MDNode::get(Context, MDs);
8231     // Set operand 0 to refer to the loop id itself.
8232     NewLoopID->replaceOperandWith(0, NewLoopID);
8233     L->setLoopID(NewLoopID);
8234   }
8235 }
8236 
8237 //===--------------------------------------------------------------------===//
8238 // EpilogueVectorizerMainLoop
8239 //===--------------------------------------------------------------------===//
8240 
8241 /// This function is partially responsible for generating the control flow
8242 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
8243 BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
8244   MDNode *OrigLoopID = OrigLoop->getLoopID();
8245   Loop *Lp = createVectorLoopSkeleton("");
8246 
8247   // Generate the code to check the minimum iteration count of the vector
8248   // epilogue (see below).
8249   EPI.EpilogueIterationCountCheck =
8250       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true);
8251   EPI.EpilogueIterationCountCheck->setName("iter.check");
8252 
8253   // Generate the code to check any assumptions that we've made for SCEV
8254   // expressions.
8255   EPI.SCEVSafetyCheck = emitSCEVChecks(Lp, LoopScalarPreHeader);
8256 
8257   // Generate the code that checks at runtime if arrays overlap. We put the
8258   // checks into a separate block to make the more common case of few elements
8259   // faster.
8260   EPI.MemSafetyCheck = emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
8261 
  // Generate the iteration count check for the main loop, *after* the check
  // for the epilogue loop, so that the path-length is shorter for the case
  // that goes directly through the vector epilogue. The longer path-length for
  // the main loop is compensated for by the gain from vectorizing the larger
  // trip count. Note: the branch will get updated later on when we vectorize
  // the epilogue.
8268   EPI.MainLoopIterationCountCheck =
8269       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false);
8270 
8271   // Generate the induction variable.
8272   OldInduction = Legal->getPrimaryInduction();
8273   Type *IdxTy = Legal->getWidestInductionType();
8274   Value *StartIdx = ConstantInt::get(IdxTy, 0);
8275   Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
8276   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
8277   EPI.VectorTripCount = CountRoundDown;
8278   Induction =
8279       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
8280                               getDebugLocFromInstOrOperands(OldInduction));
8281 
8282   // Skip induction resume value creation here because they will be created in
8283   // the second pass. If we created them here, they wouldn't be used anyway,
8284   // because the vplan in the second pass still contains the inductions from the
8285   // original loop.
8286 
8287   return completeLoopSkeleton(Lp, OrigLoopID);
8288 }
8289 
8290 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
8291   LLVM_DEBUG({
8292     dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
8293            << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue()
8294            << ", Main Loop UF:" << EPI.MainLoopUF
8295            << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue()
8296            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8297   });
8298 }
8299 
8300 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
8301   DEBUG_WITH_TYPE(VerboseDebug, {
8302     dbgs() << "intermediate fn:\n" << *Induction->getFunction() << "\n";
8303   });
8304 }
8305 
8306 BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(
8307     Loop *L, BasicBlock *Bypass, bool ForEpilogue) {
8308   assert(L && "Expected valid Loop.");
8309   assert(Bypass && "Expected valid bypass basic block.");
8310   unsigned VFactor =
8311       ForEpilogue ? EPI.EpilogueVF.getKnownMinValue() : VF.getKnownMinValue();
8312   unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF;
8313   Value *Count = getOrCreateTripCount(L);
8314   // Reuse existing vector loop preheader for TC checks.
8315   // Note that new preheader block is generated for vector loop.
8316   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
8317   IRBuilder<> Builder(TCCheckBlock->getTerminator());
8318 
8319   // Generate code to check if the loop's trip count is less than VF * UF of the
8320   // main vector loop.
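  // When a scalar epilogue is required, compare with ULE so that at least one
  // iteration is always left over for the scalar remainder loop.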
8321   auto P =
8322       Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
8323 
8324   Value *CheckMinIters = Builder.CreateICmp(
8325       P, Count, ConstantInt::get(Count->getType(), VFactor * UFactor),
8326       "min.iters.check");
8327 
8328   if (!ForEpilogue)
8329     TCCheckBlock->setName("vector.main.loop.iter.check");
8330 
8331   // Create new preheader for vector loop.
8332   LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
8333                                    DT, LI, nullptr, "vector.ph");
8334 
8335   if (ForEpilogue) {
8336     assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
8337                                  DT->getNode(Bypass)->getIDom()) &&
8338            "TC check is expected to dominate Bypass");
8339 
8340     // Update dominator for Bypass & LoopExit.
8341     DT->changeImmediateDominator(Bypass, TCCheckBlock);
8342     DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
8343 
8344     LoopBypassBlocks.push_back(TCCheckBlock);
8345 
8346     // Save the trip count so we don't have to regenerate it in the
8347     // vec.epilog.iter.check. This is safe to do because the trip count
8348     // generated here dominates the vector epilog iter check.
8349     EPI.TripCount = Count;
8350   }
8351 
8352   ReplaceInstWithInst(
8353       TCCheckBlock->getTerminator(),
8354       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8355 
8356   return TCCheckBlock;
8357 }
8358 
8359 //===--------------------------------------------------------------------===//
8360 // EpilogueVectorizerEpilogueLoop
8361 //===--------------------------------------------------------------------===//
8362 
8363 /// This function is partially responsible for generating the control flow
8364 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
8365 BasicBlock *
8366 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
8367   MDNode *OrigLoopID = OrigLoop->getLoopID();
8368   Loop *Lp = createVectorLoopSkeleton("vec.epilog.");
8369 
  // Now, compare the remaining count: if there aren't enough iterations to
  // execute the vectorized epilogue, skip to the scalar part.
8372   BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader;
8373   VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check");
8374   LoopVectorPreHeader =
8375       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
8376                  LI, nullptr, "vec.epilog.ph");
8377   emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader,
8378                                           VecEpilogueIterationCountCheck);
8379 
8380   // Adjust the control flow taking the state info from the main loop
8381   // vectorization into account.
8382   assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck &&
8383          "expected this to be saved from the previous pass.");
8384   EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith(
8385       VecEpilogueIterationCountCheck, LoopVectorPreHeader);
8386 
8387   DT->changeImmediateDominator(LoopVectorPreHeader,
8388                                EPI.MainLoopIterationCountCheck);
8389 
8390   EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
8391       VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8392 
8393   if (EPI.SCEVSafetyCheck)
8394     EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith(
8395         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8396   if (EPI.MemSafetyCheck)
8397     EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith(
8398         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8399 
8400   DT->changeImmediateDominator(
8401       VecEpilogueIterationCountCheck,
8402       VecEpilogueIterationCountCheck->getSinglePredecessor());
8403 
8404   DT->changeImmediateDominator(LoopScalarPreHeader,
8405                                EPI.EpilogueIterationCountCheck);
8406   DT->changeImmediateDominator(LoopExitBlock, EPI.EpilogueIterationCountCheck);
8407 
8408   // Keep track of bypass blocks, as they feed start values to the induction
8409   // phis in the scalar loop preheader.
8410   if (EPI.SCEVSafetyCheck)
8411     LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck);
8412   if (EPI.MemSafetyCheck)
8413     LoopBypassBlocks.push_back(EPI.MemSafetyCheck);
8414   LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck);
8415 
8416   // Generate a resume induction for the vector epilogue and put it in the
  // vector epilogue preheader.
8418   Type *IdxTy = Legal->getWidestInductionType();
8419   PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
8420                                          LoopVectorPreHeader->getFirstNonPHI());
8421   EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
8422   EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
8423                            EPI.MainLoopIterationCountCheck);
8424 
8425   // Generate the induction variable.
8426   OldInduction = Legal->getPrimaryInduction();
8427   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
8428   Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
8429   Value *StartIdx = EPResumeVal;
8430   Induction =
8431       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
8432                               getDebugLocFromInstOrOperands(OldInduction));
8433 
8434   // Generate induction resume values. These variables save the new starting
8435   // indexes for the scalar loop. They are used to test if there are any tail
8436   // iterations left once the vector loop has completed.
  // Note that when the vectorized epilogue is skipped due to the iteration
  // count check, the resume value for the induction variable comes from
8439   // the trip count of the main vector loop, hence passing the AdditionalBypass
8440   // argument.
8441   createInductionResumeValues(Lp, CountRoundDown,
8442                               {VecEpilogueIterationCountCheck,
8443                                EPI.VectorTripCount} /* AdditionalBypass */);
8444 
8445   AddRuntimeUnrollDisableMetaData(Lp);
8446   return completeLoopSkeleton(Lp, OrigLoopID);
8447 }
8448 
8449 BasicBlock *
8450 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
8451     Loop *L, BasicBlock *Bypass, BasicBlock *Insert) {
8452 
8453   assert(EPI.TripCount &&
         "Expected trip count to have been saved in the first pass.");
8455   assert(
8456       (!isa<Instruction>(EPI.TripCount) ||
8457        DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
8458       "saved trip count does not dominate insertion point.");
8459   Value *TC = EPI.TripCount;
8460   IRBuilder<> Builder(Insert->getTerminator());
8461   Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");
8462 
  // Generate code to check if the remaining iteration count, after the main
  // vector loop, is less than VF * UF of the vector epilogue loop.
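  // For example (IR names illustrative only), with the saved trip count %tc
  // and the main loop's vector trip count %vtc, the emitted check is roughly:
  //   %n.vec.remaining = sub %tc, %vtc
  //   %min.epilog.iters.check = icmp ult %n.vec.remaining,
  //                                      (EpilogueVF * EpilogueUF)
  // with ule instead of ult when a scalar epilogue is required.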
8465   auto P =
8466       Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
8467 
8468   Value *CheckMinIters = Builder.CreateICmp(
8469       P, Count,
8470       ConstantInt::get(Count->getType(),
8471                        EPI.EpilogueVF.getKnownMinValue() * EPI.EpilogueUF),
8472       "min.epilog.iters.check");
8473 
8474   ReplaceInstWithInst(
8475       Insert->getTerminator(),
8476       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8477 
8478   LoopBypassBlocks.push_back(Insert);
8479   return Insert;
8480 }
8481 
8482 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
8483   LLVM_DEBUG({
8484     dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
8485            << "Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue()
8486            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8487   });
8488 }
8489 
8490 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
8491   DEBUG_WITH_TYPE(VerboseDebug, {
8492     dbgs() << "final fn:\n" << *Induction->getFunction() << "\n";
8493   });
8494 }
8495 
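// Test the given predicate on Range.Start and clamp Range.End down to the
// first power-of-two VF at which the predicate's result changes, so that all
// VFs in the (possibly shortened) range share the decision made for
// Range.Start. Returns the predicate's value at Range.Start.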
8496 bool LoopVectorizationPlanner::getDecisionAndClampRange(
8497     const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
8498   assert(!Range.isEmpty() && "Trying to test an empty VF range.");
8499   bool PredicateAtRangeStart = Predicate(Range.Start);
8500 
8501   for (ElementCount TmpVF = Range.Start * 2;
8502        ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
8503     if (Predicate(TmpVF) != PredicateAtRangeStart) {
8504       Range.End = TmpVF;
8505       break;
8506     }
8507 
8508   return PredicateAtRangeStart;
8509 }
8510 
8511 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
8512 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
8513 /// of VF's starting at a given VF and extending it as much as possible. Each
8514 /// vectorization decision can potentially shorten this sub-range during
8515 /// buildVPlan().
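/// For example (illustrative), with MinVF = 2 and MaxVF = 16 the first
/// sub-range considered is {2, 4, 8, 16}; if some decision differs at VF = 8,
/// the sub-range is clamped to {2, 4} and the next VPlan is built starting at
/// VF = 8.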
8516 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
8517                                            ElementCount MaxVF) {
8518   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8519   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8520     VFRange SubRange = {VF, MaxVFPlusOne};
8521     VPlans.push_back(buildVPlan(SubRange));
8522     VF = SubRange.End;
8523   }
8524 }
8525 
8526 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
8527                                          VPlanPtr &Plan) {
8528   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
8529 
8530   // Look for cached value.
8531   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
8532   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
8533   if (ECEntryIt != EdgeMaskCache.end())
8534     return ECEntryIt->second;
8535 
8536   VPValue *SrcMask = createBlockInMask(Src, Plan);
8537 
8538   // The terminator has to be a branch inst!
8539   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
8540   assert(BI && "Unexpected terminator found");
8541 
8542   if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
8543     return EdgeMaskCache[Edge] = SrcMask;
8544 
8545   // If source is an exiting block, we know the exit edge is dynamically dead
8546   // in the vector loop, and thus we don't need to restrict the mask.  Avoid
8547   // adding uses of an otherwise potentially dead instruction.
8548   if (OrigLoop->isLoopExiting(Src))
8549     return EdgeMaskCache[Edge] = SrcMask;
8550 
8551   VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition());
8552   assert(EdgeMask && "No Edge Mask found for condition");
8553 
8554   if (BI->getSuccessor(0) != Dst)
8555     EdgeMask = Builder.createNot(EdgeMask);
8556 
8557   if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND.
8558     // The condition is 'SrcMask && EdgeMask', which is equivalent to
8559     // 'select i1 SrcMask, i1 EdgeMask, i1 false'.
8560     // The select version does not introduce new UB if SrcMask is false and
8561     // EdgeMask is poison. Using 'and' here introduces undefined behavior.
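    // For example, in a lane where SrcMask is false and EdgeMask is poison,
    // the select yields false, whereas 'and SrcMask, EdgeMask' would yield
    // poison that may later trigger UB.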
8562     VPValue *False = Plan->getOrAddVPValue(
8563         ConstantInt::getFalse(BI->getCondition()->getType()));
8564     EdgeMask = Builder.createSelect(SrcMask, EdgeMask, False);
8565   }
8566 
8567   return EdgeMaskCache[Edge] = EdgeMask;
8568 }
8569 
8570 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
8571   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
8572 
8573   // Look for cached value.
8574   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
8575   if (BCEntryIt != BlockMaskCache.end())
8576     return BCEntryIt->second;
8577 
8578   // All-one mask is modelled as no-mask following the convention for masked
8579   // load/store/gather/scatter. Initialize BlockMask to no-mask.
8580   VPValue *BlockMask = nullptr;
8581 
8582   if (OrigLoop->getHeader() == BB) {
8583     if (!CM.blockNeedsPredication(BB))
8584       return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
8585 
8586     // Create the block in mask as the first non-phi instruction in the block.
8587     VPBuilder::InsertPointGuard Guard(Builder);
8588     auto NewInsertionPoint = Builder.getInsertBlock()->getFirstNonPhi();
8589     Builder.setInsertPoint(Builder.getInsertBlock(), NewInsertionPoint);
8590 
8591     // Introduce the early-exit compare IV <= BTC to form header block mask.
8592     // This is used instead of IV < TC because TC may wrap, unlike BTC.
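    // The mask is thus, conceptually, an element-wise 'icmp ule' of the
    // widened IV lanes against the broadcast backedge-taken count, unless an
    // active-lane-mask intrinsic is emitted below instead.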
8593     // Start by constructing the desired canonical IV.
8594     VPValue *IV = nullptr;
8595     if (Legal->getPrimaryInduction())
8596       IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction());
8597     else {
8598       auto IVRecipe = new VPWidenCanonicalIVRecipe();
8599       Builder.getInsertBlock()->insert(IVRecipe, NewInsertionPoint);
8600       IV = IVRecipe->getVPSingleValue();
8601     }
8602     VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
8603     bool TailFolded = !CM.isScalarEpilogueAllowed();
8604 
8605     if (TailFolded && CM.TTI.emitGetActiveLaneMask()) {
8606       // While ActiveLaneMask is a binary op that consumes the loop tripcount
8607       // as a second argument, we only pass the IV here and extract the
8608       // tripcount from the transform state where codegen of the VP instructions
      // happens.
8610       BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV});
8611     } else {
8612       BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
8613     }
8614     return BlockMaskCache[BB] = BlockMask;
8615   }
8616 
  // This is the block mask. We OR the masks of all incoming edges.
8618   for (auto *Predecessor : predecessors(BB)) {
8619     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
8620     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
8621       return BlockMaskCache[BB] = EdgeMask;
8622 
8623     if (!BlockMask) { // BlockMask has its initialized nullptr value.
8624       BlockMask = EdgeMask;
8625       continue;
8626     }
8627 
8628     BlockMask = Builder.createOr(BlockMask, EdgeMask);
8629   }
8630 
8631   return BlockMaskCache[BB] = BlockMask;
8632 }
8633 
8634 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I,
8635                                                 ArrayRef<VPValue *> Operands,
8636                                                 VFRange &Range,
8637                                                 VPlanPtr &Plan) {
8638   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
8639          "Must be called with either a load or store");
8640 
8641   auto willWiden = [&](ElementCount VF) -> bool {
8642     if (VF.isScalar())
8643       return false;
8644     LoopVectorizationCostModel::InstWidening Decision =
8645         CM.getWideningDecision(I, VF);
8646     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
8647            "CM decision should be taken at this point.");
8648     if (Decision == LoopVectorizationCostModel::CM_Interleave)
8649       return true;
8650     if (CM.isScalarAfterVectorization(I, VF) ||
8651         CM.isProfitableToScalarize(I, VF))
8652       return false;
8653     return Decision != LoopVectorizationCostModel::CM_Scalarize;
8654   };
8655 
8656   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8657     return nullptr;
8658 
8659   VPValue *Mask = nullptr;
8660   if (Legal->isMaskRequired(I))
8661     Mask = createBlockInMask(I->getParent(), Plan);
8662 
8663   if (LoadInst *Load = dyn_cast<LoadInst>(I))
8664     return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask);
8665 
8666   StoreInst *Store = cast<StoreInst>(I);
8667   return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0],
8668                                             Mask);
8669 }
8670 
8671 VPWidenIntOrFpInductionRecipe *
8672 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi,
8673                                            ArrayRef<VPValue *> Operands) const {
8674   // Check if this is an integer or fp induction. If so, build the recipe that
8675   // produces its scalar and vector values.
8676   InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
8677   if (II.getKind() == InductionDescriptor::IK_IntInduction ||
8678       II.getKind() == InductionDescriptor::IK_FpInduction) {
8679     assert(II.getStartValue() ==
8680            Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8681     const SmallVectorImpl<Instruction *> &Casts = II.getCastInsts();
8682     return new VPWidenIntOrFpInductionRecipe(
8683         Phi, Operands[0], Casts.empty() ? nullptr : Casts.front());
8684   }
8685 
8686   return nullptr;
8687 }
8688 
8689 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate(
8690     TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range,
8691     VPlan &Plan) const {
8692   // Optimize the special case where the source is a constant integer
8693   // induction variable. Notice that we can only optimize the 'trunc' case
8694   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
8695   // (c) other casts depend on pointer size.
8696 
8697   // Determine whether \p K is a truncation based on an induction variable that
8698   // can be optimized.
8699   auto isOptimizableIVTruncate =
8700       [&](Instruction *K) -> std::function<bool(ElementCount)> {
8701     return [=](ElementCount VF) -> bool {
8702       return CM.isOptimizableIVTruncate(K, VF);
8703     };
8704   };
8705 
8706   if (LoopVectorizationPlanner::getDecisionAndClampRange(
8707           isOptimizableIVTruncate(I), Range)) {
8708 
8709     InductionDescriptor II =
8710         Legal->getInductionVars().lookup(cast<PHINode>(I->getOperand(0)));
8711     VPValue *Start = Plan.getOrAddVPValue(II.getStartValue());
8712     return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)),
8713                                              Start, nullptr, I);
8714   }
8715   return nullptr;
8716 }
8717 
8718 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi,
8719                                                 ArrayRef<VPValue *> Operands,
8720                                                 VPlanPtr &Plan) {
8721   // If all incoming values are equal, the incoming VPValue can be used directly
8722   // instead of creating a new VPBlendRecipe.
8723   VPValue *FirstIncoming = Operands[0];
8724   if (all_of(Operands, [FirstIncoming](const VPValue *Inc) {
8725         return FirstIncoming == Inc;
8726       })) {
8727     return Operands[0];
8728   }
8729 
8730   // We know that all PHIs in non-header blocks are converted into selects, so
8731   // we don't have to worry about the insertion order and we can just use the
8732   // builder. At this point we generate the predication tree. There may be
8733   // duplications since this is a simple recursive scan, but future
8734   // optimizations will clean it up.
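  // The resulting blend recipe interleaves each incoming value with its edge
  // mask, i.e. its operands are {In0, Mask0, In1, Mask1, ...}; the mask is
  // omitted when there is a single incoming value.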
8735   SmallVector<VPValue *, 2> OperandsWithMask;
8736   unsigned NumIncoming = Phi->getNumIncomingValues();
8737 
8738   for (unsigned In = 0; In < NumIncoming; In++) {
8739     VPValue *EdgeMask =
8740       createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
8741     assert((EdgeMask || NumIncoming == 1) &&
8742            "Multiple predecessors with one having a full mask");
8743     OperandsWithMask.push_back(Operands[In]);
8744     if (EdgeMask)
8745       OperandsWithMask.push_back(EdgeMask);
8746   }
8747   return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask));
8748 }
8749 
8750 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
8751                                                    ArrayRef<VPValue *> Operands,
8752                                                    VFRange &Range) const {
8753 
8754   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8755       [this, CI](ElementCount VF) { return CM.isScalarWithPredication(CI); },
8756       Range);
8757 
8758   if (IsPredicated)
8759     return nullptr;
8760 
8761   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8762   if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
8763              ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
8764              ID == Intrinsic::pseudoprobe ||
8765              ID == Intrinsic::experimental_noalias_scope_decl))
8766     return nullptr;
8767 
8768   auto willWiden = [&](ElementCount VF) -> bool {
8769     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    // The following case may be scalarized depending on the VF.
    // The UseVectorIntrinsic flag below shows whether we use an intrinsic or a
    // regular call for the vectorized version of the instruction, i.e. whether
    // the intrinsic call is more beneficial than a library call.
8774     bool NeedToScalarize = false;
8775     InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
8776     InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0;
8777     bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
8778     assert((IntrinsicCost.isValid() || CallCost.isValid()) &&
8779            "Either the intrinsic cost or vector call cost must be valid");
8780     return UseVectorIntrinsic || !NeedToScalarize;
8781   };
8782 
8783   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8784     return nullptr;
8785 
8786   ArrayRef<VPValue *> Ops = Operands.take_front(CI->getNumArgOperands());
8787   return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end()));
8788 }
8789 
8790 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
8791   assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
8792          !isa<StoreInst>(I) && "Instruction should have been handled earlier");
  // The instruction should be widened, unless it is scalar after
  // vectorization, scalarization is profitable, or it is predicated.
8795   auto WillScalarize = [this, I](ElementCount VF) -> bool {
8796     return CM.isScalarAfterVectorization(I, VF) ||
8797            CM.isProfitableToScalarize(I, VF) || CM.isScalarWithPredication(I);
8798   };
8799   return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
8800                                                              Range);
8801 }
8802 
8803 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
8804                                            ArrayRef<VPValue *> Operands) const {
8805   auto IsVectorizableOpcode = [](unsigned Opcode) {
8806     switch (Opcode) {
8807     case Instruction::Add:
8808     case Instruction::And:
8809     case Instruction::AShr:
8810     case Instruction::BitCast:
8811     case Instruction::FAdd:
8812     case Instruction::FCmp:
8813     case Instruction::FDiv:
8814     case Instruction::FMul:
8815     case Instruction::FNeg:
8816     case Instruction::FPExt:
8817     case Instruction::FPToSI:
8818     case Instruction::FPToUI:
8819     case Instruction::FPTrunc:
8820     case Instruction::FRem:
8821     case Instruction::FSub:
8822     case Instruction::ICmp:
8823     case Instruction::IntToPtr:
8824     case Instruction::LShr:
8825     case Instruction::Mul:
8826     case Instruction::Or:
8827     case Instruction::PtrToInt:
8828     case Instruction::SDiv:
8829     case Instruction::Select:
8830     case Instruction::SExt:
8831     case Instruction::Shl:
8832     case Instruction::SIToFP:
8833     case Instruction::SRem:
8834     case Instruction::Sub:
8835     case Instruction::Trunc:
8836     case Instruction::UDiv:
8837     case Instruction::UIToFP:
8838     case Instruction::URem:
8839     case Instruction::Xor:
8840     case Instruction::ZExt:
8841       return true;
8842     }
8843     return false;
8844   };
8845 
8846   if (!IsVectorizableOpcode(I->getOpcode()))
8847     return nullptr;
8848 
8849   // Success: widen this instruction.
8850   return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end()));
8851 }
8852 
8853 void VPRecipeBuilder::fixHeaderPhis() {
8854   BasicBlock *OrigLatch = OrigLoop->getLoopLatch();
8855   for (VPWidenPHIRecipe *R : PhisToFix) {
8856     auto *PN = cast<PHINode>(R->getUnderlyingValue());
8857     VPRecipeBase *IncR =
8858         getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch)));
8859     R->addOperand(IncR->getVPSingleValue());
8860   }
8861 }
8862 
8863 VPBasicBlock *VPRecipeBuilder::handleReplication(
8864     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
8865     VPlanPtr &Plan) {
8866   bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
8867       [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
8868       Range);
8869 
8870   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8871       [&](ElementCount VF) { return CM.isPredicatedInst(I); }, Range);
8872 
8873   auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
8874                                        IsUniform, IsPredicated);
8875   setRecipe(I, Recipe);
8876   Plan->addVPValue(I, Recipe);
8877 
  // Check if I uses a predicated instruction. If so, I will use that
  // instruction's scalar value. Avoid hoisting the insert-element which packs
  // the scalar value into a vector value, as that happens iff all users use
  // the vector value.
8881   for (VPValue *Op : Recipe->operands()) {
8882     auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef());
8883     if (!PredR)
8884       continue;
8885     auto *RepR =
8886         cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef());
8887     assert(RepR->isPredicated() &&
8888            "expected Replicate recipe to be predicated");
8889     RepR->setAlsoPack(false);
8890   }
8891 
  // Finalize the recipe for Instr, handling the non-predicated case first.
8893   if (!IsPredicated) {
8894     LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
8895     VPBB->appendRecipe(Recipe);
8896     return VPBB;
8897   }
8898   LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
8899   assert(VPBB->getSuccessors().empty() &&
8900          "VPBB has successors when handling predicated replication.");
8901   // Record predicated instructions for above packing optimizations.
8902   VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
8903   VPBlockUtils::insertBlockAfter(Region, VPBB);
8904   auto *RegSucc = new VPBasicBlock();
8905   VPBlockUtils::insertBlockAfter(RegSucc, Region);
8906   return RegSucc;
8907 }
8908 
8909 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
8910                                                       VPRecipeBase *PredRecipe,
8911                                                       VPlanPtr &Plan) {
8912   // Instructions marked for predication are replicated and placed under an
8913   // if-then construct to prevent side-effects.
8914 
8915   // Generate recipes to compute the block mask for this region.
8916   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
8917 
8918   // Build the triangular if-then region.
8919   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
8920   assert(Instr->getParent() && "Predicated instruction not in any basic block");
8921   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
8922   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
8923   auto *PHIRecipe = Instr->getType()->isVoidTy()
8924                         ? nullptr
8925                         : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr));
8926   if (PHIRecipe) {
8927     Plan->removeVPValueFor(Instr);
8928     Plan->addVPValue(Instr, PHIRecipe);
8929   }
8930   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
8931   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
8932   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
8933 
8934   // Note: first set Entry as region entry and then connect successors starting
8935   // from it in order, to propagate the "parent" of each VPBasicBlock.
8936   VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
8937   VPBlockUtils::connectBlocks(Pred, Exit);
8938 
8939   return Region;
8940 }
8941 
8942 VPRecipeOrVPValueTy
8943 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
8944                                         ArrayRef<VPValue *> Operands,
8945                                         VFRange &Range, VPlanPtr &Plan) {
8946   // First, check for specific widening recipes that deal with calls, memory
8947   // operations, inductions and Phi nodes.
8948   if (auto *CI = dyn_cast<CallInst>(Instr))
8949     return toVPRecipeResult(tryToWidenCall(CI, Operands, Range));
8950 
8951   if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8952     return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan));
8953 
8954   VPRecipeBase *Recipe;
8955   if (auto Phi = dyn_cast<PHINode>(Instr)) {
8956     if (Phi->getParent() != OrigLoop->getHeader())
8957       return tryToBlend(Phi, Operands, Plan);
8958     if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands)))
8959       return toVPRecipeResult(Recipe);
8960 
8961     if (Legal->isReductionVariable(Phi)) {
8962       RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
8963       assert(RdxDesc.getRecurrenceStartValue() ==
8964              Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8965       VPValue *StartV = Operands[0];
8966 
8967       auto *PhiRecipe = new VPWidenPHIRecipe(Phi, RdxDesc, *StartV);
8968       PhisToFix.push_back(PhiRecipe);
8969       // Record the incoming value from the backedge, so we can add the incoming
8970       // value from the backedge after all recipes have been created.
8971       recordRecipeOf(cast<Instruction>(
8972           Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch())));
8973       return toVPRecipeResult(PhiRecipe);
8974     }
8975 
8976     return toVPRecipeResult(new VPWidenPHIRecipe(Phi));
8977   }
8978 
8979   if (isa<TruncInst>(Instr) &&
8980       (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands,
8981                                                Range, *Plan)))
8982     return toVPRecipeResult(Recipe);
8983 
8984   if (!shouldWiden(Instr, Range))
8985     return nullptr;
8986 
8987   if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
8988     return toVPRecipeResult(new VPWidenGEPRecipe(
8989         GEP, make_range(Operands.begin(), Operands.end()), OrigLoop));
8990 
8991   if (auto *SI = dyn_cast<SelectInst>(Instr)) {
8992     bool InvariantCond =
8993         PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
8994     return toVPRecipeResult(new VPWidenSelectRecipe(
8995         *SI, make_range(Operands.begin(), Operands.end()), InvariantCond));
8996   }
8997 
8998   return toVPRecipeResult(tryToWiden(Instr, Operands));
8999 }
9000 
9001 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
9002                                                         ElementCount MaxVF) {
9003   assert(OrigLoop->isInnermost() && "Inner loop expected.");
9004 
9005   // Collect instructions from the original loop that will become trivially dead
9006   // in the vectorized loop. We don't need to vectorize these instructions. For
9007   // example, original induction update instructions can become dead because we
9008   // separately emit induction "steps" when generating code for the new loop.
9009   // Similarly, we create a new latch condition when setting up the structure
9010   // of the new loop, so the old one can become dead.
9011   SmallPtrSet<Instruction *, 4> DeadInstructions;
9012   collectTriviallyDeadInstructions(DeadInstructions);
9013 
9014   // Add assume instructions we need to drop to DeadInstructions, to prevent
9015   // them from being added to the VPlan.
  // TODO: We only need to drop assumes in blocks that get flattened. If the
9017   // control flow is preserved, we should keep them.
9018   auto &ConditionalAssumes = Legal->getConditionalAssumes();
9019   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
9020 
9021   MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
9022   // Dead instructions do not need sinking. Remove them from SinkAfter.
9023   for (Instruction *I : DeadInstructions)
9024     SinkAfter.erase(I);
9025 
9026   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
9027   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
9028     VFRange SubRange = {VF, MaxVFPlusOne};
9029     VPlans.push_back(
9030         buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
9031     VF = SubRange.End;
9032   }
9033 }
9034 
9035 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
9036     VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
9037     const MapVector<Instruction *, Instruction *> &SinkAfter) {
9038 
9039   SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
9040 
9041   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
9042 
9043   // ---------------------------------------------------------------------------
9044   // Pre-construction: record ingredients whose recipes we'll need to further
9045   // process after constructing the initial VPlan.
9046   // ---------------------------------------------------------------------------
9047 
9048   // Mark instructions we'll need to sink later and their targets as
9049   // ingredients whose recipe we'll need to record.
9050   for (auto &Entry : SinkAfter) {
9051     RecipeBuilder.recordRecipeOf(Entry.first);
9052     RecipeBuilder.recordRecipeOf(Entry.second);
9053   }
9054   for (auto &Reduction : CM.getInLoopReductionChains()) {
9055     PHINode *Phi = Reduction.first;
9056     RecurKind Kind = Legal->getReductionVars()[Phi].getRecurrenceKind();
9057     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9058 
9059     RecipeBuilder.recordRecipeOf(Phi);
9060     for (auto &R : ReductionOperations) {
9061       RecipeBuilder.recordRecipeOf(R);
      // For min/max reductions, where we have a pair of icmp/select, we also
9063       // need to record the ICmp recipe, so it can be removed later.
9064       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
9065         RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
9066     }
9067   }
9068 
9069   // For each interleave group which is relevant for this (possibly trimmed)
9070   // Range, add it to the set of groups to be later applied to the VPlan and add
9071   // placeholders for its members' Recipes which we'll be replacing with a
9072   // single VPInterleaveRecipe.
9073   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
9074     auto applyIG = [IG, this](ElementCount VF) -> bool {
9075       return (VF.isVector() && // Query is illegal for VF == 1
9076               CM.getWideningDecision(IG->getInsertPos(), VF) ==
9077                   LoopVectorizationCostModel::CM_Interleave);
9078     };
9079     if (!getDecisionAndClampRange(applyIG, Range))
9080       continue;
9081     InterleaveGroups.insert(IG);
9082     for (unsigned i = 0; i < IG->getFactor(); i++)
9083       if (Instruction *Member = IG->getMember(i))
9084         RecipeBuilder.recordRecipeOf(Member);
  }
9086 
9087   // ---------------------------------------------------------------------------
9088   // Build initial VPlan: Scan the body of the loop in a topological order to
9089   // visit each basic block after having visited its predecessor basic blocks.
9090   // ---------------------------------------------------------------------------
9091 
9092   // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
9093   auto Plan = std::make_unique<VPlan>();
9094   VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
9095   Plan->setEntry(VPBB);
9096 
9097   // Scan the body of the loop in a topological order to visit each basic block
9098   // after having visited its predecessor basic blocks.
9099   LoopBlocksDFS DFS(OrigLoop);
9100   DFS.perform(LI);
9101 
9102   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
9103     // Relevant instructions from basic block BB will be grouped into VPRecipe
9104     // ingredients and fill a new VPBasicBlock.
9105     unsigned VPBBsForBB = 0;
9106     auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
9107     VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
9108     VPBB = FirstVPBBForBB;
9109     Builder.setInsertPoint(VPBB);
9110 
9111     // Introduce each ingredient into VPlan.
    // TODO: Model and preserve debug intrinsics in VPlan.
9113     for (Instruction &I : BB->instructionsWithoutDebug()) {
9114       Instruction *Instr = &I;
9115 
9116       // First filter out irrelevant instructions, to ensure no recipes are
9117       // built for them.
9118       if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
9119         continue;
9120 
9121       SmallVector<VPValue *, 4> Operands;
9122       auto *Phi = dyn_cast<PHINode>(Instr);
9123       if (Phi && Phi->getParent() == OrigLoop->getHeader()) {
9124         Operands.push_back(Plan->getOrAddVPValue(
9125             Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())));
9126       } else {
9127         auto OpRange = Plan->mapToVPValues(Instr->operands());
9128         Operands = {OpRange.begin(), OpRange.end()};
9129       }
9130       if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe(
9131               Instr, Operands, Range, Plan)) {
9132         // If Instr can be simplified to an existing VPValue, use it.
9133         if (RecipeOrValue.is<VPValue *>()) {
9134           auto *VPV = RecipeOrValue.get<VPValue *>();
9135           Plan->addVPValue(Instr, VPV);
9136           // If the re-used value is a recipe, register the recipe for the
9137           // instruction, in case the recipe for Instr needs to be recorded.
9138           if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef()))
9139             RecipeBuilder.setRecipe(Instr, R);
9140           continue;
9141         }
9142         // Otherwise, add the new recipe.
9143         VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>();
9144         for (auto *Def : Recipe->definedValues()) {
9145           auto *UV = Def->getUnderlyingValue();
9146           Plan->addVPValue(UV, Def);
9147         }
9148 
9149         RecipeBuilder.setRecipe(Instr, Recipe);
9150         VPBB->appendRecipe(Recipe);
9151         continue;
9152       }
9153 
9154       // Otherwise, if all widening options failed, Instruction is to be
9155       // replicated. This may create a successor for VPBB.
9156       VPBasicBlock *NextVPBB =
9157           RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan);
9158       if (NextVPBB != VPBB) {
9159         VPBB = NextVPBB;
9160         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
9161                                     : "");
9162       }
9163     }
9164   }
9165 
9166   RecipeBuilder.fixHeaderPhis();
9167 
  // Discard the empty dummy pre-entry VPBasicBlock. Note that other
  // VPBasicBlocks may also be empty, such as the last one (VPBB), reflecting
  // original basic blocks with no recipes.
9171   VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
9172   assert(PreEntry->empty() && "Expecting empty pre-entry block.");
9173   VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
9174   VPBlockUtils::disconnectBlocks(PreEntry, Entry);
9175   delete PreEntry;
9176 
9177   // ---------------------------------------------------------------------------
9178   // Transform initial VPlan: Apply previously taken decisions, in order, to
9179   // bring the VPlan to its final state.
9180   // ---------------------------------------------------------------------------
9181 
9182   // Apply Sink-After legal constraints.
9183   for (auto &Entry : SinkAfter) {
9184     VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
9185     VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
9186 
9187     auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * {
9188       auto *Region =
9189           dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent());
9190       if (Region && Region->isReplicator())
9191         return Region;
9192       return nullptr;
9193     };
9194 
9195     // If the target is in a replication region, make sure to move Sink to the
9196     // block after it, not into the replication region itself.
9197     if (auto *TargetRegion = GetReplicateRegion(Target)) {
9198       assert(TargetRegion->getNumSuccessors() == 1 && "Expected SESE region!");
9199       assert(!GetReplicateRegion(Sink) &&
9200              "cannot sink a region into another region yet");
9201       VPBasicBlock *NextBlock =
9202           cast<VPBasicBlock>(TargetRegion->getSuccessors().front());
9203       Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi());
9204       continue;
9205     }
9206 
9207     auto *SinkRegion = GetReplicateRegion(Sink);
9208     // Unless the sink source is in a replicate region, sink the recipe
9209     // directly.
9210     if (!SinkRegion) {
9211       Sink->moveAfter(Target);
9212       continue;
9213     }
9214 
9215     // If the sink source is in a replicate region, we need to move the whole
9216     // replicate region, which should only contain a single recipe in the main
9217     // block.
9218     assert(Sink->getParent()->size() == 1 &&
9219            "parent must be a replicator with a single recipe");
9220     auto *SplitBlock =
9221         Target->getParent()->splitAt(std::next(Target->getIterator()));
9222 
9223     auto *Pred = SinkRegion->getSinglePredecessor();
9224     auto *Succ = SinkRegion->getSingleSuccessor();
9225     VPBlockUtils::disconnectBlocks(Pred, SinkRegion);
9226     VPBlockUtils::disconnectBlocks(SinkRegion, Succ);
9227     VPBlockUtils::connectBlocks(Pred, Succ);
9228 
9229     auto *SplitPred = SplitBlock->getSinglePredecessor();
9230 
9231     VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock);
9232     VPBlockUtils::connectBlocks(SplitPred, SinkRegion);
9233     VPBlockUtils::connectBlocks(SinkRegion, SplitBlock);
9234     if (VPBB == SplitPred)
9235       VPBB = SplitBlock;
9236   }
9237 
9238   // Interleave memory: for each Interleave Group we marked earlier as relevant
9239   // for this VPlan, replace the Recipes widening its memory instructions with a
9240   // single VPInterleaveRecipe at its insertion point.
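  // For example (illustrative), two strided loads of A[2*i] and A[2*i+1] that
  // form a factor-2 group are modeled by one VPInterleaveRecipe at the group's
  // insert position, and the members' original widened-load recipes are erased
  // below.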
9241   for (auto IG : InterleaveGroups) {
9242     auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
9243         RecipeBuilder.getRecipe(IG->getInsertPos()));
9244     SmallVector<VPValue *, 4> StoredValues;
9245     for (unsigned i = 0; i < IG->getFactor(); ++i)
9246       if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i)))
9247         StoredValues.push_back(Plan->getOrAddVPValue(SI->getOperand(0)));
9248 
9249     auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues,
9250                                         Recipe->getMask());
9251     VPIG->insertBefore(Recipe);
9252     unsigned J = 0;
9253     for (unsigned i = 0; i < IG->getFactor(); ++i)
9254       if (Instruction *Member = IG->getMember(i)) {
9255         if (!Member->getType()->isVoidTy()) {
9256           VPValue *OriginalV = Plan->getVPValue(Member);
9257           Plan->removeVPValueFor(Member);
9258           Plan->addVPValue(Member, VPIG->getVPValue(J));
9259           OriginalV->replaceAllUsesWith(VPIG->getVPValue(J));
9260           J++;
9261         }
9262         RecipeBuilder.getRecipe(Member)->eraseFromParent();
9263       }
9264   }
9265 
9266   // Adjust the recipes for any inloop reductions.
9267   if (Range.Start.isVector())
9268     adjustRecipesForInLoopReductions(Plan, RecipeBuilder);
9269 
9270   // Finally, if tail is folded by masking, introduce selects between the phi
9271   // and the live-out instruction of each reduction, at the end of the latch.
9272   if (CM.foldTailByMasking() && !Legal->getReductionVars().empty()) {
9273     Builder.setInsertPoint(VPBB);
9274     auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
9275     for (auto &Reduction : Legal->getReductionVars()) {
9276       if (CM.isInLoopReduction(Reduction.first))
9277         continue;
9278       VPValue *Phi = Plan->getOrAddVPValue(Reduction.first);
9279       VPValue *Red = Plan->getOrAddVPValue(Reduction.second.getLoopExitInstr());
9280       Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi});
9281     }
9282   }
9283 
9284   VPlanTransforms::sinkScalarOperands(*Plan);
9285 
9286   std::string PlanName;
9287   raw_string_ostream RSO(PlanName);
9288   ElementCount VF = Range.Start;
9289   Plan->addVF(VF);
9290   RSO << "Initial VPlan for VF={" << VF;
9291   for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) {
9292     Plan->addVF(VF);
9293     RSO << "," << VF;
9294   }
9295   RSO << "},UF>=1";
9296   RSO.flush();
9297   Plan->setName(PlanName);
9298 
9299   return Plan;
9300 }
9301 
9302 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
  // Outer loop handling: outer loops may require CFG and instruction-level
  // transformations before we can even evaluate whether vectorization is
  // profitable.
9305   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
9306   // the vectorization pipeline.
9307   assert(!OrigLoop->isInnermost());
9308   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
9309 
9310   // Create new empty VPlan
9311   auto Plan = std::make_unique<VPlan>();
9312 
9313   // Build hierarchical CFG
9314   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
9315   HCFGBuilder.buildHierarchicalCFG();
9316 
9317   for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
9318        VF *= 2)
9319     Plan->addVF(VF);
9320 
9321   if (EnableVPlanPredication) {
9322     VPlanPredicator VPP(*Plan);
9323     VPP.predicate();
9324 
9325     // Avoid running transformation to recipes until masked code generation in
9326     // VPlan-native path is in place.
9327     return Plan;
9328   }
9329 
9330   SmallPtrSet<Instruction *, 1> DeadInstructions;
9331   VPlanTransforms::VPInstructionsToVPRecipes(OrigLoop, Plan,
9332                                              Legal->getInductionVars(),
9333                                              DeadInstructions, *PSE.getSE());
9334   return Plan;
9335 }
9336 
9337 // Adjust the recipes for any inloop reductions. The chain of instructions
// leading from the loop exit instr to the phi needs to be converted to
9339 // reductions, with one operand being vector and the other being the scalar
9340 // reduction chain.
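// For example (illustrative), for an in-loop integer add reduction the
// VPWidenRecipe that would have computed 'add %chain, %vecop' is replaced by a
// VPReductionRecipe that reduces %vecop to a scalar and adds the result to the
// scalar chain value.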
9341 void LoopVectorizationPlanner::adjustRecipesForInLoopReductions(
9342     VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder) {
9343   for (auto &Reduction : CM.getInLoopReductionChains()) {
9344     PHINode *Phi = Reduction.first;
9345     RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
9346     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9347 
    // ReductionOperations are ordered top-down from the phi's use to the
    // LoopExitValue. We keep track of the previous item (the Chain) to tell
9350     // which of the two operands will remain scalar and which will be reduced.
9351     // For minmax the chain will be the select instructions.
9352     Instruction *Chain = Phi;
9353     for (Instruction *R : ReductionOperations) {
9354       VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
9355       RecurKind Kind = RdxDesc.getRecurrenceKind();
9356 
9357       VPValue *ChainOp = Plan->getVPValue(Chain);
9358       unsigned FirstOpId;
9359       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9360         assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
9361                "Expected to replace a VPWidenSelectSC");
9362         FirstOpId = 1;
9363       } else {
9364         assert(isa<VPWidenRecipe>(WidenRecipe) &&
9365                "Expected to replace a VPWidenSC");
9366         FirstOpId = 0;
9367       }
9368       unsigned VecOpId =
9369           R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
9370       VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));
9371 
9372       auto *CondOp = CM.foldTailByMasking()
9373                          ? RecipeBuilder.createBlockInMask(R->getParent(), Plan)
9374                          : nullptr;
9375       VPReductionRecipe *RedRecipe = new VPReductionRecipe(
9376           &RdxDesc, R, ChainOp, VecOp, CondOp, TTI);
9377       WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
9378       Plan->removeVPValueFor(R);
9379       Plan->addVPValue(R, RedRecipe);
9380       WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator());
9381       WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
9382       WidenRecipe->eraseFromParent();
9383 
9384       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9385         VPRecipeBase *CompareRecipe =
9386             RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
9387         assert(isa<VPWidenRecipe>(CompareRecipe) &&
9388                "Expected to replace a VPWidenSC");
9389         assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 &&
9390                "Expected no remaining users");
9391         CompareRecipe->eraseFromParent();
9392       }
9393       Chain = R;
9394     }
9395   }
9396 }
9397 
9398 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
9399 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
9400                                VPSlotTracker &SlotTracker) const {
9401   O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
9402   IG->getInsertPos()->printAsOperand(O, false);
9403   O << ", ";
9404   getAddr()->printAsOperand(O, SlotTracker);
9405   VPValue *Mask = getMask();
9406   if (Mask) {
9407     O << ", ";
9408     Mask->printAsOperand(O, SlotTracker);
9409   }
9410   for (unsigned i = 0; i < IG->getFactor(); ++i)
9411     if (Instruction *I = IG->getMember(i))
9412       O << "\n" << Indent << "  " << VPlanIngredient(I) << " " << i;
9413 }
9414 #endif
9415 
9416 void VPWidenCallRecipe::execute(VPTransformState &State) {
9417   State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this,
9418                                   *this, State);
9419 }
9420 
9421 void VPWidenSelectRecipe::execute(VPTransformState &State) {
9422   State.ILV->widenSelectInstruction(*cast<SelectInst>(getUnderlyingInstr()),
9423                                     this, *this, InvariantCond, State);
9424 }
9425 
9426 void VPWidenRecipe::execute(VPTransformState &State) {
9427   State.ILV->widenInstruction(*getUnderlyingInstr(), this, *this, State);
9428 }
9429 
9430 void VPWidenGEPRecipe::execute(VPTransformState &State) {
9431   State.ILV->widenGEP(cast<GetElementPtrInst>(getUnderlyingInstr()), this,
9432                       *this, State.UF, State.VF, IsPtrLoopInvariant,
9433                       IsIndexLoopInvariant, State);
9434 }
9435 
9436 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
9437   assert(!State.Instance && "Int or FP induction being replicated.");
9438   State.ILV->widenIntOrFpInduction(IV, getStartValue()->getLiveInIRValue(),
9439                                    getTruncInst(), getVPValue(0),
9440                                    getCastValue(), State);
9441 }
9442 
9443 void VPWidenPHIRecipe::execute(VPTransformState &State) {
9444   State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), RdxDesc,
9445                                  this, State);
9446 }
9447 
9448 void VPBlendRecipe::execute(VPTransformState &State) {
9449   State.ILV->setDebugLocFromInst(State.Builder, Phi);
9450   // We know that all PHIs in non-header blocks are converted into
9451   // selects, so we don't have to worry about the insertion order and we
9452   // can just use the builder.
9453   // At this point we generate the predication tree. There may be
9454   // duplications since this is a simple recursive scan, but future
9455   // optimizations will clean it up.
9456 
9457   unsigned NumIncoming = getNumIncomingValues();
9458 
9459   // Generate a sequence of selects of the form:
9460   // SELECT(Mask3, In3,
9461   //        SELECT(Mask2, In2,
9462   //               SELECT(Mask1, In1,
9463   //                      In0)))
  // Note that Mask0 is never used: lanes for which no path reaches this phi
  // are essentially undef and are taken from In0.
9466   InnerLoopVectorizer::VectorParts Entry(State.UF);
9467   for (unsigned In = 0; In < NumIncoming; ++In) {
9468     for (unsigned Part = 0; Part < State.UF; ++Part) {
9469       // We might have single edge PHIs (blocks) - use an identity
9470       // 'select' for the first PHI operand.
9471       Value *In0 = State.get(getIncomingValue(In), Part);
9472       if (In == 0)
9473         Entry[Part] = In0; // Initialize with the first incoming value.
9474       else {
9475         // Select between the current value and the previous incoming edge
9476         // based on the incoming mask.
9477         Value *Cond = State.get(getMask(In), Part);
9478         Entry[Part] =
9479             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
9480       }
9481     }
9482   }
9483   for (unsigned Part = 0; Part < State.UF; ++Part)
9484     State.set(this, Entry[Part], Part);
9485 }
9486 
9487 void VPInterleaveRecipe::execute(VPTransformState &State) {
9488   assert(!State.Instance && "Interleave group being replicated.");
9489   State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(),
9490                                       getStoredValues(), getMask());
9491 }
9492 
9493 void VPReductionRecipe::execute(VPTransformState &State) {
9494   assert(!State.Instance && "Reduction being replicated.");
9495   Value *PrevInChain = State.get(getChainOp(), 0);
9496   for (unsigned Part = 0; Part < State.UF; ++Part) {
9497     RecurKind Kind = RdxDesc->getRecurrenceKind();
9498     bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc);
9499     Value *NewVecOp = State.get(getVecOp(), Part);
9500     if (VPValue *Cond = getCondOp()) {
9501       Value *NewCond = State.get(Cond, Part);
9502       VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
9503       Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
9504           Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags());
9505       Constant *IdenVec =
9506           ConstantVector::getSplat(VecTy->getElementCount(), Iden);
9507       Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
9508       NewVecOp = Select;
9509     }
9510     Value *NewRed;
9511     Value *NextInChain;
9512     if (IsOrdered) {
9513       NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp,
9514                                       PrevInChain);
9515       PrevInChain = NewRed;
9516     } else {
9517       PrevInChain = State.get(getChainOp(), Part);
9518       NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
9519     }
9520     if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9521       NextInChain =
9522           createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
9523                          NewRed, PrevInChain);
9524     } else if (IsOrdered)
9525       NextInChain = NewRed;
9526     else {
9527       NextInChain = State.Builder.CreateBinOp(
9528           (Instruction::BinaryOps)getUnderlyingInstr()->getOpcode(), NewRed,
9529           PrevInChain);
9530     }
9531     State.set(this, NextInChain, Part);
9532   }
9533 }
9534 
9535 void VPReplicateRecipe::execute(VPTransformState &State) {
9536   if (State.Instance) { // Generate a single instance.
9537     assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
9538     State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this,
9539                                     *State.Instance, IsPredicated, State);
9540     // Insert scalar instance packing it into a vector.
9541     if (AlsoPack && State.VF.isVector()) {
9542       // If we're constructing lane 0, initialize to start from poison.
9543       if (State.Instance->Lane.isFirstLane()) {
9544         assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
9545         Value *Poison = PoisonValue::get(
9546             VectorType::get(getUnderlyingValue()->getType(), State.VF));
9547         State.set(this, Poison, State.Instance->Part);
9548       }
9549       State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
9550     }
9551     return;
9552   }
9553 
  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane for
  // each of the UF parts.
9557   unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
9558   assert((!State.VF.isScalable() || IsUniform) &&
9559          "Can't scalarize a scalable vector");
9560   for (unsigned Part = 0; Part < State.UF; ++Part)
9561     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
9562       State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this,
9563                                       VPIteration(Part, Lane), IsPredicated,
9564                                       State);
9565 }
9566 
9567 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
9568   assert(State.Instance && "Branch on Mask works only on single instance.");
9569 
9570   unsigned Part = State.Instance->Part;
9571   unsigned Lane = State.Instance->Lane.getKnownLane();
9572 
9573   Value *ConditionBit = nullptr;
9574   VPValue *BlockInMask = getMask();
9575   if (BlockInMask) {
9576     ConditionBit = State.get(BlockInMask, Part);
9577     if (ConditionBit->getType()->isVectorTy())
9578       ConditionBit = State.Builder.CreateExtractElement(
9579           ConditionBit, State.Builder.getInt32(Lane));
9580   } else // Block in mask is all-one.
9581     ConditionBit = State.Builder.getTrue();
9582 
9583   // Replace the temporary unreachable terminator with a new conditional branch,
9584   // whose two destinations will be set later when they are created.
9585   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
9586   assert(isa<UnreachableInst>(CurrentTerminator) &&
9587          "Expected to replace unreachable terminator with conditional branch.");
9588   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
9589   CondBr->setSuccessor(0, nullptr);
9590   ReplaceInstWithInst(CurrentTerminator, CondBr);
9591 }
9592 
9593 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
9594   assert(State.Instance && "Predicated instruction PHI works per instance.");
9595   Instruction *ScalarPredInst =
9596       cast<Instruction>(State.get(getOperand(0), *State.Instance));
9597   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
9598   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
9599   assert(PredicatingBB && "Predicated block has no single predecessor.");
9600   assert(isa<VPReplicateRecipe>(getOperand(0)) &&
9601          "operand must be VPReplicateRecipe");
9602 
9603   // By current pack/unpack logic we need to generate only a single phi node: if
9604   // a vector value for the predicated instruction exists at this point it means
9605   // the instruction has vector users only, and a phi for the vector value is
9606   // needed. In this case the recipe of the predicated instruction is marked to
9607   // also do that packing, thereby "hoisting" the insert-element sequence.
9608   // Otherwise, a phi node for the scalar value is needed.
9609   unsigned Part = State.Instance->Part;
9610   if (State.hasVectorValue(getOperand(0), Part)) {
9611     Value *VectorValue = State.get(getOperand(0), Part);
9612     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
9613     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
9614     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
9615     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
9616     if (State.hasVectorValue(this, Part))
9617       State.reset(this, VPhi, Part);
9618     else
9619       State.set(this, VPhi, Part);
9620     // NOTE: Currently we need to update the value of the operand, so the next
9621     // predicated iteration inserts its generated value in the correct vector.
9622     State.reset(getOperand(0), VPhi, Part);
9623   } else {
9624     Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType();
9625     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
9626     Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
9627                      PredicatingBB);
9628     Phi->addIncoming(ScalarPredInst, PredicatedBB);
9629     if (State.hasScalarValue(this, *State.Instance))
9630       State.reset(this, Phi, *State.Instance);
9631     else
9632       State.set(this, Phi, *State.Instance);
9633     // NOTE: Currently we need to update the value of the operand, so the next
9634     // predicated iteration inserts its generated value in the correct vector.
9635     State.reset(getOperand(0), Phi, *State.Instance);
9636   }
9637 }
9638 
9639 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
9640   VPValue *StoredValue = isStore() ? getStoredValue() : nullptr;
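  // A store does not define a value, so pass a null VPValue as the definition
  // in that case; for a load, the widened result is attached to this recipe's
  // single defined VPValue.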
9641   State.ILV->vectorizeMemoryInstruction(
9642       &Ingredient, State, StoredValue ? nullptr : getVPSingleValue(), getAddr(),
9643       StoredValue, getMask());
9644 }
9645 
9646 // Determine how to lower the scalar epilogue, which depends on 1) optimising
9647 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
9648 // predication, and 4) a TTI hook that analyses whether the loop is suitable
9649 // for predication.
9650 static ScalarEpilogueLowering getScalarEpilogueLowering(
9651     Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
9652     BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
9653     AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
9654     LoopVectorizationLegality &LVL) {
9655   // 1) OptSize takes precedence over all other options, i.e. if this is set,
9656   // don't look at hints or options, and don't request a scalar epilogue.
9657   // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
9658   // LoopAccessInfo (due to code dependency and not being able to reliably get
9659   // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
9660   // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
9661   // versioning when the vectorization is forced, unlike hasOptSize. So revert
9662   // back to the old way and vectorize with versioning when forced. See D81345.)
9663   if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
9664                                                       PGSOQueryType::IRPass) &&
9665                           Hints.getForce() != LoopVectorizeHints::FK_Enabled))
9666     return CM_ScalarEpilogueNotAllowedOptSize;
9667 
9668   // 2) If set, obey the directives
9669   if (PreferPredicateOverEpilogue.getNumOccurrences()) {
9670     switch (PreferPredicateOverEpilogue) {
9671     case PreferPredicateTy::ScalarEpilogue:
9672       return CM_ScalarEpilogueAllowed;
9673     case PreferPredicateTy::PredicateElseScalarEpilogue:
9674       return CM_ScalarEpilogueNotNeededUsePredicate;
9675     case PreferPredicateTy::PredicateOrDontVectorize:
9676       return CM_ScalarEpilogueNotAllowedUsePredicate;
    }
9678   }
9679 
9680   // 3) If set, obey the hints
9681   switch (Hints.getPredicate()) {
9682   case LoopVectorizeHints::FK_Enabled:
9683     return CM_ScalarEpilogueNotNeededUsePredicate;
9684   case LoopVectorizeHints::FK_Disabled:
9685     return CM_ScalarEpilogueAllowed;
  }
9687 
9688   // 4) if the TTI hook indicates this is profitable, request predication.
9689   if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
9690                                        LVL.getLAI()))
9691     return CM_ScalarEpilogueNotNeededUsePredicate;
9692 
9693   return CM_ScalarEpilogueAllowed;
9694 }
9695 
9696 Value *VPTransformState::get(VPValue *Def, unsigned Part) {
  // If Values have been set for this Def, return the one for \p Part.
9698   if (hasVectorValue(Def, Part))
9699     return Data.PerPartOutput[Def][Part];
9700 
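  // If no scalar value has been generated for this part either, Def must be a
  // live-in IR value not produced by any recipe; broadcast it and record the
  // result for this part.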
9701   if (!hasScalarValue(Def, {Part, 0})) {
9702     Value *IRV = Def->getLiveInIRValue();
9703     Value *B = ILV->getBroadcastInstrs(IRV);
9704     set(Def, B, Part);
9705     return B;
9706   }
9707 
9708   Value *ScalarValue = get(Def, {Part, 0});
9709   // If we aren't vectorizing, we can just copy the scalar map values over
9710   // to the vector map.
9711   if (VF.isScalar()) {
9712     set(Def, ScalarValue, Part);
9713     return ScalarValue;
9714   }
9715 
9716   auto *RepR = dyn_cast<VPReplicateRecipe>(Def);
9717   bool IsUniform = RepR && RepR->isUniform();
9718 
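  // The last scalar lane generated for Def (lane 0 if Def is uniform) marks
  // the end of its scalar definitions and anchors the packing sequence below.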
9719   unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
9720   // Check if there is a scalar value for the selected lane.
9721   if (!hasScalarValue(Def, {Part, LastLane})) {
9722     // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform.
9723     assert(isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) &&
9724            "unexpected recipe found to be invariant");
9725     IsUniform = true;
9726     LastLane = 0;
9727   }
9728 
9729   auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
9730 
9731   // Set the insert point after the last scalarized instruction. This
9732   // ensures the insertelement sequence will directly follow the scalar
9733   // definitions.
9734   auto OldIP = Builder.saveIP();
9735   auto NewIP = std::next(BasicBlock::iterator(LastInst));
9736   Builder.SetInsertPoint(&*NewIP);
9737 
9738   // However, if we are vectorizing, we need to construct the vector values.
9739   // If the value is known to be uniform after vectorization, we can just
9740   // broadcast the scalar value corresponding to lane zero for each unroll
9741   // iteration. Otherwise, we construct the vector values using
9742   // insertelement instructions. Since the resulting vectors are stored in
9743   // State, we will only generate the insertelements once.
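  // For example, with VF = 4 the packing of a non-uniform i32 value looks
  // roughly like (illustrative IR, names are made up):
  //   %p0 = insertelement <4 x i32> poison, i32 %s0, i32 0
  //   %p1 = insertelement <4 x i32> %p0,    i32 %s1, i32 1
  //   %p2 = insertelement <4 x i32> %p1,    i32 %s2, i32 2
  //   %p3 = insertelement <4 x i32> %p2,    i32 %s3, i32 3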
9744   Value *VectorValue = nullptr;
9745   if (IsUniform) {
9746     VectorValue = ILV->getBroadcastInstrs(ScalarValue);
9747     set(Def, VectorValue, Part);
9748   } else {
    // Initialize packing with insertelements to start from poison.
    assert(!VF.isScalable() && "VF is assumed to be non-scalable.");
    Value *Poison = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
    set(Def, Poison, Part);
9753     for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
9754       ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
9755     VectorValue = get(Def, Part);
9756   }
9757   Builder.restoreIP(OldIP);
9758   return VectorValue;
9759 }
9760 
9761 // Process the loop in the VPlan-native vectorization path. This path builds
// VPlan upfront in the vectorization pipeline, which allows VPlan-to-VPlan
// transformations to be applied from the very beginning without modifying the
9764 // input LLVM IR.
9765 static bool processLoopInVPlanNativePath(
9766     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
9767     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
9768     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
9769     OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
9770     ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints,
9771     LoopVectorizationRequirements &Requirements) {
9772 
9773   if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
9774     LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
9775     return false;
9776   }
9777   assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
9778   Function *F = L->getHeader()->getParent();
9779   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
9780 
9781   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
9782       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
9783 
9784   LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
9785                                 &Hints, IAI);
9786   // Use the planner for outer loop vectorization.
9787   // TODO: CM is not used at this point inside the planner. Turn CM into an
9788   // optional argument if we don't need it in the future.
9789   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints,
9790                                Requirements, ORE);
9791 
9792   // Get user vectorization factor.
9793   ElementCount UserVF = Hints.getWidth();
9794 
9795   // Plan how to best vectorize, return the best VF and its cost.
9796   const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
9797 
9798   // If we are stress testing VPlan builds, do not attempt to generate vector
9799   // code. Masked vector code generation support will follow soon.
9800   // Also, do not attempt to vectorize if no vector code will be produced.
9801   if (VPlanBuildStressTest || EnableVPlanPredication ||
9802       VectorizationFactor::Disabled() == VF)
9803     return false;
9804 
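  // The VPlan-native path does not interleave, so use an unroll factor of one.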
9805   LVP.setBestPlan(VF.Width, 1);
9806 
9807   {
9808     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
9809                              F->getParent()->getDataLayout());
9810     InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
9811                            &CM, BFI, PSI, Checks);
9812     LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
9813                       << L->getHeader()->getParent()->getName() << "\"\n");
9814     LVP.executePlan(LB, DT);
9815   }
9816 
9817   // Mark the loop as already vectorized to avoid vectorizing again.
9818   Hints.setAlreadyVectorized();
9819   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
9820   return true;
9821 }
9822 
// Emit a remark if there are stores to floats that required a floating point
// extension. If the vectorized loop performs its computation in a wider
// floating point type there will be a performance penalty from the conversion
// overhead and the change in the vector width.
9827 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
9828   SmallVector<Instruction *, 4> Worklist;
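  // Seed the worklist with all single-precision (float) stores in the loop.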
9829   for (BasicBlock *BB : L->getBlocks()) {
9830     for (Instruction &Inst : *BB) {
9831       if (auto *S = dyn_cast<StoreInst>(&Inst)) {
9832         if (S->getValueOperand()->getType()->isFloatTy())
9833           Worklist.push_back(S);
9834       }
9835     }
9836   }
9837 
  // Traverse the floating point stores upwards, searching for floating point
  // conversions.
9840   SmallPtrSet<const Instruction *, 4> Visited;
9841   SmallPtrSet<const Instruction *, 4> EmittedRemark;
9842   while (!Worklist.empty()) {
9843     auto *I = Worklist.pop_back_val();
9844     if (!L->contains(I))
9845       continue;
9846     if (!Visited.insert(I).second)
9847       continue;
9848 
9849     // Emit a remark if the floating point store required a floating
9850     // point conversion.
9851     // TODO: More work could be done to identify the root cause such as a
9852     // constant or a function return type and point the user to it.
9853     if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
9854       ORE->emit([&]() {
9855         return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
9856                                           I->getDebugLoc(), L->getHeader())
9857                << "floating point conversion changes vector width. "
9858                << "Mixed floating point precision requires an up/down "
9859                << "cast that will negatively impact performance.";
9860       });
9861 
9862     for (Use &Op : I->operands())
9863       if (auto *OpI = dyn_cast<Instruction>(Op))
9864         Worklist.push_back(OpI);
9865   }
9866 }
9867 
9868 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
9869     : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
9870                                !EnableLoopInterleaving),
9871       VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
9872                               !EnableLoopVectorization) {}
9873 
9874 bool LoopVectorizePass::processLoop(Loop *L) {
9875   assert((EnableVPlanNativePath || L->isInnermost()) &&
9876          "VPlan-native path is not enabled. Only process inner loops.");
9877 
9878 #ifndef NDEBUG
9879   const std::string DebugLocStr = getDebugLocString(L);
9880 #endif /* NDEBUG */
9881 
9882   LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
9883                     << L->getHeader()->getParent()->getName() << "\" from "
9884                     << DebugLocStr << "\n");
9885 
9886   LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE);
9887 
9888   LLVM_DEBUG(
9889       dbgs() << "LV: Loop hints:"
9890              << " force="
9891              << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
9892                      ? "disabled"
9893                      : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
9894                             ? "enabled"
9895                             : "?"))
9896              << " width=" << Hints.getWidth()
9897              << " interleave=" << Hints.getInterleave() << "\n");
9898 
9899   // Function containing loop
9900   Function *F = L->getHeader()->getParent();
9901 
9902   // Looking at the diagnostic output is the only way to determine if a loop
9903   // was vectorized (other than looking at the IR or machine code), so it
9904   // is important to generate an optimization remark for each loop. Most of
9905   // these messages are generated as OptimizationRemarkAnalysis. Remarks
  // generated as OptimizationRemark and OptimizationRemarkMissed are less
  // verbose; they report vectorized loops and unvectorized loops that may
  // benefit from vectorization, respectively.
9909 
9910   if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
9911     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
9912     return false;
9913   }
9914 
9915   PredicatedScalarEvolution PSE(*SE, *L);
9916 
9917   // Check if it is legal to vectorize the loop.
9918   LoopVectorizationRequirements Requirements;
9919   LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
9920                                 &Requirements, &Hints, DB, AC, BFI, PSI);
9921   if (!LVL.canVectorize(EnableVPlanNativePath)) {
9922     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
9923     Hints.emitRemarkWithHints();
9924     return false;
9925   }
9926 
9927   // Check the function attributes and profiles to find out if this function
9928   // should be optimized for size.
9929   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
9930       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);
9931 
9932   // Entrance to the VPlan-native vectorization path. Outer loops are processed
9933   // here. They may require CFG and instruction level transformations before
9934   // even evaluating whether vectorization is profitable. Since we cannot modify
9935   // the incoming IR, we need to build VPlan upfront in the vectorization
9936   // pipeline.
9937   if (!L->isInnermost())
9938     return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
9939                                         ORE, BFI, PSI, Hints, Requirements);
9940 
9941   assert(L->isInnermost() && "Inner loop expected.");
9942 
9943   // Check the loop for a trip count threshold: vectorize loops with a tiny trip
9944   // count by optimizing for size, to minimize overheads.
9945   auto ExpectedTC = getSmallBestKnownTC(*SE, L);
9946   if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
9947     LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
9948                       << "This loop is worth vectorizing only if no scalar "
9949                       << "iteration overheads are incurred.");
9950     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
9951       LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
9952     else {
9953       LLVM_DEBUG(dbgs() << "\n");
9954       SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
9955     }
9956   }
9957 
9958   // Check the function attributes to see if implicit floats are allowed.
9959   // FIXME: This check doesn't seem possibly correct -- what if the loop is
9960   // an integer loop and the vector instructions selected are purely integer
9961   // vector instructions?
9962   if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
9963     reportVectorizationFailure(
9964         "Can't vectorize when the NoImplicitFloat attribute is used",
9965         "loop not vectorized due to NoImplicitFloat attribute",
9966         "NoImplicitFloat", ORE, L);
9967     Hints.emitRemarkWithHints();
9968     return false;
9969   }
9970 
9971   // Check if the target supports potentially unsafe FP vectorization.
9972   // FIXME: Add a check for the type of safety issue (denormal, signaling)
9973   // for the target we're vectorizing for, to make sure none of the
9974   // additional fp-math flags can help.
9975   if (Hints.isPotentiallyUnsafe() &&
9976       TTI->isFPVectorizationPotentiallyUnsafe()) {
9977     reportVectorizationFailure(
9978         "Potentially unsafe FP op prevents vectorization",
9979         "loop not vectorized due to unsafe FP support.",
9980         "UnsafeFP", ORE, L);
9981     Hints.emitRemarkWithHints();
9982     return false;
9983   }
9984 
9985   if (!LVL.canVectorizeFPMath(EnableStrictReductions)) {
9986     ORE->emit([&]() {
9987       auto *ExactFPMathInst = Requirements.getExactFPInst();
9988       return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
9989                                                  ExactFPMathInst->getDebugLoc(),
9990                                                  ExactFPMathInst->getParent())
9991              << "loop not vectorized: cannot prove it is safe to reorder "
9992                 "floating-point operations";
9993     });
9994     LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
9995                          "reorder floating-point operations\n");
9996     Hints.emitRemarkWithHints();
9997     return false;
9998   }
9999 
10000   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
10001   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
10002 
10003   // If an override option has been passed in for interleaved accesses, use it.
10004   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
10005     UseInterleaved = EnableInterleavedMemAccesses;
10006 
10007   // Analyze interleaved memory accesses.
10008   if (UseInterleaved) {
10009     IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
10010   }
10011 
10012   // Use the cost model.
10013   LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
10014                                 F, &Hints, IAI);
10015   CM.collectValuesToIgnore();
10016 
10017   // Use the planner for vectorization.
10018   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints,
10019                                Requirements, ORE);
10020 
10021   // Get user vectorization factor and interleave count.
10022   ElementCount UserVF = Hints.getWidth();
10023   unsigned UserIC = Hints.getInterleave();
10024 
10025   // Plan how to best vectorize, return the best VF and its cost.
10026   Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
10027 
10028   VectorizationFactor VF = VectorizationFactor::Disabled();
10029   unsigned IC = 1;
10030 
10031   if (MaybeVF) {
10032     VF = *MaybeVF;
10033     // Select the interleave count.
10034     IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue());
10035   }
10036 
10037   // Identify the diagnostic messages that should be produced.
10038   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
10039   bool VectorizeLoop = true, InterleaveLoop = true;
10040   if (VF.Width.isScalar()) {
10041     LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
10042     VecDiagMsg = std::make_pair(
10043         "VectorizationNotBeneficial",
10044         "the cost-model indicates that vectorization is not beneficial");
10045     VectorizeLoop = false;
10046   }
10047 
10048   if (!MaybeVF && UserIC > 1) {
10049     // Tell the user interleaving was avoided up-front, despite being explicitly
10050     // requested.
10051     LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
10052                          "interleaving should be avoided up front\n");
10053     IntDiagMsg = std::make_pair(
10054         "InterleavingAvoided",
10055         "Ignoring UserIC, because interleaving was avoided up front");
10056     InterleaveLoop = false;
10057   } else if (IC == 1 && UserIC <= 1) {
10058     // Tell the user interleaving is not beneficial.
10059     LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
10060     IntDiagMsg = std::make_pair(
10061         "InterleavingNotBeneficial",
10062         "the cost-model indicates that interleaving is not beneficial");
10063     InterleaveLoop = false;
10064     if (UserIC == 1) {
10065       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
10066       IntDiagMsg.second +=
10067           " and is explicitly disabled or interleave count is set to 1";
10068     }
10069   } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial but is explicitly disabled.
10071     LLVM_DEBUG(
10072         dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
10073     IntDiagMsg = std::make_pair(
10074         "InterleavingBeneficialButDisabled",
10075         "the cost-model indicates that interleaving is beneficial "
10076         "but is explicitly disabled or interleave count is set to 1");
10077     InterleaveLoop = false;
10078   }
10079 
10080   // Override IC if user provided an interleave count.
10081   IC = UserIC > 0 ? UserIC : IC;
10082 
10083   // Emit diagnostic messages, if any.
10084   const char *VAPassName = Hints.vectorizeAnalysisPassName();
10085   if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
10087     ORE->emit([&]() {
10088       return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
10089                                       L->getStartLoc(), L->getHeader())
10090              << VecDiagMsg.second;
10091     });
10092     ORE->emit([&]() {
10093       return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
10094                                       L->getStartLoc(), L->getHeader())
10095              << IntDiagMsg.second;
10096     });
10097     return false;
10098   } else if (!VectorizeLoop && InterleaveLoop) {
10099     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10100     ORE->emit([&]() {
10101       return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
10102                                         L->getStartLoc(), L->getHeader())
10103              << VecDiagMsg.second;
10104     });
10105   } else if (VectorizeLoop && !InterleaveLoop) {
10106     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10107                       << ") in " << DebugLocStr << '\n');
10108     ORE->emit([&]() {
10109       return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
10110                                         L->getStartLoc(), L->getHeader())
10111              << IntDiagMsg.second;
10112     });
10113   } else if (VectorizeLoop && InterleaveLoop) {
10114     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10115                       << ") in " << DebugLocStr << '\n');
10116     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10117   }
10118 
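  // Remember the original loop metadata so follow-up metadata, if requested,
  // can be attached to the remainder loop after vectorization.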
10119   bool DisableRuntimeUnroll = false;
10120   MDNode *OrigLoopID = L->getLoopID();
10121   {
    // Optimistically generate runtime checks. Drop them if they turn out not
    // to be profitable. Limit the scope of Checks, so the cleanup happens
    // immediately after vector code generation is done.
10125     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10126                              F->getParent()->getDataLayout());
10127     if (!VF.Width.isScalar() || IC > 1)
10128       Checks.Create(L, *LVL.getLAI(), PSE.getUnionPredicate());
10129     LVP.setBestPlan(VF.Width, IC);
10130 
10131     using namespace ore;
10132     if (!VectorizeLoop) {
10133       assert(IC > 1 && "interleave count should not be 1 or 0");
      // If we decided not to vectorize the loop (the chosen VF is scalar),
      // then interleave it instead.
10136       InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
10137                                  &CM, BFI, PSI, Checks);
10138       LVP.executePlan(Unroller, DT);
10139 
10140       ORE->emit([&]() {
10141         return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
10142                                   L->getHeader())
10143                << "interleaved loop (interleaved count: "
10144                << NV("InterleaveCount", IC) << ")";
10145       });
10146     } else {
      // If we decided to vectorize the loop, then do it.
10148 
10149       // Consider vectorizing the epilogue too if it's profitable.
10150       VectorizationFactor EpilogueVF =
10151           CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
10152       if (EpilogueVF.Width.isVector()) {
10153 
10154         // The first pass vectorizes the main loop and creates a scalar epilogue
10155         // to be vectorized by executing the plan (potentially with a different
10156         // factor) again shortly afterwards.
10157         EpilogueLoopVectorizationInfo EPI(VF.Width.getKnownMinValue(), IC,
10158                                           EpilogueVF.Width.getKnownMinValue(),
10159                                           1);
10160         EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE,
10161                                            EPI, &LVL, &CM, BFI, PSI, Checks);
10162 
10163         LVP.setBestPlan(EPI.MainLoopVF, EPI.MainLoopUF);
10164         LVP.executePlan(MainILV, DT);
10165         ++LoopsVectorized;
10166 
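        // The first pass leaves a scalar remainder loop behind; re-simplify it
        // and reform LCSSA before vectorizing it as the epilogue.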
10167         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10168         formLCSSARecursively(*L, *DT, LI, SE);
10169 
10170         // Second pass vectorizes the epilogue and adjusts the control flow
10171         // edges from the first pass.
10172         LVP.setBestPlan(EPI.EpilogueVF, EPI.EpilogueUF);
10173         EPI.MainLoopVF = EPI.EpilogueVF;
10174         EPI.MainLoopUF = EPI.EpilogueUF;
10175         EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
10176                                                  ORE, EPI, &LVL, &CM, BFI, PSI,
10177                                                  Checks);
10178         LVP.executePlan(EpilogILV, DT);
10179         ++LoopsEpilogueVectorized;
10180 
10181         if (!MainILV.areSafetyChecksAdded())
10182           DisableRuntimeUnroll = true;
10183       } else {
10184         InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
10185                                &LVL, &CM, BFI, PSI, Checks);
10186         LVP.executePlan(LB, DT);
10187         ++LoopsVectorized;
10188 
10189         // Add metadata to disable runtime unrolling a scalar loop when there
10190         // are no runtime checks about strides and memory. A scalar loop that is
10191         // rarely used is not worth unrolling.
10192         if (!LB.areSafetyChecksAdded())
10193           DisableRuntimeUnroll = true;
10194       }
10195       // Report the vectorization decision.
10196       ORE->emit([&]() {
10197         return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
10198                                   L->getHeader())
10199                << "vectorized loop (vectorization width: "
10200                << NV("VectorizationFactor", VF.Width)
10201                << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
10202       });
10203     }
10204 
10205     if (ORE->allowExtraAnalysis(LV_NAME))
10206       checkMixedPrecision(L, ORE);
10207   }
10208 
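  // If the loop metadata requests follow-up metadata for the remainder loop,
  // attach it; otherwise mark the loop as vectorized and, if needed, disable
  // runtime unrolling of the scalar remainder.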
10209   Optional<MDNode *> RemainderLoopID =
10210       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
10211                                       LLVMLoopVectorizeFollowupEpilogue});
10212   if (RemainderLoopID.hasValue()) {
10213     L->setLoopID(RemainderLoopID.getValue());
10214   } else {
10215     if (DisableRuntimeUnroll)
10216       AddRuntimeUnrollDisableMetaData(L);
10217 
10218     // Mark the loop as already vectorized to avoid vectorizing again.
10219     Hints.setAlreadyVectorized();
10220   }
10221 
10222   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10223   return true;
10224 }
10225 
10226 LoopVectorizeResult LoopVectorizePass::runImpl(
10227     Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
10228     DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
10229     DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
10230     std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
10231     OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
10232   SE = &SE_;
10233   LI = &LI_;
10234   TTI = &TTI_;
10235   DT = &DT_;
10236   BFI = &BFI_;
10237   TLI = TLI_;
10238   AA = &AA_;
10239   AC = &AC_;
10240   GetLAA = &GetLAA_;
10241   DB = &DB_;
10242   ORE = &ORE_;
10243   PSI = PSI_;
10244 
10245   // Don't attempt if
10246   // 1. the target claims to have no vector registers, and
10247   // 2. interleaving won't help ILP.
10248   //
10249   // The second condition is necessary because, even if the target has no
10250   // vector registers, loop vectorization may still enable scalar
10251   // interleaving.
10252   if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
10253       TTI->getMaxInterleaveFactor(1) < 2)
10254     return LoopVectorizeResult(false, false);
10255 
10256   bool Changed = false, CFGChanged = false;
10257 
10258   // The vectorizer requires loops to be in simplified form.
10259   // Since simplification may add new inner loops, it has to run before the
10260   // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
10262   // vectorized.
10263   for (auto &L : *LI)
10264     Changed |= CFGChanged |=
10265         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10266 
10267   // Build up a worklist of inner-loops to vectorize. This is necessary as
10268   // the act of vectorizing or partially unrolling a loop creates new loops
10269   // and can invalidate iterators across the loops.
10270   SmallVector<Loop *, 8> Worklist;
10271 
10272   for (Loop *L : *LI)
10273     collectSupportedLoops(*L, LI, ORE, Worklist);
10274 
10275   LoopsAnalyzed += Worklist.size();
10276 
10277   // Now walk the identified inner loops.
10278   while (!Worklist.empty()) {
10279     Loop *L = Worklist.pop_back_val();
10280 
10281     // For the inner loops we actually process, form LCSSA to simplify the
10282     // transform.
10283     Changed |= formLCSSARecursively(*L, *DT, LI, SE);
10284 
10285     Changed |= CFGChanged |= processLoop(L);
10286   }
10287 
10288   // Process each loop nest in the function.
10289   return LoopVectorizeResult(Changed, CFGChanged);
10290 }
10291 
10292 PreservedAnalyses LoopVectorizePass::run(Function &F,
10293                                          FunctionAnalysisManager &AM) {
10294     auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
10295     auto &LI = AM.getResult<LoopAnalysis>(F);
10296     auto &TTI = AM.getResult<TargetIRAnalysis>(F);
10297     auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
10298     auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
10299     auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
10300     auto &AA = AM.getResult<AAManager>(F);
10301     auto &AC = AM.getResult<AssumptionAnalysis>(F);
10302     auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
10303     auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
10304     MemorySSA *MSSA = EnableMSSALoopDependency
10305                           ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA()
10306                           : nullptr;
10307 
10308     auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
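    // Provide a callback that computes LoopAccessInfo on demand for a given
    // loop, using the inner loop analysis manager.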
10309     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
10310         [&](Loop &L) -> const LoopAccessInfo & {
10311       LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,  SE,
10312                                         TLI, TTI, nullptr, MSSA};
10313       return LAM.getResult<LoopAccessAnalysis>(L, AR);
10314     };
10315     auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
10316     ProfileSummaryInfo *PSI =
10317         MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
10318     LoopVectorizeResult Result =
10319         runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
10320     if (!Result.MadeAnyChange)
10321       return PreservedAnalyses::all();
10322     PreservedAnalyses PA;
10323 
10324     // We currently do not preserve loopinfo/dominator analyses with outer loop
10325     // vectorization. Until this is addressed, mark these analyses as preserved
    // only for the non-VPlan-native path.
10327     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
10328     if (!EnableVPlanNativePath) {
10329       PA.preserve<LoopAnalysis>();
10330       PA.preserve<DominatorTreeAnalysis>();
10331     }
10332     if (!Result.MadeCFGChange)
10333       PA.preserveSet<CFGAnalyses>();
10334     return PA;
10335 }
10336