1 //===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
10 // and generates target-independent LLVM-IR.
11 // The vectorizer uses the TargetTransformInfo analysis to estimate the costs
12 // of instructions in order to estimate the profitability of vectorization.
13 //
14 // The loop vectorizer combines consecutive loop iterations into a single
15 // 'wide' iteration. After this transformation the index is incremented
16 // by the SIMD vector width, and not by one.
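//
// For example (a conceptual sketch with hypothetical arrays a, b and c), a
// scalar loop such as
//
//   for (int i = 0; i < n; ++i)
//     a[i] = b[i] + c[i];
//
// is, for a vector width of 4, conceptually transformed into
//
//   for (i = 0; i + 3 < n; i += 4)
//     a[i..i+3] = b[i..i+3] + c[i..i+3];  // one wide SIMD operation
//   for (; i < n; ++i)                    // scalar remainder (epilogue) loop
//     a[i] = b[i] + c[i];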
17 //
18 // This pass has four parts:
19 // 1. The main loop pass that drives the different parts.
20 // 2. LoopVectorizationLegality - A unit that checks for the legality
21 //    of the vectorization.
22 // 3. InnerLoopVectorizer - A unit that performs the actual
23 //    widening of instructions.
24 // 4. LoopVectorizationCostModel - A unit that checks for the profitability
25 //    of vectorization. It decides on the optimal vector width, which
26 //    can be one, if vectorization is not profitable.
27 //
28 // There is a development effort going on to migrate the loop vectorizer to the
29 // VPlan infrastructure and to introduce outer loop vectorization support (see
30 // docs/Proposal/VectorizationPlan.rst and
31 // http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
32 // purpose, we temporarily introduced the VPlan-native vectorization path: an
33 // alternative vectorization path that is natively implemented on top of the
34 // VPlan infrastructure. See EnableVPlanNativePath for enabling.
35 //
36 //===----------------------------------------------------------------------===//
37 //
38 // The reduction-variable vectorization is based on the paper:
39 //  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
40 //
41 // Variable uniformity checks are inspired by:
42 //  Karrenberg, R. and Hack, S. Whole Function Vectorization.
43 //
44 // The interleaved access vectorization is based on the paper:
45 //  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
46 //  Data for SIMD
47 //
48 // Other ideas/concepts are from:
49 //  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
50 //
51 //  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
52 //  Vectorizing Compilers.
53 //
54 //===----------------------------------------------------------------------===//
55 
56 #include "llvm/Transforms/Vectorize/LoopVectorize.h"
57 #include "LoopVectorizationPlanner.h"
58 #include "VPRecipeBuilder.h"
59 #include "VPlan.h"
60 #include "VPlanHCFGBuilder.h"
61 #include "VPlanPredicator.h"
62 #include "VPlanTransforms.h"
63 #include "llvm/ADT/APInt.h"
64 #include "llvm/ADT/ArrayRef.h"
65 #include "llvm/ADT/DenseMap.h"
66 #include "llvm/ADT/DenseMapInfo.h"
67 #include "llvm/ADT/Hashing.h"
68 #include "llvm/ADT/MapVector.h"
69 #include "llvm/ADT/None.h"
70 #include "llvm/ADT/Optional.h"
71 #include "llvm/ADT/STLExtras.h"
72 #include "llvm/ADT/SmallPtrSet.h"
73 #include "llvm/ADT/SmallSet.h"
74 #include "llvm/ADT/SmallVector.h"
75 #include "llvm/ADT/Statistic.h"
76 #include "llvm/ADT/StringRef.h"
77 #include "llvm/ADT/Twine.h"
78 #include "llvm/ADT/iterator_range.h"
79 #include "llvm/Analysis/AssumptionCache.h"
80 #include "llvm/Analysis/BasicAliasAnalysis.h"
81 #include "llvm/Analysis/BlockFrequencyInfo.h"
82 #include "llvm/Analysis/CFG.h"
83 #include "llvm/Analysis/CodeMetrics.h"
84 #include "llvm/Analysis/DemandedBits.h"
85 #include "llvm/Analysis/GlobalsModRef.h"
86 #include "llvm/Analysis/LoopAccessAnalysis.h"
87 #include "llvm/Analysis/LoopAnalysisManager.h"
88 #include "llvm/Analysis/LoopInfo.h"
89 #include "llvm/Analysis/LoopIterator.h"
90 #include "llvm/Analysis/MemorySSA.h"
91 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
92 #include "llvm/Analysis/ProfileSummaryInfo.h"
93 #include "llvm/Analysis/ScalarEvolution.h"
94 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
95 #include "llvm/Analysis/TargetLibraryInfo.h"
96 #include "llvm/Analysis/TargetTransformInfo.h"
97 #include "llvm/Analysis/VectorUtils.h"
98 #include "llvm/IR/Attributes.h"
99 #include "llvm/IR/BasicBlock.h"
100 #include "llvm/IR/CFG.h"
101 #include "llvm/IR/Constant.h"
102 #include "llvm/IR/Constants.h"
103 #include "llvm/IR/DataLayout.h"
104 #include "llvm/IR/DebugInfoMetadata.h"
105 #include "llvm/IR/DebugLoc.h"
106 #include "llvm/IR/DerivedTypes.h"
107 #include "llvm/IR/DiagnosticInfo.h"
108 #include "llvm/IR/Dominators.h"
109 #include "llvm/IR/Function.h"
110 #include "llvm/IR/IRBuilder.h"
111 #include "llvm/IR/InstrTypes.h"
112 #include "llvm/IR/Instruction.h"
113 #include "llvm/IR/Instructions.h"
114 #include "llvm/IR/IntrinsicInst.h"
115 #include "llvm/IR/Intrinsics.h"
116 #include "llvm/IR/LLVMContext.h"
117 #include "llvm/IR/Metadata.h"
118 #include "llvm/IR/Module.h"
119 #include "llvm/IR/Operator.h"
120 #include "llvm/IR/PatternMatch.h"
121 #include "llvm/IR/Type.h"
122 #include "llvm/IR/Use.h"
123 #include "llvm/IR/User.h"
124 #include "llvm/IR/Value.h"
125 #include "llvm/IR/ValueHandle.h"
126 #include "llvm/IR/Verifier.h"
127 #include "llvm/InitializePasses.h"
128 #include "llvm/Pass.h"
129 #include "llvm/Support/Casting.h"
130 #include "llvm/Support/CommandLine.h"
131 #include "llvm/Support/Compiler.h"
132 #include "llvm/Support/Debug.h"
133 #include "llvm/Support/ErrorHandling.h"
134 #include "llvm/Support/InstructionCost.h"
135 #include "llvm/Support/MathExtras.h"
136 #include "llvm/Support/raw_ostream.h"
137 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
138 #include "llvm/Transforms/Utils/InjectTLIMappings.h"
139 #include "llvm/Transforms/Utils/LoopSimplify.h"
140 #include "llvm/Transforms/Utils/LoopUtils.h"
141 #include "llvm/Transforms/Utils/LoopVersioning.h"
142 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
143 #include "llvm/Transforms/Utils/SizeOpts.h"
144 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
145 #include <algorithm>
146 #include <cassert>
147 #include <cstdint>
148 #include <cstdlib>
149 #include <functional>
150 #include <iterator>
151 #include <limits>
152 #include <memory>
153 #include <string>
154 #include <tuple>
155 #include <utility>
156 
157 using namespace llvm;
158 
159 #define LV_NAME "loop-vectorize"
160 #define DEBUG_TYPE LV_NAME
161 
162 #ifndef NDEBUG
163 const char VerboseDebug[] = DEBUG_TYPE "-verbose";
164 #endif
165 
166 /// @{
167 /// Metadata attribute names
168 const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
169 const char LLVMLoopVectorizeFollowupVectorized[] =
170     "llvm.loop.vectorize.followup_vectorized";
171 const char LLVMLoopVectorizeFollowupEpilogue[] =
172     "llvm.loop.vectorize.followup_epilogue";
173 /// @}
174 
175 STATISTIC(LoopsVectorized, "Number of loops vectorized");
176 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
177 STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");
178 
179 static cl::opt<bool> EnableEpilogueVectorization(
180     "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
181     cl::desc("Enable vectorization of epilogue loops."));
182 
183 static cl::opt<unsigned> EpilogueVectorizationForceVF(
184     "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
185     cl::desc("When epilogue vectorization is enabled, and a value greater than "
186              "1 is specified, forces the given VF for all applicable epilogue "
187              "loops."));
188 
189 static cl::opt<unsigned> EpilogueVectorizationMinVF(
190     "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
191     cl::desc("Only loops with vectorization factor equal to or larger than "
192              "the specified value are considered for epilogue vectorization."));
193 
194 /// Loops with a known constant trip count below this number are vectorized only
195 /// if no scalar iteration overheads are incurred.
196 static cl::opt<unsigned> TinyTripCountVectorThreshold(
197     "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
198     cl::desc("Loops with a constant trip count that is smaller than this "
199              "value are vectorized only if no scalar iteration overheads "
200              "are incurred."));
201 
202 static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
203     "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
204     cl::desc("The maximum allowed number of runtime memory checks with a "
205              "vectorize(enable) pragma."));
206 
207 // Option prefer-predicate-over-epilogue indicates that an epilogue is undesired
208 // and that predication is preferred; the enum below lists the possible choices.
209 // I.e., the vectorizer will try to fold the tail loop (epilogue) into the
210 // vector body and predicate the instructions accordingly. If tail-folding
211 // fails, the fallback strategy depends on these values:
212 namespace PreferPredicateTy {
213   enum Option {
214     ScalarEpilogue = 0,
215     PredicateElseScalarEpilogue,
216     PredicateOrDontVectorize
217   };
218 } // namespace PreferPredicateTy
219 
220 static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
221     "prefer-predicate-over-epilogue",
222     cl::init(PreferPredicateTy::ScalarEpilogue),
223     cl::Hidden,
224     cl::desc("Tail-folding and predication preferences over creating a scalar "
225              "epilogue loop."),
226     cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
227                          "scalar-epilogue",
228                          "Don't tail-predicate loops, create scalar epilogue"),
229               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
230                          "predicate-else-scalar-epilogue",
231                          "prefer tail-folding, create scalar epilogue if tail "
232                          "folding fails."),
233               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
234                          "predicate-dont-vectorize",
235                          "prefer tail-folding, don't attempt vectorization if "
236                          "tail-folding fails.")));
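
// For example (a sketch; the exact pass-manager invocation may differ between
// LLVM versions), the preference can be selected when running the vectorizer
// through opt:
//
//   opt -passes=loop-vectorize \
//       -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
//       -S in.ll -o out.ll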
237 
238 static cl::opt<bool> MaximizeBandwidth(
239     "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
240     cl::desc("Maximize bandwidth when selecting vectorization factor which "
241              "will be determined by the smallest type in the loop."));
242 
243 static cl::opt<bool> EnableInterleavedMemAccesses(
244     "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
245     cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
246 
247 /// An interleave-group may need masking if it resides in a block that needs
248 /// predication, or in order to mask away gaps.
249 static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
250     "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
251     cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));
252 
253 static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
254     "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
255     cl::desc("We don't interleave loops with a estimated constant trip count "
256              "below this number"));
257 
258 static cl::opt<unsigned> ForceTargetNumScalarRegs(
259     "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
260     cl::desc("A flag that overrides the target's number of scalar registers."));
261 
262 static cl::opt<unsigned> ForceTargetNumVectorRegs(
263     "force-target-num-vector-regs", cl::init(0), cl::Hidden,
264     cl::desc("A flag that overrides the target's number of vector registers."));
265 
266 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
267     "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
268     cl::desc("A flag that overrides the target's max interleave factor for "
269              "scalar loops."));
270 
271 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
272     "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
273     cl::desc("A flag that overrides the target's max interleave factor for "
274              "vectorized loops."));
275 
276 static cl::opt<unsigned> ForceTargetInstructionCost(
277     "force-target-instruction-cost", cl::init(0), cl::Hidden,
278     cl::desc("A flag that overrides the target's expected cost for "
279              "an instruction to a single constant value. Mostly "
280              "useful for getting consistent testing."));
281 
282 static cl::opt<bool> ForceTargetSupportsScalableVectors(
283     "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
284     cl::desc(
285         "Pretend that scalable vectors are supported, even if the target does "
286         "not support them. This flag should only be used for testing."));
287 
288 static cl::opt<unsigned> SmallLoopCost(
289     "small-loop-cost", cl::init(20), cl::Hidden,
290     cl::desc(
291         "The cost of a loop that is considered 'small' by the interleaver."));
292 
293 static cl::opt<bool> LoopVectorizeWithBlockFrequency(
294     "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
295     cl::desc("Enable the use of the block frequency analysis to access PGO "
296              "heuristics minimizing code growth in cold regions and being more "
297              "aggressive in hot regions."));
298 
299 // Runtime interleave loops for load/store throughput.
300 static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
301     "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
302     cl::desc(
303         "Enable runtime interleaving until load/store ports are saturated"));
304 
305 /// Interleave small loops with scalar reductions.
306 static cl::opt<bool> InterleaveSmallLoopScalarReduction(
307     "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
308     cl::desc("Enable interleaving for loops with small iteration counts that "
309              "contain scalar reductions to expose ILP."));
310 
311 /// The number of stores in a loop that are allowed to need predication.
312 static cl::opt<unsigned> NumberOfStoresToPredicate(
313     "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
314     cl::desc("Max number of stores to be predicated behind an if."));
315 
316 static cl::opt<bool> EnableIndVarRegisterHeur(
317     "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
318     cl::desc("Count the induction variable only once when interleaving"));
319 
320 static cl::opt<bool> EnableCondStoresVectorization(
321     "enable-cond-stores-vec", cl::init(true), cl::Hidden,
322     cl::desc("Enable if predication of stores during vectorization."));
323 
324 static cl::opt<unsigned> MaxNestedScalarReductionIC(
325     "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
326     cl::desc("The maximum interleave count to use when interleaving a scalar "
327              "reduction in a nested loop."));
328 
329 static cl::opt<bool>
330     PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
331                            cl::Hidden,
332                            cl::desc("Prefer in-loop vector reductions, "
333                                     "overriding the target's preference."));
334 
335 cl::opt<bool> EnableStrictReductions(
336     "enable-strict-reductions", cl::init(false), cl::Hidden,
337     cl::desc("Enable the vectorisation of loops with in-order (strict) "
338              "FP reductions"));
339 
340 static cl::opt<bool> PreferPredicatedReductionSelect(
341     "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
342     cl::desc(
343         "Prefer predicating a reduction operation over an after-loop select."));
344 
345 cl::opt<bool> EnableVPlanNativePath(
346     "enable-vplan-native-path", cl::init(false), cl::Hidden,
347     cl::desc("Enable VPlan-native vectorization path with "
348              "support for outer loop vectorization."));
349 
350 // FIXME: Remove this switch once we have divergence analysis. Currently we
351 // assume divergent non-backedge branches when this switch is true.
352 cl::opt<bool> EnableVPlanPredication(
353     "enable-vplan-predication", cl::init(false), cl::Hidden,
354     cl::desc("Enable VPlan-native vectorization path predicator with "
355              "support for outer loop vectorization."));
356 
357 // This flag enables the stress testing of the VPlan H-CFG construction in the
358 // VPlan-native vectorization path. It must be used in conjunction with
359 // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
360 // verification of the H-CFGs built.
361 static cl::opt<bool> VPlanBuildStressTest(
362     "vplan-build-stress-test", cl::init(false), cl::Hidden,
363     cl::desc(
364         "Build VPlan for every supported loop nest in the function and bail "
365         "out right after the build (stress test the VPlan H-CFG construction "
366         "in the VPlan-native vectorization path)."));
367 
368 cl::opt<bool> llvm::EnableLoopInterleaving(
369     "interleave-loops", cl::init(true), cl::Hidden,
370     cl::desc("Enable loop interleaving in Loop vectorization passes"));
371 cl::opt<bool> llvm::EnableLoopVectorization(
372     "vectorize-loops", cl::init(true), cl::Hidden,
373     cl::desc("Run the Loop vectorization passes"));
374 
375 cl::opt<bool> PrintVPlansInDotFormat(
376     "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
377     cl::desc("Use dot format instead of plain text when dumping VPlans"));
378 
379 /// A helper function that returns true if the given type is irregular. The
380 /// type is irregular if its allocated size doesn't equal the store size of an
381 /// element of the corresponding vector type.
382 static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
383   // Determine if an array of N elements of type Ty is "bitcast compatible"
384   // with a <N x Ty> vector.
385   // This is only true if there is no padding between the array elements.
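  // For example (assuming a typical x86-64 data layout), Ty = x86_fp80 has a
  // type size of 80 bits but an alloc size of 128 bits, so there is padding
  // and the type is irregular; for Ty = i32 both sizes are 32 bits and the
  // type is regular.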
386   return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
387 }
388 
389 /// A helper function that returns the reciprocal of the block probability of
390 /// predicated blocks. If we return X, we are assuming the predicated block
391 /// will execute once for every X iterations of the loop header.
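/// For example, since this currently returns 2, an instruction in a
/// predicated block is assumed to execute once for every 2 iterations of the
/// loop header, and the cost model scales its cost down accordingly.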
392 ///
393 /// TODO: We should use actual block probability here, if available. Currently,
394 ///       we always assume predicated blocks have a 50% chance of executing.
395 static unsigned getReciprocalPredBlockProb() { return 2; }
396 
397 /// A helper function that returns an integer or floating-point constant with
398 /// value C.
399 static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
400   return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
401                            : ConstantFP::get(Ty, C);
402 }
403 
404 /// Returns "best known" trip count for the specified loop \p L as defined by
405 /// the following procedure:
406 ///   1) Returns exact trip count if it is known.
407 ///   2) Returns expected trip count according to profile data if any.
408 ///   3) Returns upper bound estimate if it is known.
409 ///   4) Returns None if all of the above failed.
410 static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
411   // Check if exact trip count is known.
412   if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
413     return ExpectedTC;
414 
415   // Check if there is an expected trip count available from profile data.
416   if (LoopVectorizeWithBlockFrequency)
417     if (auto EstimatedTC = getLoopEstimatedTripCount(L))
418       return EstimatedTC;
419 
420   // Check if upper bound estimate is known.
421   if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
422     return ExpectedTC;
423 
424   return None;
425 }
426 
427 // Forward declare GeneratedRTChecks.
428 class GeneratedRTChecks;
429 
430 namespace llvm {
431 
432 /// InnerLoopVectorizer vectorizes loops which contain only one basic
433 /// block to a specified vectorization factor (VF).
434 /// This class performs the widening of scalars into vectors, or multiple
435 /// scalars. This class also implements the following features:
436 /// * It inserts an epilogue loop for handling loops that don't have iteration
437 ///   counts that are known to be a multiple of the vectorization factor.
438 /// * It handles the code generation for reduction variables.
439 /// * Scalarization (implementation using scalars) of un-vectorizable
440 ///   instructions.
441 /// InnerLoopVectorizer does not perform any vectorization-legality
442 /// checks, and relies on the caller to check for the different legality
443 /// aspects. The InnerLoopVectorizer relies on the
444 /// LoopVectorizationLegality class to provide information about the induction
445 /// and reduction variables that were found for the given vectorization factor.
446 class InnerLoopVectorizer {
447 public:
448   InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
449                       LoopInfo *LI, DominatorTree *DT,
450                       const TargetLibraryInfo *TLI,
451                       const TargetTransformInfo *TTI, AssumptionCache *AC,
452                       OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
453                       unsigned UnrollFactor, LoopVectorizationLegality *LVL,
454                       LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
455                       ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
456       : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
457         AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
458         Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
459         PSI(PSI), RTChecks(RTChecks) {
460     // Query this against the original loop and save it here because the profile
461     // of the original loop header may change as the transformation happens.
462     OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
463         OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
464   }
465 
466   virtual ~InnerLoopVectorizer() = default;
467 
468   /// Create a new empty loop that will contain vectorized instructions later
469   /// on, while the old loop will be used as the scalar remainder. Control flow
470   /// is generated around the vectorized (and scalar epilogue) loops consisting
471   /// of various checks and bypasses. Return the pre-header block of the new
472   /// loop.
473   /// In the case of epilogue vectorization, this function is overridden to
474   /// handle the more complex control flow around the loops.
475   virtual BasicBlock *createVectorizedLoopSkeleton();
476 
477   /// Widen a single instruction within the innermost loop.
478   void widenInstruction(Instruction &I, VPValue *Def, VPUser &Operands,
479                         VPTransformState &State);
480 
481   /// Widen a single call instruction within the innermost loop.
482   void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
483                             VPTransformState &State);
484 
485   /// Widen a single select instruction within the innermost loop.
486   void widenSelectInstruction(SelectInst &I, VPValue *VPDef, VPUser &Operands,
487                               bool InvariantCond, VPTransformState &State);
488 
489   /// Fix the vectorized code, taking care of header phis, live-outs, and more.
490   void fixVectorizedLoop(VPTransformState &State);
491 
492   // Return true if any runtime check is added.
493   bool areSafetyChecksAdded() { return AddedSafetyChecks; }
494 
495   /// A type for vectorized values in the new loop. Each value from the
496   /// original loop, when vectorized, is represented by UF vector values in the
497   /// new unrolled loop, where UF is the unroll factor.
498   using VectorParts = SmallVector<Value *, 2>;
499 
500   /// Vectorize a single GetElementPtrInst based on information gathered and
501   /// decisions taken during planning.
502   void widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, VPUser &Indices,
503                 unsigned UF, ElementCount VF, bool IsPtrLoopInvariant,
504                 SmallBitVector &IsIndexLoopInvariant, VPTransformState &State);
505 
506   /// Vectorize a single PHINode in a block. This method handles the induction
507   /// variable canonicalization. It supports both VF = 1 for unrolled loops and
508   /// arbitrary length vectors.
509   void widenPHIInstruction(Instruction *PN, RecurrenceDescriptor *RdxDesc,
510                            VPWidenPHIRecipe *PhiR, VPTransformState &State);
511 
512   /// A helper function to scalarize a single Instruction in the innermost loop.
513   /// Generates a sequence of scalar instances for each lane between \p MinLane
514   /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
515   /// inclusive. Uses the VPValue operands from \p Operands instead of \p
516   /// Instr's operands.
517   void scalarizeInstruction(Instruction *Instr, VPValue *Def, VPUser &Operands,
518                             const VPIteration &Instance, bool IfPredicateInstr,
519                             VPTransformState &State);
520 
521   /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
522   /// is provided, the integer induction variable will first be truncated to
523   /// the corresponding type.
524   void widenIntOrFpInduction(PHINode *IV, Value *Start, TruncInst *Trunc,
525                              VPValue *Def, VPValue *CastDef,
526                              VPTransformState &State);
527 
528   /// Construct the vector value of a scalarized value \p V one lane at a time.
529   void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
530                                  VPTransformState &State);
531 
532   /// Try to vectorize interleaved access group \p Group with the base address
533   /// given in \p Addr, optionally masking the vector operations if \p
534   /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
535   /// values in the vectorized loop.
536   void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
537                                 ArrayRef<VPValue *> VPDefs,
538                                 VPTransformState &State, VPValue *Addr,
539                                 ArrayRef<VPValue *> StoredValues,
540                                 VPValue *BlockInMask = nullptr);
541 
542   /// Vectorize Load and Store instructions with the base address given in \p
543   /// Addr, optionally masking the vector operations if \p BlockInMask is
544   /// non-null. Use \p State to translate given VPValues to IR values in the
545   /// vectorized loop.
546   void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
547                                   VPValue *Def, VPValue *Addr,
548                                   VPValue *StoredValue, VPValue *BlockInMask);
549 
550   /// Set the debug location in the builder using the debug location in
551   /// the instruction.
552   void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);
553 
554   /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
555   void fixNonInductionPHIs(VPTransformState &State);
556 
557   /// Returns true if the reordering of FP operations is not allowed, but we are
558   /// able to vectorize with strict in-order reductions for the given RdxDesc.
559   bool useOrderedReductions(RecurrenceDescriptor &RdxDesc);
560 
561   /// Create a broadcast instruction. This method generates a broadcast
562   /// instruction (shuffle) for loop invariant values and for the induction
563   /// value. If this is the induction variable then we extend it to N, N+1, ...
564   /// this is needed because each iteration in the loop corresponds to a SIMD
565   /// element.
566   virtual Value *getBroadcastInstrs(Value *V);
567 
568 protected:
569   friend class LoopVectorizationPlanner;
570 
571   /// A small list of PHINodes.
572   using PhiVector = SmallVector<PHINode *, 4>;
573 
574   /// A type for scalarized values in the new loop. Each value from the
575   /// original loop, when scalarized, is represented by UF x VF scalar values
576   /// in the new unrolled loop, where UF is the unroll factor and VF is the
577   /// vectorization factor.
578   using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;
579 
580   /// Set up the values of the IVs correctly when exiting the vector loop.
581   void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
582                     Value *CountRoundDown, Value *EndValue,
583                     BasicBlock *MiddleBlock);
584 
585   /// Create a new induction variable inside L.
586   PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
587                                    Value *Step, Instruction *DL);
588 
589   /// Handle all cross-iteration phis in the header.
590   void fixCrossIterationPHIs(VPTransformState &State);
591 
592   /// Fix a first-order recurrence. This is the second phase of vectorizing
593   /// this phi node.
594   void fixFirstOrderRecurrence(PHINode *Phi, VPTransformState &State);
595 
596   /// Fix a reduction cross-iteration phi. This is the second phase of
597   /// vectorizing this phi node.
598   void fixReduction(VPWidenPHIRecipe *Phi, VPTransformState &State);
599 
600   /// Clear NSW/NUW flags from reduction instructions if necessary.
601   void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
602                                VPTransformState &State);
603 
604   /// Fixup the LCSSA phi nodes in the unique exit block.  This simply
605   /// means we need to add the appropriate incoming value from the middle
606   /// block as exiting edges from the scalar epilogue loop (if present) are
607   /// already in place, and we exit the vector loop exclusively to the middle
608   /// block.
609   void fixLCSSAPHIs(VPTransformState &State);
610 
611   /// Iteratively sink the scalarized operands of a predicated instruction into
612   /// the block that was created for it.
613   void sinkScalarOperands(Instruction *PredInst);
614 
615   /// Shrinks vector element sizes to the smallest bitwidth they can be legally
616   /// represented as.
617   void truncateToMinimalBitwidths(VPTransformState &State);
618 
619   /// This function adds
620   /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
621   /// to each vector element of Val. The sequence starts at StartIndex.
622   /// \p Opcode is relevant for FP induction variable.
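  /// For example (an integer sketch), with StartIdx = 0, Step = 2 and Val
  /// having four lanes all equal to %base, the resulting lanes are
  /// %base + 0, %base + 2, %base + 4 and %base + 6.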
623   virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
624                                Instruction::BinaryOps Opcode =
625                                Instruction::BinaryOpsEnd);
626 
627   /// Compute scalar induction steps. \p ScalarIV is the scalar induction
628   /// variable on which to base the steps, \p Step is the size of the step, and
629   /// \p EntryVal is the value from the original loop that maps to the steps.
630   /// Note that \p EntryVal doesn't have to be an induction variable - it
631   /// can also be a truncate instruction.
632   void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
633                         const InductionDescriptor &ID, VPValue *Def,
634                         VPValue *CastDef, VPTransformState &State);
635 
636   /// Create a vector induction phi node based on an existing scalar one. \p
637   /// EntryVal is the value from the original loop that maps to the vector phi
638   /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
639   /// truncate instruction, instead of widening the original IV, we widen a
640   /// version of the IV truncated to \p EntryVal's type.
641   void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
642                                        Value *Step, Value *Start,
643                                        Instruction *EntryVal, VPValue *Def,
644                                        VPValue *CastDef,
645                                        VPTransformState &State);
646 
647   /// Returns true if an instruction \p I should be scalarized instead of
648   /// vectorized for the chosen vectorization factor.
649   bool shouldScalarizeInstruction(Instruction *I) const;
650 
651   /// Returns true if we should generate a scalar version of \p IV.
652   bool needsScalarInduction(Instruction *IV) const;
653 
654   /// If there is a cast involved in the induction variable \p ID, which should
655   /// be ignored in the vectorized loop body, this function records the
656   /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
657   /// cast. We had already proved that the casted Phi is equal to the uncasted
658   /// Phi in the vectorized loop (under a runtime guard), and therefore
659   /// there is no need to vectorize the cast - the same value can be used in the
660   /// vector loop for both the Phi and the cast.
661   /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
662   /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
663   ///
664   /// \p EntryVal is the value from the original loop that maps to the vector
665   /// phi node and is used to distinguish what is the IV currently being
666   /// processed - original one (if \p EntryVal is a phi corresponding to the
667   /// original IV) or the "newly-created" one based on the proof mentioned above
668   /// (see also buildScalarSteps() and createVectorIntOrFpInductionPHI()). In the
669   /// latter case \p EntryVal is a TruncInst and we must not record anything for
670   /// that IV, but it's error-prone to expect callers of this routine to care
671   /// about that, hence this explicit parameter.
672   void recordVectorLoopValueForInductionCast(
673       const InductionDescriptor &ID, const Instruction *EntryVal,
674       Value *VectorLoopValue, VPValue *CastDef, VPTransformState &State,
675       unsigned Part, unsigned Lane = UINT_MAX);
676 
677   /// Generate a shuffle sequence that will reverse the vector Vec.
678   virtual Value *reverseVector(Value *Vec);
679 
680   /// Returns (and creates if needed) the original loop trip count.
681   Value *getOrCreateTripCount(Loop *NewLoop);
682 
683   /// Returns (and creates if needed) the trip count of the widened loop.
684   Value *getOrCreateVectorTripCount(Loop *NewLoop);
685 
686   /// Returns a bitcasted value to the requested vector type.
687   /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
688   Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
689                                 const DataLayout &DL);
690 
691   /// Emit a bypass check to see if the vector trip count is zero, including if
692   /// it overflows.
693   void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
694 
695   /// Emit a bypass check to see if all of the SCEV assumptions we've
696   /// had to make are correct. Returns the block containing the checks or
697   /// nullptr if no checks have been added.
698   BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass);
699 
700   /// Emit bypass checks to check any memory assumptions we may have made.
701   /// Returns the block containing the checks or nullptr if no checks have been
702   /// added.
703   BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);
704 
705   /// Compute the transformed value of Index at offset StartValue using step
706   /// StepValue.
707   /// For integer induction, returns StartValue + Index * StepValue.
708   /// For pointer induction, returns StartValue[Index * StepValue].
709   /// FIXME: The newly created binary instructions should contain nsw/nuw
710   /// flags, which can be found from the original scalar operations.
711   Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
712                               const DataLayout &DL,
713                               const InductionDescriptor &ID) const;
714 
715   /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
716   /// vector loop preheader, middle block and scalar preheader. Also
717   /// allocate a loop object for the new vector loop and return it.
718   Loop *createVectorLoopSkeleton(StringRef Prefix);
719 
720   /// Create new phi nodes for the induction variables to resume iteration count
721   /// in the scalar epilogue, from where the vectorized loop left off (given by
722   /// \p VectorTripCount).
723   /// In cases where the loop skeleton is more complicated (e.g. epilogue
724   /// vectorization) and the resume values can come from an additional bypass
725   /// block, the \p AdditionalBypass pair provides information about the bypass
726   /// block and the end value on the edge from bypass to this loop.
727   void createInductionResumeValues(
728       Loop *L, Value *VectorTripCount,
729       std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});
730 
731   /// Complete the loop skeleton by adding debug MDs, creating appropriate
732   /// conditional branches in the middle block, preparing the builder and
733   /// running the verifier. Take in the vector loop \p L as argument, and return
734   /// the preheader of the completed vector loop.
735   BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);
736 
737   /// Add additional metadata to \p To that was not present on \p Orig.
738   ///
739   /// Currently this is used to add the noalias annotations based on the
740   /// inserted memchecks.  Use this for instructions that are *cloned* into the
741   /// vector loop.
742   void addNewMetadata(Instruction *To, const Instruction *Orig);
743 
744   /// Add metadata from one instruction to another.
745   ///
746   /// This includes both the original MDs from \p From and additional ones (\see
747   /// addNewMetadata).  Use this for *newly created* instructions in the vector
748   /// loop.
749   void addMetadata(Instruction *To, Instruction *From);
750 
751   /// Similar to the previous function but it adds the metadata to a
752   /// vector of instructions.
753   void addMetadata(ArrayRef<Value *> To, Instruction *From);
754 
755   /// Allow subclasses to override and print debug traces before/after vplan
756   /// execution, when trace information is requested.
757   virtual void printDebugTracesAtStart(){};
758   virtual void printDebugTracesAtEnd(){};
759 
760   /// The original loop.
761   Loop *OrigLoop;
762 
763   /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
764   /// dynamic knowledge to simplify SCEV expressions and converts them to a
765   /// more usable form.
766   PredicatedScalarEvolution &PSE;
767 
768   /// Loop Info.
769   LoopInfo *LI;
770 
771   /// Dominator Tree.
772   DominatorTree *DT;
773 
774   /// Alias Analysis.
775   AAResults *AA;
776 
777   /// Target Library Info.
778   const TargetLibraryInfo *TLI;
779 
780   /// Target Transform Info.
781   const TargetTransformInfo *TTI;
782 
783   /// Assumption Cache.
784   AssumptionCache *AC;
785 
786   /// Interface to emit optimization remarks.
787   OptimizationRemarkEmitter *ORE;
788 
789   /// LoopVersioning.  It's only set up (non-null) if memchecks were
790   /// used.
791   ///
792   /// This is currently only used to add no-alias metadata based on the
793   /// memchecks.  The actual versioning is performed manually.
794   std::unique_ptr<LoopVersioning> LVer;
795 
796   /// The vectorization SIMD factor to use. Each vector will have this many
797   /// vector elements.
798   ElementCount VF;
799 
800   /// The vectorization unroll factor to use. Each scalar is vectorized to this
801   /// many different vector instructions.
802   unsigned UF;
803 
804   /// The builder that we use
805   IRBuilder<> Builder;
806 
807   // --- Vectorization state ---
808 
809   /// The vector-loop preheader.
810   BasicBlock *LoopVectorPreHeader;
811 
812   /// The scalar-loop preheader.
813   BasicBlock *LoopScalarPreHeader;
814 
815   /// Middle Block between the vector and the scalar.
816   BasicBlock *LoopMiddleBlock;
817 
818   /// The (unique) ExitBlock of the scalar loop.  Note that
819   /// there can be multiple exiting edges reaching this block.
820   BasicBlock *LoopExitBlock;
821 
822   /// The vector loop body.
823   BasicBlock *LoopVectorBody;
824 
825   /// The scalar loop body.
826   BasicBlock *LoopScalarBody;
827 
828   /// A list of all bypass blocks. The first block is the entry of the loop.
829   SmallVector<BasicBlock *, 4> LoopBypassBlocks;
830 
831   /// The new Induction variable which was added to the new block.
832   PHINode *Induction = nullptr;
833 
834   /// The induction variable of the old basic block.
835   PHINode *OldInduction = nullptr;
836 
837   /// Store instructions that were predicated.
838   SmallVector<Instruction *, 4> PredicatedInstructions;
839 
840   /// Trip count of the original loop.
841   Value *TripCount = nullptr;
842 
843   /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
844   Value *VectorTripCount = nullptr;
845 
846   /// The legality analysis.
847   LoopVectorizationLegality *Legal;
848 
849   /// The profitability analysis.
850   LoopVectorizationCostModel *Cost;
851 
852   // Record whether runtime checks are added.
853   bool AddedSafetyChecks = false;
854 
855   // Holds the end values for each induction variable. We save the end values
856   // so we can later fix-up the external users of the induction variables.
857   DenseMap<PHINode *, Value *> IVEndValues;
858 
859   // Vector of original scalar PHIs whose corresponding widened PHIs need to be
860   // fixed up at the end of vector code generation.
861   SmallVector<PHINode *, 8> OrigPHIsToFix;
862 
863   /// BFI and PSI are used to check for profile guided size optimizations.
864   BlockFrequencyInfo *BFI;
865   ProfileSummaryInfo *PSI;
866 
867   // Whether this loop should be optimized for size based on profile-guided
868   // size optimizations.
869   bool OptForSizeBasedOnProfile;
870 
871   /// Structure to hold information about generated runtime checks, responsible
872   /// for cleaning up the checks if vectorization turns out to be unprofitable.
873   GeneratedRTChecks &RTChecks;
874 };
875 
876 class InnerLoopUnroller : public InnerLoopVectorizer {
877 public:
878   InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
879                     LoopInfo *LI, DominatorTree *DT,
880                     const TargetLibraryInfo *TLI,
881                     const TargetTransformInfo *TTI, AssumptionCache *AC,
882                     OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
883                     LoopVectorizationLegality *LVL,
884                     LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
885                     ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
886       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
887                             ElementCount::getFixed(1), UnrollFactor, LVL, CM,
888                             BFI, PSI, Check) {}
889 
890 private:
891   Value *getBroadcastInstrs(Value *V) override;
892   Value *getStepVector(Value *Val, int StartIdx, Value *Step,
893                        Instruction::BinaryOps Opcode =
894                        Instruction::BinaryOpsEnd) override;
895   Value *reverseVector(Value *Vec) override;
896 };
897 
898 /// Encapsulate information regarding vectorization of a loop and its epilogue.
899 /// This information is meant to be updated and used across two stages of
900 /// epilogue vectorization.
901 struct EpilogueLoopVectorizationInfo {
902   ElementCount MainLoopVF = ElementCount::getFixed(0);
903   unsigned MainLoopUF = 0;
904   ElementCount EpilogueVF = ElementCount::getFixed(0);
905   unsigned EpilogueUF = 0;
906   BasicBlock *MainLoopIterationCountCheck = nullptr;
907   BasicBlock *EpilogueIterationCountCheck = nullptr;
908   BasicBlock *SCEVSafetyCheck = nullptr;
909   BasicBlock *MemSafetyCheck = nullptr;
910   Value *TripCount = nullptr;
911   Value *VectorTripCount = nullptr;
912 
913   EpilogueLoopVectorizationInfo(unsigned MVF, unsigned MUF, unsigned EVF,
914                                 unsigned EUF)
915       : MainLoopVF(ElementCount::getFixed(MVF)), MainLoopUF(MUF),
916         EpilogueVF(ElementCount::getFixed(EVF)), EpilogueUF(EUF) {
917     assert(EUF == 1 &&
918            "A high UF for the epilogue loop is likely not beneficial.");
919   }
920 };
921 
922 /// An extension of the inner loop vectorizer that creates a skeleton for a
923 /// vectorized loop that has its epilogue (residual) also vectorized.
924 /// The idea is to run the vplan on a given loop twice: first to set up the
925 /// skeleton and vectorize the main loop, and second to complete the skeleton
926 /// from the first step and vectorize the epilogue.  This is achieved by
927 /// deriving two concrete strategy classes from this base class and invoking
928 /// them in succession from the loop vectorizer planner.
929 class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
930 public:
931   InnerLoopAndEpilogueVectorizer(
932       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
933       DominatorTree *DT, const TargetLibraryInfo *TLI,
934       const TargetTransformInfo *TTI, AssumptionCache *AC,
935       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
936       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
937       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
938       GeneratedRTChecks &Checks)
939       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
940                             EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
941                             Checks),
942         EPI(EPI) {}
943 
944   // Override this function to handle the more complex control flow around the
945   // three loops.
946   BasicBlock *createVectorizedLoopSkeleton() final override {
947     return createEpilogueVectorizedLoopSkeleton();
948   }
949 
950   /// The interface for creating a vectorized skeleton using one of two
951   /// different strategies, each corresponding to one execution of the vplan
952   /// as described above.
953   virtual BasicBlock *createEpilogueVectorizedLoopSkeleton() = 0;
954 
955   /// Holds and updates state information required to vectorize the main loop
956   /// and its epilogue in two separate passes. This setup helps us avoid
957   /// regenerating and recomputing runtime safety checks. It also helps us to
958   /// shorten the iteration-count-check path length for the cases where the
959   /// iteration count of the loop is so small that the main vector loop is
960   /// completely skipped.
961   EpilogueLoopVectorizationInfo &EPI;
962 };
963 
964 /// A specialized derived class of inner loop vectorizer that performs
965 /// vectorization of *main* loops in the process of vectorizing loops and their
966 /// epilogues.
967 class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
968 public:
969   EpilogueVectorizerMainLoop(
970       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
971       DominatorTree *DT, const TargetLibraryInfo *TLI,
972       const TargetTransformInfo *TTI, AssumptionCache *AC,
973       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
974       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
975       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
976       GeneratedRTChecks &Check)
977       : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
978                                        EPI, LVL, CM, BFI, PSI, Check) {}
979   /// Implements the interface for creating a vectorized skeleton using the
980   /// *main loop* strategy (i.e. the first pass of vplan execution).
981   BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;
982 
983 protected:
984   /// Emits an iteration count bypass check once for the main loop (when \p
985   /// ForEpilogue is false) and once for the epilogue loop (when \p
986   /// ForEpilogue is true).
987   BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
988                                              bool ForEpilogue);
989   void printDebugTracesAtStart() override;
990   void printDebugTracesAtEnd() override;
991 };
992 
993 // A specialized derived class of inner loop vectorizer that performs
994 // vectorization of *epilogue* loops in the process of vectorizing loops and
995 // their epilogues.
996 class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
997 public:
998   EpilogueVectorizerEpilogueLoop(
999       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
1000       DominatorTree *DT, const TargetLibraryInfo *TLI,
1001       const TargetTransformInfo *TTI, AssumptionCache *AC,
1002       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
1003       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
1004       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
1005       GeneratedRTChecks &Checks)
1006       : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
1007                                        EPI, LVL, CM, BFI, PSI, Checks) {}
1008   /// Implements the interface for creating a vectorized skeleton using the
1009   /// *epilogue loop* strategy (i.e. the second pass of vplan execution).
1010   BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;
1011 
1012 protected:
1013   /// Emits an iteration count bypass check after the main vector loop has
1014   /// finished to see if there are any iterations left to execute by either
1015   /// the vector epilogue or the scalar epilogue.
1016   BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
1017                                                       BasicBlock *Bypass,
1018                                                       BasicBlock *Insert);
1019   void printDebugTracesAtStart() override;
1020   void printDebugTracesAtEnd() override;
1021 };
1022 } // end namespace llvm
1023 
1024 /// Look for a meaningful debug location on the instruction or its
1025 /// operands.
1026 static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
1027   if (!I)
1028     return I;
1029 
1030   DebugLoc Empty;
1031   if (I->getDebugLoc() != Empty)
1032     return I;
1033 
1034   for (Use &Op : I->operands()) {
1035     if (Instruction *OpInst = dyn_cast<Instruction>(Op))
1036       if (OpInst->getDebugLoc() != Empty)
1037         return OpInst;
1038   }
1039 
1040   return I;
1041 }
1042 
1043 void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
1044   if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
1045     const DILocation *DIL = Inst->getDebugLoc();
1046 
1047     // When an FSDiscriminator is enabled, we don't need to add the multiply
1048     // factors to the discriminators.
1049     if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
1050         !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
1051       // FIXME: For scalable vectors, assume vscale=1.
1052       auto NewDIL =
1053           DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
1054       if (NewDIL)
1055         B.SetCurrentDebugLocation(NewDIL.getValue());
1056       else
1057         LLVM_DEBUG(dbgs()
1058                    << "Failed to create new discriminator: "
1059                    << DIL->getFilename() << " Line: " << DIL->getLine());
1060     } else
1061       B.SetCurrentDebugLocation(DIL);
1062   } else
1063     B.SetCurrentDebugLocation(DebugLoc());
1064 }
1065 
1066 /// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
1067 /// is passed, the message relates to that particular instruction.
1068 #ifndef NDEBUG
1069 static void debugVectorizationMessage(const StringRef Prefix,
1070                                       const StringRef DebugMsg,
1071                                       Instruction *I) {
1072   dbgs() << "LV: " << Prefix << DebugMsg;
1073   if (I != nullptr)
1074     dbgs() << " " << *I;
1075   else
1076     dbgs() << '.';
1077   dbgs() << '\n';
1078 }
1079 #endif
1080 
1081 /// Create an analysis remark that explains why vectorization failed
1082 ///
1083 /// \p PassName is the name of the pass (e.g. can be AlwaysPrint).  \p
1084 /// RemarkName is the identifier for the remark.  If \p I is passed it is an
1085 /// instruction that prevents vectorization.  Otherwise \p TheLoop is used for
1086 /// the location of the remark.  \return the remark object that can be
1087 /// streamed to.
1088 static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
1089     StringRef RemarkName, Loop *TheLoop, Instruction *I) {
1090   Value *CodeRegion = TheLoop->getHeader();
1091   DebugLoc DL = TheLoop->getStartLoc();
1092 
1093   if (I) {
1094     CodeRegion = I->getParent();
1095     // If there is no debug location attached to the instruction, fall back to
1096     // using the loop's.
1097     if (I->getDebugLoc())
1098       DL = I->getDebugLoc();
1099   }
1100 
1101   return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
1102 }
1103 
1104 /// Return a value for Step multiplied by VF.
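/// For example, for Step = 2 and a fixed VF of 4 this returns the constant 8;
/// for a scalable VF of vscale x 4 it returns 8 * vscale (materialized via the
/// llvm.vscale intrinsic).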
1105 static Value *createStepForVF(IRBuilder<> &B, Constant *Step, ElementCount VF) {
1106   assert(isa<ConstantInt>(Step) && "Expected an integer step");
1107   Constant *StepVal = ConstantInt::get(
1108       Step->getType(),
1109       cast<ConstantInt>(Step)->getSExtValue() * VF.getKnownMinValue());
1110   return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
1111 }
1112 
1113 namespace llvm {
1114 
1115 /// Return the runtime value for VF.
1116 Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF) {
1117   Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
1118   return VF.isScalable() ? B.CreateVScale(EC) : EC;
1119 }
1120 
1121 void reportVectorizationFailure(const StringRef DebugMsg,
1122                                 const StringRef OREMsg, const StringRef ORETag,
1123                                 OptimizationRemarkEmitter *ORE, Loop *TheLoop,
1124                                 Instruction *I) {
1125   LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
1126   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1127   ORE->emit(
1128       createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
1129       << "loop not vectorized: " << OREMsg);
1130 }
1131 
1132 void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
1133                              OptimizationRemarkEmitter *ORE, Loop *TheLoop,
1134                              Instruction *I) {
1135   LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
1136   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1137   ORE->emit(
1138       createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
1139       << Msg);
1140 }
1141 
1142 } // end namespace llvm
1143 
1144 #ifndef NDEBUG
1145 /// \return string containing a file name and a line # for the given loop.
1146 static std::string getDebugLocString(const Loop *L) {
1147   std::string Result;
1148   if (L) {
1149     raw_string_ostream OS(Result);
1150     if (const DebugLoc LoopDbgLoc = L->getStartLoc())
1151       LoopDbgLoc.print(OS);
1152     else
1153       // Just print the module name.
1154       OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
1155     OS.flush();
1156   }
1157   return Result;
1158 }
1159 #endif
1160 
1161 void InnerLoopVectorizer::addNewMetadata(Instruction *To,
1162                                          const Instruction *Orig) {
1163   // If the loop was versioned with memchecks, add the corresponding no-alias
1164   // metadata.
1165   if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
1166     LVer->annotateInstWithNoAlias(To, Orig);
1167 }
1168 
1169 void InnerLoopVectorizer::addMetadata(Instruction *To,
1170                                       Instruction *From) {
1171   propagateMetadata(To, From);
1172   addNewMetadata(To, From);
1173 }
1174 
1175 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
1176                                       Instruction *From) {
1177   for (Value *V : To) {
1178     if (Instruction *I = dyn_cast<Instruction>(V))
1179       addMetadata(I, From);
1180   }
1181 }
1182 
1183 namespace llvm {
1184 
// Loop-vectorization cost-model hints for how the scalar epilogue loop should
// be lowered.
1187 enum ScalarEpilogueLowering {
1188 
1189   // The default: allowing scalar epilogues.
1190   CM_ScalarEpilogueAllowed,
1191 
1192   // Vectorization with OptForSize: don't allow epilogues.
1193   CM_ScalarEpilogueNotAllowedOptSize,
1194 
  // A special case of vectorization with OptForSize: loops with a very small
1196   // trip count are considered for vectorization under OptForSize, thereby
1197   // making sure the cost of their loop body is dominant, free of runtime
1198   // guards and scalar iteration overheads.
1199   CM_ScalarEpilogueNotAllowedLowTripLoop,
1200 
1201   // Loop hint predicate indicating an epilogue is undesired.
1202   CM_ScalarEpilogueNotNeededUsePredicate,
1203 
1204   // Directive indicating we must either tail fold or not vectorize
1205   CM_ScalarEpilogueNotAllowedUsePredicate
1206 };
1207 
1208 /// ElementCountComparator creates a total ordering for ElementCount
1209 /// for the purposes of using it in a set structure.
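/// For illustration: under this ordering all fixed VFs compare less than all
/// scalable VFs, e.g. 2 < 4 < 8 < vscale x 2 < vscale x 4.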
1210 struct ElementCountComparator {
1211   bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
1212     return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
1213            std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
1214   }
1215 };
1216 using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;
1217 
1218 /// LoopVectorizationCostModel - estimates the expected speedups due to
1219 /// vectorization.
/// In many cases vectorization is not profitable, and this can happen for a
/// number of reasons. In this class we mainly attempt to predict the
1222 /// expected speedup/slowdowns due to the supported instruction set. We use the
1223 /// TargetTransformInfo to query the different backends for the cost of
1224 /// different operations.
1225 class LoopVectorizationCostModel {
1226 public:
1227   LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
1228                              PredicatedScalarEvolution &PSE, LoopInfo *LI,
1229                              LoopVectorizationLegality *Legal,
1230                              const TargetTransformInfo &TTI,
1231                              const TargetLibraryInfo *TLI, DemandedBits *DB,
1232                              AssumptionCache *AC,
1233                              OptimizationRemarkEmitter *ORE, const Function *F,
1234                              const LoopVectorizeHints *Hints,
1235                              InterleavedAccessInfo &IAI)
1236       : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
1237         TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
1238         Hints(Hints), InterleaveInfo(IAI) {}
1239 
1240   /// \return An upper bound for the vectorization factors (both fixed and
1241   /// scalable). If the factors are 0, vectorization and interleaving should be
1242   /// avoided up front.
1243   FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);
1244 
1245   /// \return True if runtime checks are required for vectorization, and false
1246   /// otherwise.
1247   bool runtimeChecksRequired();
1248 
1249   /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every VF in \p CandidateVFs. If UserVF is not zero
1251   /// then this vectorization factor will be selected if vectorization is
1252   /// possible.
1253   VectorizationFactor
1254   selectVectorizationFactor(const ElementCountSet &CandidateVFs);
1255 
1256   VectorizationFactor
1257   selectEpilogueVectorizationFactor(const ElementCount MaxVF,
1258                                     const LoopVectorizationPlanner &LVP);
1259 
1260   /// Setup cost-based decisions for user vectorization factor.
1261   void selectUserVectorizationFactor(ElementCount UserVF) {
1262     collectUniformsAndScalars(UserVF);
1263     collectInstsToScalarize(UserVF);
1264   }
1265 
1266   /// \return The size (in bits) of the smallest and widest types in the code
1267   /// that needs to be vectorized. We ignore values that remain scalar such as
1268   /// 64 bit loop indices.
1269   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1270 
1271   /// \return The desired interleave count.
1272   /// If interleave count has been specified by metadata it will be returned.
1273   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1274   /// are the selected vectorization factor and the cost of the selected VF.
1275   unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);
1276 
  /// A memory access instruction may be vectorized in more than one way; the
  /// form of the instruction after vectorization depends on its cost.
  /// This function makes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decision map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
  /// avoid redundant calculations.
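  /// For illustration (a sketch of the intent, not an exhaustive list): for a
  /// given VF each Load/Store is assigned one of the InstWidening decisions
  /// declared below, e.g. a unit-stride access may become CM_Widen while an
  /// access through a non-consecutive pointer may become CM_GatherScatter or
  /// CM_Scalarize.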
1284   void setCostBasedWideningDecision(ElementCount VF);
1285 
1286   /// A struct that represents some properties of the register usage
1287   /// of a loop.
1288   struct RegisterUsage {
1289     /// Holds the number of loop invariant values that are used in the loop.
1290     /// The key is ClassID of target-provided register class.
1291     SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
1292     /// Holds the maximum number of concurrent live intervals in the loop.
1293     /// The key is ClassID of target-provided register class.
1294     SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
1295   };
1296 
  /// \return Information about the register usage of the loop for the
1298   /// given vectorization factors.
1299   SmallVector<RegisterUsage, 8>
1300   calculateRegisterUsage(ArrayRef<ElementCount> VFs);
1301 
1302   /// Collect values we want to ignore in the cost model.
1303   void collectValuesToIgnore();
1304 
  /// Split reductions into those that happen in the loop, and those that happen
  /// outside. In-loop reductions are collected into InLoopReductionChains.
1307   void collectInLoopReductions();
1308 
1309   /// Returns true if we should use strict in-order reductions for the given
1310   /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
1311   /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
1312   /// of FP operations.
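  /// For illustration: a floating-point accumulation such as "S += A[i]"
  /// without reassociation (no fast-math and no reordering hint) can only be
  /// vectorized as an ordered reduction that preserves the sequential order
  /// of the FP additions.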
1313   bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) {
1314     return EnableStrictReductions && !Hints->allowReordering() &&
1315            RdxDesc.isOrdered();
1316   }
1317 
1318   /// \returns The smallest bitwidth each instruction can be represented with.
1319   /// The vector equivalents of these instructions should be truncated to this
1320   /// type.
1321   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1322     return MinBWs;
1323   }
1324 
1325   /// \returns True if it is more profitable to scalarize instruction \p I for
1326   /// vectorization factor \p VF.
1327   bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
1328     assert(VF.isVector() &&
1329            "Profitable to scalarize relevant only for VF > 1.");
1330 
1331     // Cost model is not run in the VPlan-native path - return conservative
1332     // result until this changes.
1333     if (EnableVPlanNativePath)
1334       return false;
1335 
1336     auto Scalars = InstsToScalarize.find(VF);
1337     assert(Scalars != InstsToScalarize.end() &&
1338            "VF not yet analyzed for scalarization profitability");
1339     return Scalars->second.find(I) != Scalars->second.end();
1340   }
1341 
1342   /// Returns true if \p I is known to be uniform after vectorization.
1343   bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
1344     if (VF.isScalar())
1345       return true;
1346 
1347     // Cost model is not run in the VPlan-native path - return conservative
1348     // result until this changes.
1349     if (EnableVPlanNativePath)
1350       return false;
1351 
1352     auto UniformsPerVF = Uniforms.find(VF);
1353     assert(UniformsPerVF != Uniforms.end() &&
1354            "VF not yet analyzed for uniformity");
1355     return UniformsPerVF->second.count(I);
1356   }
1357 
1358   /// Returns true if \p I is known to be scalar after vectorization.
1359   bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
1360     if (VF.isScalar())
1361       return true;
1362 
1363     // Cost model is not run in the VPlan-native path - return conservative
1364     // result until this changes.
1365     if (EnableVPlanNativePath)
1366       return false;
1367 
1368     auto ScalarsPerVF = Scalars.find(VF);
1369     assert(ScalarsPerVF != Scalars.end() &&
1370            "Scalar values are not calculated for VF");
1371     return ScalarsPerVF->second.count(I);
1372   }
1373 
1374   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1375   /// for vectorization factor \p VF.
1376   bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
1377     return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
1378            !isProfitableToScalarize(I, VF) &&
1379            !isScalarAfterVectorization(I, VF);
1380   }
1381 
1382   /// Decision that was taken during cost calculation for memory instruction.
1383   enum InstWidening {
1384     CM_Unknown,
1385     CM_Widen,         // For consecutive accesses with stride +1.
1386     CM_Widen_Reverse, // For consecutive accesses with stride -1.
1387     CM_Interleave,
1388     CM_GatherScatter,
1389     CM_Scalarize
1390   };
1391 
1392   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1393   /// instruction \p I and vector width \p VF.
1394   void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
1395                            InstructionCost Cost) {
1396     assert(VF.isVector() && "Expected VF >=2");
1397     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1398   }
1399 
1400   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1401   /// interleaving group \p Grp and vector width \p VF.
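  /// For illustration: for a group of factor 2 with members {A, B} and insert
  /// position A, the decision \p W is recorded for both A and B, while the
  /// whole-group cost \p Cost is attached to A only (B is recorded with a
  /// cost of 0).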
1402   void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
1403                            ElementCount VF, InstWidening W,
1404                            InstructionCost Cost) {
1405     assert(VF.isVector() && "Expected VF >=2");
    /// Broadcast this decision to all instructions inside the group.
1407     /// But the cost will be assigned to one instruction only.
1408     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1409       if (auto *I = Grp->getMember(i)) {
1410         if (Grp->getInsertPos() == I)
1411           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1412         else
1413           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1414       }
1415     }
1416   }
1417 
1418   /// Return the cost model decision for the given instruction \p I and vector
1419   /// width \p VF. Return CM_Unknown if this instruction did not pass
1420   /// through the cost modeling.
1421   InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
1422     assert(VF.isVector() && "Expected VF to be a vector VF");
1423     // Cost model is not run in the VPlan-native path - return conservative
1424     // result until this changes.
1425     if (EnableVPlanNativePath)
1426       return CM_GatherScatter;
1427 
1428     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1429     auto Itr = WideningDecisions.find(InstOnVF);
1430     if (Itr == WideningDecisions.end())
1431       return CM_Unknown;
1432     return Itr->second.first;
1433   }
1434 
1435   /// Return the vectorization cost for the given instruction \p I and vector
1436   /// width \p VF.
1437   InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
1438     assert(VF.isVector() && "Expected VF >=2");
1439     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1440     assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1441            "The cost is not calculated");
1442     return WideningDecisions[InstOnVF].second;
1443   }
1444 
1445   /// Return True if instruction \p I is an optimizable truncate whose operand
1446   /// is an induction variable. Such a truncate will be removed by adding a new
1447   /// induction variable with the destination type.
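  /// For illustration: a "trunc i64 %iv to i32" whose result is used as an
  /// i32 offset can typically be removed by introducing a separate i32
  /// induction variable, provided %iv is an induction phi and the truncate
  /// is not already free for the target.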
1448   bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
1449     // If the instruction is not a truncate, return false.
1450     auto *Trunc = dyn_cast<TruncInst>(I);
1451     if (!Trunc)
1452       return false;
1453 
1454     // Get the source and destination types of the truncate.
1455     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1456     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1457 
1458     // If the truncate is free for the given types, return false. Replacing a
1459     // free truncate with an induction variable would add an induction variable
1460     // update instruction to each iteration of the loop. We exclude from this
1461     // check the primary induction variable since it will need an update
1462     // instruction regardless.
1463     Value *Op = Trunc->getOperand(0);
1464     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1465       return false;
1466 
1467     // If the truncated value is not an induction variable, return false.
1468     return Legal->isInductionPhi(Op);
1469   }
1470 
1471   /// Collects the instructions to scalarize for each predicated instruction in
1472   /// the loop.
1473   void collectInstsToScalarize(ElementCount VF);
1474 
1475   /// Collect Uniform and Scalar values for the given \p VF.
  /// The sets depend on the CM decisions for Load/Store instructions
  /// that may be vectorized as interleaved, gather-scatter or scalarized.
1478   void collectUniformsAndScalars(ElementCount VF) {
1479     // Do the analysis once.
1480     if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
1481       return;
1482     setCostBasedWideningDecision(VF);
1483     collectLoopUniforms(VF);
1484     collectLoopScalars(VF);
1485   }
1486 
1487   /// Returns true if the target machine supports masked store operation
1488   /// for the given \p DataType and kind of access to \p Ptr.
1489   bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const {
1490     return Legal->isConsecutivePtr(Ptr) &&
1491            TTI.isLegalMaskedStore(DataType, Alignment);
1492   }
1493 
1494   /// Returns true if the target machine supports masked load operation
1495   /// for the given \p DataType and kind of access to \p Ptr.
1496   bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const {
1497     return Legal->isConsecutivePtr(Ptr) &&
1498            TTI.isLegalMaskedLoad(DataType, Alignment);
1499   }
1500 
1501   /// Returns true if the target machine can represent \p V as a masked gather
1502   /// or scatter operation.
1503   bool isLegalGatherOrScatter(Value *V) {
1504     bool LI = isa<LoadInst>(V);
1505     bool SI = isa<StoreInst>(V);
1506     if (!LI && !SI)
1507       return false;
1508     auto *Ty = getLoadStoreType(V);
1509     Align Align = getLoadStoreAlignment(V);
1510     return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
1511            (SI && TTI.isLegalMaskedScatter(Ty, Align));
1512   }
1513 
1514   /// Returns true if the target machine supports all of the reduction
1515   /// variables found for the given VF.
1516   bool canVectorizeReductions(ElementCount VF) {
1517     return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
1518       const RecurrenceDescriptor &RdxDesc = Reduction.second;
1519       return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
1520     }));
1521   }
1522 
1523   /// Returns true if \p I is an instruction that will be scalarized with
1524   /// predication. Such instructions include conditional stores and
1525   /// instructions that may divide by zero.
  /// If a non-zero VF has been calculated, we check if I will be scalarized
  /// with predication for that VF.
1528   bool isScalarWithPredication(Instruction *I) const;
1529 
1530   // Returns true if \p I is an instruction that will be predicated either
1531   // through scalar predication or masked load/store or masked gather/scatter.
1532   // Superset of instructions that return true for isScalarWithPredication.
1533   bool isPredicatedInst(Instruction *I) {
1534     if (!blockNeedsPredication(I->getParent()))
1535       return false;
1536     // Loads and stores that need some form of masked operation are predicated
1537     // instructions.
1538     if (isa<LoadInst>(I) || isa<StoreInst>(I))
1539       return Legal->isMaskRequired(I);
1540     return isScalarWithPredication(I);
1541   }
1542 
1543   /// Returns true if \p I is a memory instruction with consecutive memory
1544   /// access that can be widened.
1545   bool
1546   memoryInstructionCanBeWidened(Instruction *I,
1547                                 ElementCount VF = ElementCount::getFixed(1));
1548 
1549   /// Returns true if \p I is a memory instruction in an interleaved-group
1550   /// of memory accesses that can be vectorized with wide vector loads/stores
1551   /// and shuffles.
1552   bool
1553   interleavedAccessCanBeWidened(Instruction *I,
1554                                 ElementCount VF = ElementCount::getFixed(1));
1555 
1556   /// Check if \p Instr belongs to any interleaved access group.
1557   bool isAccessInterleaved(Instruction *Instr) {
1558     return InterleaveInfo.isInterleaved(Instr);
1559   }
1560 
1561   /// Get the interleaved access group that \p Instr belongs to.
1562   const InterleaveGroup<Instruction> *
1563   getInterleavedAccessGroup(Instruction *Instr) {
1564     return InterleaveInfo.getInterleaveGroup(Instr);
1565   }
1566 
1567   /// Returns true if we're required to use a scalar epilogue for at least
1568   /// the final iteration of the original loop.
1569   bool requiresScalarEpilogue() const {
1570     if (!isScalarEpilogueAllowed())
1571       return false;
    // If we might exit from anywhere but the latch, we must run the exiting
    // iteration in scalar form.
1574     if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
1575       return true;
1576     return InterleaveInfo.requiresScalarEpilogue();
1577   }
1578 
  /// Returns true if a scalar epilogue is allowed, i.e. it has not been
  /// disabled due to optsize or a loop hint annotation.
1581   bool isScalarEpilogueAllowed() const {
1582     return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1583   }
1584 
  /// Returns true if all loop blocks should be masked to fold the loop's tail.
1586   bool foldTailByMasking() const { return FoldTailByMasking; }
1587 
1588   bool blockNeedsPredication(BasicBlock *BB) const {
1589     return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1590   }
1591 
1592   /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
1593   /// nodes to the chain of instructions representing the reductions. Uses a
1594   /// MapVector to ensure deterministic iteration order.
1595   using ReductionChainMap =
1596       SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;
1597 
1598   /// Return the chain of instructions representing an inloop reduction.
1599   const ReductionChainMap &getInLoopReductionChains() const {
1600     return InLoopReductionChains;
1601   }
1602 
1603   /// Returns true if the Phi is part of an inloop reduction.
1604   bool isInLoopReduction(PHINode *Phi) const {
1605     return InLoopReductionChains.count(Phi);
1606   }
1607 
1608   /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1609   /// with factor VF.  Return the cost of the instruction, including
1610   /// scalarization overhead if it's needed.
1611   InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;
1612 
1613   /// Estimate cost of a call instruction CI if it were vectorized with factor
1614   /// VF. Return the cost of the instruction, including scalarization overhead
1615   /// if it's needed. The flag NeedToScalarize shows if the call needs to be
  /// scalarized, i.e. either a vector version isn't available or it is too
  /// expensive.
1618   InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
1619                                     bool &NeedToScalarize) const;
1620 
1621   /// Returns true if the per-lane cost of VectorizationFactor A is lower than
1622   /// that of B.
1623   bool isMoreProfitable(const VectorizationFactor &A,
1624                         const VectorizationFactor &B) const;
1625 
1626   /// Invalidates decisions already taken by the cost model.
1627   void invalidateCostModelingDecisions() {
1628     WideningDecisions.clear();
1629     Uniforms.clear();
1630     Scalars.clear();
1631   }
1632 
1633 private:
1634   unsigned NumPredStores = 0;
1635 
1636   /// \return An upper bound for the vectorization factors for both
1637   /// fixed and scalable vectorization, where the minimum-known number of
1638   /// elements is a power-of-2 larger than zero. If scalable vectorization is
1639   /// disabled or unsupported, then the scalable part will be equal to
1640   /// ElementCount::getScalable(0).
1641   FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
1642                                            ElementCount UserVF);
1643 
  /// \return the maximized element count based on the target's vector
1645   /// registers and the loop trip-count, but limited to a maximum safe VF.
1646   /// This is a helper function of computeFeasibleMaxVF.
1647   /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
1648   /// issue that occurred on one of the buildbots which cannot be reproduced
  /// without having access to the proprietary compiler (see comments on
1650   /// D98509). The issue is currently under investigation and this workaround
1651   /// will be removed as soon as possible.
1652   ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
1653                                        unsigned SmallestType,
1654                                        unsigned WidestType,
1655                                        const ElementCount &MaxSafeVF);
1656 
1657   /// \return the maximum legal scalable VF, based on the safe max number
1658   /// of elements.
1659   ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1660 
  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
1668   using VectorizationCostTy = std::pair<InstructionCost, bool>;
1669 
1670   /// Returns the expected execution cost. The unit of the cost does
1671   /// not matter because we use the 'cost' units to compare different
1672   /// vector widths. The cost that is returned is *not* normalized by
1673   /// the factor width.
1674   VectorizationCostTy expectedCost(ElementCount VF);
1675 
1676   /// Returns the execution time cost of an instruction for a given vector
1677   /// width. Vector width of one means scalar.
1678   VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1679 
1680   /// The cost-computation logic from getInstructionCost which provides
1681   /// the vector type as an output parameter.
1682   InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
1683                                      Type *&VectorTy);
1684 
1685   /// Return the cost of instructions in an inloop reduction pattern, if I is
1686   /// part of that pattern.
1687   InstructionCost getReductionPatternCost(Instruction *I, ElementCount VF,
1688                                           Type *VectorTy,
1689                                           TTI::TargetCostKind CostKind);
1690 
1691   /// Calculate vectorization cost of memory instruction \p I.
1692   InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1693 
  /// The cost computation for a scalarized memory instruction.
1695   InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1696 
  /// The cost computation for an interleaving group of memory instructions.
1698   InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1699 
1700   /// The cost computation for Gather/Scatter instruction.
1701   InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1702 
1703   /// The cost computation for widening instruction \p I with consecutive
1704   /// memory access.
1705   InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1706 
1707   /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1708   /// Load: scalar load + broadcast.
1709   /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1710   /// element)
1711   InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1712 
1713   /// Estimate the overhead of scalarizing an instruction. This is a
1714   /// convenience wrapper for the type-based getScalarizationOverhead API.
1715   InstructionCost getScalarizationOverhead(Instruction *I,
1716                                            ElementCount VF) const;
1717 
  /// Returns whether the instruction is a load or store and will be emitted
1719   /// as a vector operation.
1720   bool isConsecutiveLoadOrStore(Instruction *I);
1721 
1722   /// Returns true if an artificially high cost for emulated masked memrefs
1723   /// should be used.
1724   bool useEmulatedMaskMemRefHack(Instruction *I);
1725 
1726   /// Map of scalar integer values to the smallest bitwidth they can be legally
1727   /// represented as. The vector equivalents of these values should be truncated
1728   /// to this type.
1729   MapVector<Instruction *, uint64_t> MinBWs;
1730 
1731   /// A type representing the costs for instructions if they were to be
1732   /// scalarized rather than vectorized. The entries are Instruction-Cost
1733   /// pairs.
1734   using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1735 
  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
1738   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1739 
1740   /// Records whether it is allowed to have the original scalar loop execute at
1741   /// least once. This may be needed as a fallback loop in case runtime
1742   /// aliasing/dependence checks fail, or to handle the tail/remainder
1743   /// iterations when the trip count is unknown or doesn't divide by the VF,
1744   /// or as a peel-loop to handle gaps in interleave-groups.
1745   /// Under optsize and when the trip count is very small we don't allow any
1746   /// iterations to execute in the scalar loop.
1747   ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1748 
  /// All blocks of the loop are to be masked to fold the tail of the scalar
  /// iterations.
1750   bool FoldTailByMasking = false;
1751 
1752   /// A map holding scalar costs for different vectorization factors. The
1753   /// presence of a cost for an instruction in the mapping indicates that the
1754   /// instruction will be scalarized when vectorizing with the associated
1755   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1756   DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1757 
1758   /// Holds the instructions known to be uniform after vectorization.
1759   /// The data is collected per VF.
1760   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1761 
1762   /// Holds the instructions known to be scalar after vectorization.
1763   /// The data is collected per VF.
1764   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1765 
1766   /// Holds the instructions (address computations) that are forced to be
1767   /// scalarized.
1768   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1769 
1770   /// PHINodes of the reductions that should be expanded in-loop along with
1771   /// their associated chains of reduction operations, in program order from top
1772   /// (PHI) to bottom
1773   ReductionChainMap InLoopReductionChains;
1774 
1775   /// A Map of inloop reduction operations and their immediate chain operand.
1776   /// FIXME: This can be removed once reductions can be costed correctly in
1777   /// vplan. This was added to allow quick lookup to the inloop operations,
1778   /// without having to loop through InLoopReductionChains.
1779   DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1780 
1781   /// Returns the expected difference in cost from scalarizing the expression
1782   /// feeding a predicated instruction \p PredInst. The instructions to
1783   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1784   /// non-negative return value implies the expression will be scalarized.
1785   /// Currently, only single-use chains are considered for scalarization.
1786   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1787                               ElementCount VF);
1788 
1789   /// Collect the instructions that are uniform after vectorization. An
1790   /// instruction is uniform if we represent it with a single scalar value in
1791   /// the vectorized loop corresponding to each vector iteration. Examples of
1792   /// uniform instructions include pointer operands of consecutive or
1793   /// interleaved memory accesses. Note that although uniformity implies an
1794   /// instruction will be scalar, the reverse is not true. In general, a
1795   /// scalarized instruction will be represented by VF scalar values in the
1796   /// vectorized loop, each corresponding to an iteration of the original
1797   /// scalar loop.
1798   void collectLoopUniforms(ElementCount VF);
1799 
1800   /// Collect the instructions that are scalar after vectorization. An
1801   /// instruction is scalar if it is known to be uniform or will be scalarized
1802   /// during vectorization. Non-uniform scalarized instructions will be
1803   /// represented by VF values in the vectorized loop, each corresponding to an
1804   /// iteration of the original scalar loop.
1805   void collectLoopScalars(ElementCount VF);
1806 
1807   /// Keeps cost model vectorization decision and cost for instructions.
1808   /// Right now it is used for memory instructions only.
1809   using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1810                                 std::pair<InstWidening, InstructionCost>>;
1811 
1812   DecisionList WideningDecisions;
1813 
1814   /// Returns true if \p V is expected to be vectorized and it needs to be
1815   /// extracted.
1816   bool needsExtract(Value *V, ElementCount VF) const {
1817     Instruction *I = dyn_cast<Instruction>(V);
1818     if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1819         TheLoop->isLoopInvariant(I))
1820       return false;
1821 
1822     // Assume we can vectorize V (and hence we need extraction) if the
1823     // scalars are not computed yet. This can happen, because it is called
1824     // via getScalarizationOverhead from setCostBasedWideningDecision, before
1825     // the scalars are collected. That should be a safe assumption in most
1826     // cases, because we check if the operands have vectorizable types
1827     // beforehand in LoopVectorizationLegality.
1828     return Scalars.find(VF) == Scalars.end() ||
1829            !isScalarAfterVectorization(I, VF);
1830   };
1831 
1832   /// Returns a range containing only operands needing to be extracted.
1833   SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1834                                                    ElementCount VF) const {
1835     return SmallVector<Value *, 4>(make_filter_range(
1836         Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
1837   }
1838 
1839   /// Determines if we have the infrastructure to vectorize loop \p L and its
1840   /// epilogue, assuming the main loop is vectorized by \p VF.
1841   bool isCandidateForEpilogueVectorization(const Loop &L,
1842                                            const ElementCount VF) const;
1843 
1844   /// Returns true if epilogue vectorization is considered profitable, and
1845   /// false otherwise.
1846   /// \p VF is the vectorization factor chosen for the original loop.
1847   bool isEpilogueVectorizationProfitable(const ElementCount VF) const;
1848 
1849 public:
1850   /// The loop that we evaluate.
1851   Loop *TheLoop;
1852 
1853   /// Predicated scalar evolution analysis.
1854   PredicatedScalarEvolution &PSE;
1855 
1856   /// Loop Info analysis.
1857   LoopInfo *LI;
1858 
1859   /// Vectorization legality.
1860   LoopVectorizationLegality *Legal;
1861 
1862   /// Vector target information.
1863   const TargetTransformInfo &TTI;
1864 
1865   /// Target Library Info.
1866   const TargetLibraryInfo *TLI;
1867 
1868   /// Demanded bits analysis.
1869   DemandedBits *DB;
1870 
1871   /// Assumption cache.
1872   AssumptionCache *AC;
1873 
1874   /// Interface to emit optimization remarks.
1875   OptimizationRemarkEmitter *ORE;
1876 
1877   const Function *TheFunction;
1878 
1879   /// Loop Vectorize Hint.
1880   const LoopVectorizeHints *Hints;
1881 
  /// The interleave access information contains groups of interleaved accesses
  /// with the same stride that are close to each other.
1884   InterleavedAccessInfo &InterleaveInfo;
1885 
1886   /// Values to ignore in the cost model.
1887   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1888 
1889   /// Values to ignore in the cost model when VF > 1.
1890   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1891 
1892   /// Profitable vector factors.
1893   SmallVector<VectorizationFactor, 8> ProfitableVFs;
1894 };
1895 } // end namespace llvm
1896 
1897 /// Helper struct to manage generating runtime checks for vectorization.
1898 ///
/// The runtime checks are created up-front in temporary blocks that are kept
/// un-linked from the existing IR, which allows their cost to be estimated
/// more accurately. After deciding to vectorize, the checks are moved back
/// into the IR. If we decide not to vectorize, the temporary blocks are
/// removed completely.
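/// A typical (illustrative) flow: Create() builds the SCEV and memory checks
/// in detached blocks, their cost is inspected while planning, and then either
/// emitSCEVChecks()/emitMemRuntimeChecks() splice the blocks back into the CFG
/// or the destructor deletes them again if vectorization is abandoned.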
1903 class GeneratedRTChecks {
1904   /// Basic block which contains the generated SCEV checks, if any.
1905   BasicBlock *SCEVCheckBlock = nullptr;
1906 
1907   /// The value representing the result of the generated SCEV checks. If it is
1908   /// nullptr, either no SCEV checks have been generated or they have been used.
1909   Value *SCEVCheckCond = nullptr;
1910 
1911   /// Basic block which contains the generated memory runtime checks, if any.
1912   BasicBlock *MemCheckBlock = nullptr;
1913 
1914   /// The value representing the result of the generated memory runtime checks.
1915   /// If it is nullptr, either no memory runtime checks have been generated or
1916   /// they have been used.
1917   Instruction *MemRuntimeCheckCond = nullptr;
1918 
1919   DominatorTree *DT;
1920   LoopInfo *LI;
1921 
1922   SCEVExpander SCEVExp;
1923   SCEVExpander MemCheckExp;
1924 
1925 public:
1926   GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI,
1927                     const DataLayout &DL)
1928       : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"),
1929         MemCheckExp(SE, DL, "scev.check") {}
1930 
1931   /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
1932   /// accurately estimate the cost of the runtime checks. The blocks are
  /// un-linked from the IR and are added back during vector code generation. If
1934   /// there is no vector code generation, the check blocks are removed
1935   /// completely.
1936   void Create(Loop *L, const LoopAccessInfo &LAI,
1937               const SCEVUnionPredicate &UnionPred) {
1938 
1939     BasicBlock *LoopHeader = L->getHeader();
1940     BasicBlock *Preheader = L->getLoopPreheader();
1941 
1942     // Use SplitBlock to create blocks for SCEV & memory runtime checks to
1943     // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
1944     // may be used by SCEVExpander. The blocks will be un-linked from their
1945     // predecessors and removed from LI & DT at the end of the function.
1946     if (!UnionPred.isAlwaysTrue()) {
1947       SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
1948                                   nullptr, "vector.scevcheck");
1949 
1950       SCEVCheckCond = SCEVExp.expandCodeForPredicate(
1951           &UnionPred, SCEVCheckBlock->getTerminator());
1952     }
1953 
1954     const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
1955     if (RtPtrChecking.Need) {
1956       auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
1957       MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
1958                                  "vector.memcheck");
1959 
1960       std::tie(std::ignore, MemRuntimeCheckCond) =
1961           addRuntimeChecks(MemCheckBlock->getTerminator(), L,
1962                            RtPtrChecking.getChecks(), MemCheckExp);
1963       assert(MemRuntimeCheckCond &&
1964              "no RT checks generated although RtPtrChecking "
1965              "claimed checks are required");
1966     }
1967 
1968     if (!MemCheckBlock && !SCEVCheckBlock)
1969       return;
1970 
1971     // Unhook the temporary block with the checks, update various places
1972     // accordingly.
1973     if (SCEVCheckBlock)
1974       SCEVCheckBlock->replaceAllUsesWith(Preheader);
1975     if (MemCheckBlock)
1976       MemCheckBlock->replaceAllUsesWith(Preheader);
1977 
1978     if (SCEVCheckBlock) {
1979       SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
1980       new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
1981       Preheader->getTerminator()->eraseFromParent();
1982     }
1983     if (MemCheckBlock) {
1984       MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
1985       new UnreachableInst(Preheader->getContext(), MemCheckBlock);
1986       Preheader->getTerminator()->eraseFromParent();
1987     }
1988 
1989     DT->changeImmediateDominator(LoopHeader, Preheader);
1990     if (MemCheckBlock) {
1991       DT->eraseNode(MemCheckBlock);
1992       LI->removeBlock(MemCheckBlock);
1993     }
1994     if (SCEVCheckBlock) {
1995       DT->eraseNode(SCEVCheckBlock);
1996       LI->removeBlock(SCEVCheckBlock);
1997     }
1998   }
1999 
2000   /// Remove the created SCEV & memory runtime check blocks & instructions, if
2001   /// unused.
2002   ~GeneratedRTChecks() {
2003     SCEVExpanderCleaner SCEVCleaner(SCEVExp, *DT);
2004     SCEVExpanderCleaner MemCheckCleaner(MemCheckExp, *DT);
2005     if (!SCEVCheckCond)
2006       SCEVCleaner.markResultUsed();
2007 
2008     if (!MemRuntimeCheckCond)
2009       MemCheckCleaner.markResultUsed();
2010 
2011     if (MemRuntimeCheckCond) {
2012       auto &SE = *MemCheckExp.getSE();
2013       // Memory runtime check generation creates compares that use expanded
2014       // values. Remove them before running the SCEVExpanderCleaners.
2015       for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
2016         if (MemCheckExp.isInsertedInstruction(&I))
2017           continue;
2018         SE.forgetValue(&I);
2019         SE.eraseValueFromMap(&I);
2020         I.eraseFromParent();
2021       }
2022     }
2023     MemCheckCleaner.cleanup();
2024     SCEVCleaner.cleanup();
2025 
2026     if (SCEVCheckCond)
2027       SCEVCheckBlock->eraseFromParent();
2028     if (MemRuntimeCheckCond)
2029       MemCheckBlock->eraseFromParent();
2030   }
2031 
2032   /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and
2033   /// adjusts the branches to branch to the vector preheader or \p Bypass,
2034   /// depending on the generated condition.
2035   BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass,
2036                              BasicBlock *LoopVectorPreHeader,
2037                              BasicBlock *LoopExitBlock) {
2038     if (!SCEVCheckCond)
2039       return nullptr;
2040     if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond))
2041       if (C->isZero())
2042         return nullptr;
2043 
2044     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2045 
2046     BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock);
    // If the vector-loop preheader is inside a loop, add the SCEV check block
    // to that loop as well.
2048     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2049       PL->addBasicBlockToLoop(SCEVCheckBlock, *LI);
2050 
2051     SCEVCheckBlock->getTerminator()->eraseFromParent();
2052     SCEVCheckBlock->moveBefore(LoopVectorPreHeader);
2053     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2054                                                 SCEVCheckBlock);
2055 
2056     DT->addNewBlock(SCEVCheckBlock, Pred);
2057     DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock);
2058 
2059     ReplaceInstWithInst(
2060         SCEVCheckBlock->getTerminator(),
2061         BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond));
2062     // Mark the check as used, to prevent it from being removed during cleanup.
2063     SCEVCheckCond = nullptr;
2064     return SCEVCheckBlock;
2065   }
2066 
2067   /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts
2068   /// the branches to branch to the vector preheader or \p Bypass, depending on
2069   /// the generated condition.
2070   BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass,
2071                                    BasicBlock *LoopVectorPreHeader) {
    // Check if we generated code that checks at runtime whether arrays overlap.
2073     if (!MemRuntimeCheckCond)
2074       return nullptr;
2075 
2076     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2077     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2078                                                 MemCheckBlock);
2079 
2080     DT->addNewBlock(MemCheckBlock, Pred);
2081     DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock);
2082     MemCheckBlock->moveBefore(LoopVectorPreHeader);
2083 
2084     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2085       PL->addBasicBlockToLoop(MemCheckBlock, *LI);
2086 
2087     ReplaceInstWithInst(
2088         MemCheckBlock->getTerminator(),
2089         BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond));
2090     MemCheckBlock->getTerminator()->setDebugLoc(
2091         Pred->getTerminator()->getDebugLoc());
2092 
2093     // Mark the check as used, to prevent it from being removed during cleanup.
2094     MemRuntimeCheckCond = nullptr;
2095     return MemCheckBlock;
2096   }
2097 };
2098 
2099 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
// vectorization. The loop needs to be annotated with #pragma omp simd
// simdlen(#) or #pragma clang loop vectorize(enable) vectorize_width(#). If the
2102 // vector length information is not provided, vectorization is not considered
2103 // explicit. Interleave hints are not allowed either. These limitations will be
2104 // relaxed in the future.
// Please note that we are currently forced to abuse the pragma 'clang
2106 // vectorize' semantics. This pragma provides *auto-vectorization hints*
2107 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
2108 // provides *explicit vectorization hints* (LV can bypass legal checks and
2109 // assume that vectorization is legal). However, both hints are implemented
2110 // using the same metadata (llvm.loop.vectorize, processed by
2111 // LoopVectorizeHints). This will be fixed in the future when the native IR
2112 // representation for pragma 'omp simd' is introduced.
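//
// For illustration (assuming the usual clang spelling of the hint), an outer
// loop that this function would accept looks roughly like:
//
//   #pragma clang loop vectorize(enable) vectorize_width(4)
//   for (int i = 0; i < N; ++i)
//     for (int j = 0; j < M; ++j)
//       A[i][j] += B[i][j];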
2113 static bool isExplicitVecOuterLoop(Loop *OuterLp,
2114                                    OptimizationRemarkEmitter *ORE) {
2115   assert(!OuterLp->isInnermost() && "This is not an outer loop");
2116   LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
2117 
2118   // Only outer loops with an explicit vectorization hint are supported.
2119   // Unannotated outer loops are ignored.
2120   if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
2121     return false;
2122 
2123   Function *Fn = OuterLp->getHeader()->getParent();
2124   if (!Hints.allowVectorization(Fn, OuterLp,
2125                                 true /*VectorizeOnlyWhenForced*/)) {
2126     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
2127     return false;
2128   }
2129 
2130   if (Hints.getInterleave() > 1) {
2131     // TODO: Interleave support is future work.
2132     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
2133                          "outer loops.\n");
2134     Hints.emitRemarkWithHints();
2135     return false;
2136   }
2137 
2138   return true;
2139 }
2140 
2141 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
2142                                   OptimizationRemarkEmitter *ORE,
2143                                   SmallVectorImpl<Loop *> &V) {
2144   // Collect inner loops and outer loops without irreducible control flow. For
2145   // now, only collect outer loops that have explicit vectorization hints. If we
2146   // are stress testing the VPlan H-CFG construction, we collect the outermost
2147   // loop of every loop nest.
2148   if (L.isInnermost() || VPlanBuildStressTest ||
2149       (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
2150     LoopBlocksRPO RPOT(&L);
2151     RPOT.perform(LI);
2152     if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
2153       V.push_back(&L);
2154       // TODO: Collect inner loops inside marked outer loops in case
2155       // vectorization fails for the outer loop. Do not invoke
2156       // 'containsIrreducibleCFG' again for inner loops when the outer loop is
2157       // already known to be reducible. We can use an inherited attribute for
2158       // that.
2159       return;
2160     }
2161   }
2162   for (Loop *InnerL : L)
2163     collectSupportedLoops(*InnerL, LI, ORE, V);
2164 }
2165 
2166 namespace {
2167 
2168 /// The LoopVectorize Pass.
2169 struct LoopVectorize : public FunctionPass {
2170   /// Pass identification, replacement for typeid
2171   static char ID;
2172 
2173   LoopVectorizePass Impl;
2174 
2175   explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
2176                          bool VectorizeOnlyWhenForced = false)
2177       : FunctionPass(ID),
2178         Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
2179     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
2180   }
2181 
2182   bool runOnFunction(Function &F) override {
2183     if (skipFunction(F))
2184       return false;
2185 
2186     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2187     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2188     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
2189     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2190     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
2191     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
2192     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
2193     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2194     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2195     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
2196     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
2197     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2198     auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
2199 
2200     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
2201         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
2202 
2203     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
2204                         GetLAA, *ORE, PSI).MadeAnyChange;
2205   }
2206 
2207   void getAnalysisUsage(AnalysisUsage &AU) const override {
2208     AU.addRequired<AssumptionCacheTracker>();
2209     AU.addRequired<BlockFrequencyInfoWrapperPass>();
2210     AU.addRequired<DominatorTreeWrapperPass>();
2211     AU.addRequired<LoopInfoWrapperPass>();
2212     AU.addRequired<ScalarEvolutionWrapperPass>();
2213     AU.addRequired<TargetTransformInfoWrapperPass>();
2214     AU.addRequired<AAResultsWrapperPass>();
2215     AU.addRequired<LoopAccessLegacyAnalysis>();
2216     AU.addRequired<DemandedBitsWrapperPass>();
2217     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2218     AU.addRequired<InjectTLIMappingsLegacy>();
2219 
    // We currently do not preserve LoopInfo/dominator analyses with outer loop
2221     // vectorization. Until this is addressed, mark these analyses as preserved
2222     // only for non-VPlan-native path.
2223     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
2224     if (!EnableVPlanNativePath) {
2225       AU.addPreserved<LoopInfoWrapperPass>();
2226       AU.addPreserved<DominatorTreeWrapperPass>();
2227     }
2228 
2229     AU.addPreserved<BasicAAWrapperPass>();
2230     AU.addPreserved<GlobalsAAWrapperPass>();
2231     AU.addRequired<ProfileSummaryInfoWrapperPass>();
2232   }
2233 };
2234 
2235 } // end anonymous namespace
2236 
2237 //===----------------------------------------------------------------------===//
2238 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
2239 // LoopVectorizationCostModel and LoopVectorizationPlanner.
2240 //===----------------------------------------------------------------------===//
2241 
2242 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
  // We need to place the broadcast of invariant variables outside the loop,
  // but only if it's proven safe to do so. Otherwise, the broadcast will stay
  // inside the vector loop body.
2246   Instruction *Instr = dyn_cast<Instruction>(V);
2247   bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
2248                      (!Instr ||
2249                       DT->dominates(Instr->getParent(), LoopVectorPreHeader));
2250   // Place the code for broadcasting invariant variables in the new preheader.
2251   IRBuilder<>::InsertPointGuard Guard(Builder);
2252   if (SafeToHoist)
2253     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2254 
2255   // Broadcast the scalar into all locations in the vector.
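  // For illustration (assuming an i32 loop-invariant value and a fixed VF of
  // 4), the splat below is roughly an insertelement into lane 0 of a
  // <4 x i32> vector followed by a shufflevector with an all-zero mask, so
  // every lane of the result holds the scalar.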
2256   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
2257 
2258   return Shuf;
2259 }
2260 
2261 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
2262     const InductionDescriptor &II, Value *Step, Value *Start,
2263     Instruction *EntryVal, VPValue *Def, VPValue *CastDef,
2264     VPTransformState &State) {
2265   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
2266          "Expected either an induction phi-node or a truncate of it!");
2267 
2268   // Construct the initial value of the vector IV in the vector loop preheader
2269   auto CurrIP = Builder.saveIP();
2270   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2271   if (isa<TruncInst>(EntryVal)) {
2272     assert(Start->getType()->isIntegerTy() &&
2273            "Truncation requires an integer type");
2274     auto *TruncType = cast<IntegerType>(EntryVal->getType());
2275     Step = Builder.CreateTrunc(Step, TruncType);
2276     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
2277   }
2278   Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
2279   Value *SteppedStart =
2280       getStepVector(SplatStart, 0, Step, II.getInductionOpcode());
2281 
2282   // We create vector phi nodes for both integer and floating-point induction
2283   // variables. Here, we determine the kind of arithmetic we will perform.
2284   Instruction::BinaryOps AddOp;
2285   Instruction::BinaryOps MulOp;
2286   if (Step->getType()->isIntegerTy()) {
2287     AddOp = Instruction::Add;
2288     MulOp = Instruction::Mul;
2289   } else {
2290     AddOp = II.getInductionOpcode();
2291     MulOp = Instruction::FMul;
2292   }
2293 
2294   // Multiply the vectorization factor by the step using integer or
2295   // floating-point arithmetic as appropriate.
2296   Type *StepType = Step->getType();
2297   if (Step->getType()->isFloatingPointTy())
2298     StepType = IntegerType::get(StepType->getContext(),
2299                                 StepType->getScalarSizeInBits());
2300   Value *RuntimeVF = getRuntimeVF(Builder, StepType, VF);
2301   if (Step->getType()->isFloatingPointTy())
2302     RuntimeVF = Builder.CreateSIToFP(RuntimeVF, Step->getType());
2303   Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF);
2304 
2305   // Create a vector splat to use in the induction update.
2306   //
2307   // FIXME: If the step is non-constant, we create the vector splat with
2308   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
2309   //        handle a constant vector splat.
2310   Value *SplatVF = isa<Constant>(Mul)
2311                        ? ConstantVector::getSplat(VF, cast<Constant>(Mul))
2312                        : Builder.CreateVectorSplat(VF, Mul);
2313   Builder.restoreIP(CurrIP);
2314 
2315   // We may need to add the step a number of times, depending on the unroll
2316   // factor. The last of those goes into the PHI.
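  // For illustration (assuming a fixed VF of 4, UF of 2, start 0 and step 1):
  // the phi starts at <0, 1, 2, 3>, part 0 uses that value, part 1 uses
  // <4, 5, 6, 7>, and the value fed back into the phi for the next vector
  // iteration is <8, 9, 10, 11> (each step adds the splat of VF * Step).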
2317   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
2318                                     &*LoopVectorBody->getFirstInsertionPt());
2319   VecInd->setDebugLoc(EntryVal->getDebugLoc());
2320   Instruction *LastInduction = VecInd;
2321   for (unsigned Part = 0; Part < UF; ++Part) {
2322     State.set(Def, LastInduction, Part);
2323 
2324     if (isa<TruncInst>(EntryVal))
2325       addMetadata(LastInduction, EntryVal);
2326     recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, CastDef,
2327                                           State, Part);
2328 
2329     LastInduction = cast<Instruction>(
2330         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"));
2331     LastInduction->setDebugLoc(EntryVal->getDebugLoc());
2332   }
2333 
2334   // Move the last step to the end of the latch block. This ensures consistent
2335   // placement of all induction updates.
2336   auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
2337   auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
2338   auto *ICmp = cast<Instruction>(Br->getCondition());
2339   LastInduction->moveBefore(ICmp);
2340   LastInduction->setName("vec.ind.next");
2341 
2342   VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
2343   VecInd->addIncoming(LastInduction, LoopVectorLatch);
2344 }
2345 
2346 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
2347   return Cost->isScalarAfterVectorization(I, VF) ||
2348          Cost->isProfitableToScalarize(I, VF);
2349 }
2350 
2351 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
2352   if (shouldScalarizeInstruction(IV))
2353     return true;
2354   auto isScalarInst = [&](User *U) -> bool {
2355     auto *I = cast<Instruction>(U);
2356     return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
2357   };
2358   return llvm::any_of(IV->users(), isScalarInst);
2359 }
2360 
2361 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast(
2362     const InductionDescriptor &ID, const Instruction *EntryVal,
2363     Value *VectorLoopVal, VPValue *CastDef, VPTransformState &State,
2364     unsigned Part, unsigned Lane) {
2365   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
2366          "Expected either an induction phi-node or a truncate of it!");
2367 
  // This induction variable is not the phi from the original loop but the
  // newly-created IV, based on the proof that the casted Phi is equal to the
  // uncasted Phi in the vectorized loop (possibly under a runtime guard). It
  // re-uses the same InductionDescriptor as the original IV, but we don't
  // have to do any recording in this case - that is done when the original IV
  // is processed.
2374   if (isa<TruncInst>(EntryVal))
2375     return;
2376 
2377   const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
2378   if (Casts.empty())
2379     return;
  // Only the first Cast instruction in the Casts vector is of interest.
  // The rest of the Casts (if any exist) have no uses outside the
  // induction update chain itself.
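  // A Lane of UINT_MAX means we record a per-part vector value; otherwise we
  // record the value for that specific lane.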
2383   if (Lane < UINT_MAX)
2384     State.set(CastDef, VectorLoopVal, VPIteration(Part, Lane));
2385   else
2386     State.set(CastDef, VectorLoopVal, Part);
2387 }
2388 
2389 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, Value *Start,
2390                                                 TruncInst *Trunc, VPValue *Def,
2391                                                 VPValue *CastDef,
2392                                                 VPTransformState &State) {
2393   assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
2394          "Primary induction variable must have an integer type");
2395 
2396   auto II = Legal->getInductionVars().find(IV);
2397   assert(II != Legal->getInductionVars().end() && "IV is not an induction");
2398 
2399   auto ID = II->second;
2400   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
2401 
2402   // The value from the original loop to which we are mapping the new induction
2403   // variable.
2404   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
2405 
2406   auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
2407 
  // Generate code for the induction step. Note that induction steps are
  // required to be loop-invariant.
2410   auto CreateStepValue = [&](const SCEV *Step) -> Value * {
2411     assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) &&
2412            "Induction step should be loop invariant");
2413     if (PSE.getSE()->isSCEVable(IV->getType())) {
2414       SCEVExpander Exp(*PSE.getSE(), DL, "induction");
2415       return Exp.expandCodeFor(Step, Step->getType(),
2416                                LoopVectorPreHeader->getTerminator());
2417     }
2418     return cast<SCEVUnknown>(Step)->getValue();
2419   };
2420 
2421   // The scalar value to broadcast. This is derived from the canonical
2422   // induction variable. If a truncation type is given, truncate the canonical
2423   // induction variable and step. Otherwise, derive these values from the
2424   // induction descriptor.
2425   auto CreateScalarIV = [&](Value *&Step) -> Value * {
2426     Value *ScalarIV = Induction;
2427     if (IV != OldInduction) {
2428       ScalarIV = IV->getType()->isIntegerTy()
2429                      ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
2430                      : Builder.CreateCast(Instruction::SIToFP, Induction,
2431                                           IV->getType());
2432       ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID);
2433       ScalarIV->setName("offset.idx");
2434     }
2435     if (Trunc) {
2436       auto *TruncType = cast<IntegerType>(Trunc->getType());
2437       assert(Step->getType()->isIntegerTy() &&
2438              "Truncation requires an integer step");
2439       ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
2440       Step = Builder.CreateTrunc(Step, TruncType);
2441     }
2442     return ScalarIV;
2443   };
2444 
2445   // Create the vector values from the scalar IV, in the absence of creating a
2446   // vector IV.
2447   auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) {
2448     Value *Broadcasted = getBroadcastInstrs(ScalarIV);
2449     for (unsigned Part = 0; Part < UF; ++Part) {
2450       assert(!VF.isScalable() && "scalable vectors not yet supported.");
2451       Value *EntryPart =
2452           getStepVector(Broadcasted, VF.getKnownMinValue() * Part, Step,
2453                         ID.getInductionOpcode());
2454       State.set(Def, EntryPart, Part);
2455       if (Trunc)
2456         addMetadata(EntryPart, Trunc);
2457       recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, CastDef,
2458                                             State, Part);
2459     }
2460   };
2461 
2462   // Fast-math-flags propagate from the original induction instruction.
2463   IRBuilder<>::FastMathFlagGuard FMFG(Builder);
2464   if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp()))
2465     Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags());
2466 
2467   // Now do the actual transformations, and start with creating the step value.
2468   Value *Step = CreateStepValue(ID.getStep());
2469   if (VF.isZero() || VF.isScalar()) {
2470     Value *ScalarIV = CreateScalarIV(Step);
2471     CreateSplatIV(ScalarIV, Step);
2472     return;
2473   }
2474 
2475   // Determine if we want a scalar version of the induction variable. This is
2476   // true if the induction variable itself is not widened, or if it has at
2477   // least one user in the loop that is not widened.
2478   auto NeedsScalarIV = needsScalarInduction(EntryVal);
2479   if (!NeedsScalarIV) {
2480     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
2481                                     State);
2482     return;
2483   }
2484 
2485   // Try to create a new independent vector induction variable. If we can't
2486   // create the phi node, we will splat the scalar induction variable in each
2487   // loop iteration.
2488   if (!shouldScalarizeInstruction(EntryVal)) {
2489     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
2490                                     State);
2491     Value *ScalarIV = CreateScalarIV(Step);
2492     // Create scalar steps that can be used by instructions we will later
2493     // scalarize. Note that the addition of the scalar steps will not increase
2494     // the number of instructions in the loop in the common case prior to
2495     // InstCombine. We will be trading one vector extract for each scalar step.
2496     buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
2497     return;
2498   }
2499 
  // All IV users are scalar instructions, so only emit a scalar IV, not a
  // vectorised IV, except when we tail-fold: then the splat IV feeds the
  // predicate used by the masked loads/stores.
2503   Value *ScalarIV = CreateScalarIV(Step);
2504   if (!Cost->isScalarEpilogueAllowed())
2505     CreateSplatIV(ScalarIV, Step);
2506   buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
2507 }
2508 
2509 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
2510                                           Instruction::BinaryOps BinOp) {
2511   // Create and check the types.
2512   auto *ValVTy = cast<VectorType>(Val->getType());
2513   ElementCount VLen = ValVTy->getElementCount();
2514 
2515   Type *STy = Val->getType()->getScalarType();
2516   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
2517          "Induction Step must be an integer or FP");
2518   assert(Step->getType() == STy && "Step has wrong type");
2519 
2520   SmallVector<Constant *, 8> Indices;
2521 
2522   // Create a vector of consecutive numbers from zero to VF.
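  // E.g. with VF = 4, StartIdx = 4 and Step = 2, the integer path below
  // computes Val + (<0, 1, 2, 3> + 4) * 2 = Val + <8, 10, 12, 14>.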
2523   VectorType *InitVecValVTy = ValVTy;
2524   Type *InitVecValSTy = STy;
2525   if (STy->isFloatingPointTy()) {
2526     InitVecValSTy =
2527         IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
2528     InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
2529   }
2530   Value *InitVec = Builder.CreateStepVector(InitVecValVTy);
2531 
2532   // Add on StartIdx
2533   Value *StartIdxSplat = Builder.CreateVectorSplat(
2534       VLen, ConstantInt::get(InitVecValSTy, StartIdx));
2535   InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);
2536 
2537   if (STy->isIntegerTy()) {
2538     Step = Builder.CreateVectorSplat(VLen, Step);
2539     assert(Step->getType() == Val->getType() && "Invalid step vec");
    // FIXME: The newly created binary instructions should contain nsw/nuw
    // flags, which can be taken from the original scalar operations.
2542     Step = Builder.CreateMul(InitVec, Step);
2543     return Builder.CreateAdd(Val, Step, "induction");
2544   }
2545 
2546   // Floating point induction.
2547   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
2548          "Binary Opcode should be specified for FP induction");
2549   InitVec = Builder.CreateUIToFP(InitVec, ValVTy);
2550   Step = Builder.CreateVectorSplat(VLen, Step);
2551   Value *MulOp = Builder.CreateFMul(InitVec, Step);
2552   return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
2553 }
2554 
2555 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
2556                                            Instruction *EntryVal,
2557                                            const InductionDescriptor &ID,
2558                                            VPValue *Def, VPValue *CastDef,
2559                                            VPTransformState &State) {
2560   // We shouldn't have to build scalar steps if we aren't vectorizing.
2561   assert(VF.isVector() && "VF should be greater than one");
  // Get the value type and ensure it and the step have the same type.
2563   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
2564   assert(ScalarIVTy == Step->getType() &&
2565          "Val and Step should have the same type");
2566 
2567   // We build scalar steps for both integer and floating-point induction
2568   // variables. Here, we determine the kind of arithmetic we will perform.
2569   Instruction::BinaryOps AddOp;
2570   Instruction::BinaryOps MulOp;
2571   if (ScalarIVTy->isIntegerTy()) {
2572     AddOp = Instruction::Add;
2573     MulOp = Instruction::Mul;
2574   } else {
2575     AddOp = ID.getInductionOpcode();
2576     MulOp = Instruction::FMul;
2577   }
2578 
2579   // Determine the number of scalars we need to generate for each unroll
2580   // iteration. If EntryVal is uniform, we only need to generate the first
2581   // lane. Otherwise, we generate all VF values.
2582   bool IsUniform =
2583       Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF);
2584   unsigned Lanes = IsUniform ? 1 : VF.getKnownMinValue();
2585   // Compute the scalar steps and save the results in State.
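  // For an add-based induction, the value produced for unroll part P and
  // lane L is effectively ScalarIV + (P * VF + L) * Step, with P * VF
  // evaluated at runtime when VF is scalable.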
2586   Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
2587                                      ScalarIVTy->getScalarSizeInBits());
2588   Type *VecIVTy = nullptr;
2589   Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr;
2590   if (!IsUniform && VF.isScalable()) {
2591     VecIVTy = VectorType::get(ScalarIVTy, VF);
2592     UnitStepVec = Builder.CreateStepVector(VectorType::get(IntStepTy, VF));
2593     SplatStep = Builder.CreateVectorSplat(VF, Step);
2594     SplatIV = Builder.CreateVectorSplat(VF, ScalarIV);
2595   }
2596 
2597   for (unsigned Part = 0; Part < UF; ++Part) {
2598     Value *StartIdx0 =
2599         createStepForVF(Builder, ConstantInt::get(IntStepTy, Part), VF);
2600 
2601     if (!IsUniform && VF.isScalable()) {
2602       auto *SplatStartIdx = Builder.CreateVectorSplat(VF, StartIdx0);
2603       auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec);
2604       if (ScalarIVTy->isFloatingPointTy())
2605         InitVec = Builder.CreateSIToFP(InitVec, VecIVTy);
2606       auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep);
2607       auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul);
2608       State.set(Def, Add, Part);
2609       recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State,
2610                                             Part);
      // It's also useful to record the lane values for the known minimum
      // number of elements, so we do that below. This improves the code
      // quality when trying to extract the first element, for example.
2614     }
2615 
2616     if (ScalarIVTy->isFloatingPointTy())
2617       StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy);
2618 
2619     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2620       Value *StartIdx = Builder.CreateBinOp(
2621           AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane));
2622       // The step returned by `createStepForVF` is a runtime-evaluated value
2623       // when VF is scalable. Otherwise, it should be folded into a Constant.
2624       assert((VF.isScalable() || isa<Constant>(StartIdx)) &&
2625              "Expected StartIdx to be folded to a constant when VF is not "
2626              "scalable");
2627       auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
2628       auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul);
2629       State.set(Def, Add, VPIteration(Part, Lane));
2630       recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State,
2631                                             Part, Lane);
2632     }
2633   }
2634 }
2635 
2636 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def,
2637                                                     const VPIteration &Instance,
2638                                                     VPTransformState &State) {
2639   Value *ScalarInst = State.get(Def, Instance);
2640   Value *VectorValue = State.get(Def, Instance.Part);
2641   VectorValue = Builder.CreateInsertElement(
2642       VectorValue, ScalarInst,
2643       Instance.Lane.getAsRuntimeExpr(State.Builder, VF));
2644   State.set(Def, VectorValue, Instance.Part);
2645 }
2646 
2647 Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
2648   assert(Vec->getType()->isVectorTy() && "Invalid type");
2649   return Builder.CreateVectorReverse(Vec, "reverse");
2650 }
2651 
2652 // Return whether we allow using masked interleave-groups (for dealing with
2653 // strided loads/stores that reside in predicated blocks, or for dealing
2654 // with gaps).
2655 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2656   // If an override option has been passed in for interleaved accesses, use it.
2657   if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2658     return EnableMaskedInterleavedMemAccesses;
2659 
2660   return TTI.enableMaskedInterleavedAccessVectorization();
2661 }
2662 
2663 // Try to vectorize the interleave group that \p Instr belongs to.
2664 //
// E.g. Translate the following interleaved load group (factor = 3):
2666 //   for (i = 0; i < N; i+=3) {
2667 //     R = Pic[i];             // Member of index 0
2668 //     G = Pic[i+1];           // Member of index 1
2669 //     B = Pic[i+2];           // Member of index 2
2670 //     ... // do something to R, G, B
2671 //   }
2672 // To:
2673 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2674 //   %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9>   ; R elements
2675 //   %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10>  ; G elements
2676 //   %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11>  ; B elements
2677 //
// Or translate the following interleaved store group (factor = 3):
2679 //   for (i = 0; i < N; i+=3) {
2680 //     ... do something to R, G, B
2681 //     Pic[i]   = R;           // Member of index 0
2682 //     Pic[i+1] = G;           // Member of index 1
2683 //     Pic[i+2] = B;           // Member of index 2
2684 //   }
2685 // To:
2686 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2687 //   %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
2688 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2689 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2690 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2691 void InnerLoopVectorizer::vectorizeInterleaveGroup(
2692     const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
2693     VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
2694     VPValue *BlockInMask) {
2695   Instruction *Instr = Group->getInsertPos();
2696   const DataLayout &DL = Instr->getModule()->getDataLayout();
2697 
2698   // Prepare for the vector type of the interleaved load/store.
2699   Type *ScalarTy = getLoadStoreType(Instr);
2700   unsigned InterleaveFactor = Group->getFactor();
2701   assert(!VF.isScalable() && "scalable vectors not yet supported.");
2702   auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);
2703 
2704   // Prepare for the new pointers.
2705   SmallVector<Value *, 2> AddrParts;
2706   unsigned Index = Group->getIndex(Instr);
2707 
2708   // TODO: extend the masked interleaved-group support to reversed access.
2709   assert((!BlockInMask || !Group->isReverse()) &&
2710          "Reversed masked interleave-group not supported.");
2711 
2712   // If the group is reverse, adjust the index to refer to the last vector lane
2713   // instead of the first. We adjust the index from the first vector lane,
2714   // rather than directly getting the pointer for lane VF - 1, because the
2715   // pointer operand of the interleaved access is supposed to be uniform. For
2716   // uniform instructions, we're only required to generate a value for the
2717   // first vector lane in each unroll iteration.
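  // E.g. with VF = 4 and an interleave factor of 3, a reversed group adjusts
  // Index by (4 - 1) * 3 = 9 elements.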
2718   if (Group->isReverse())
2719     Index += (VF.getKnownMinValue() - 1) * Group->getFactor();
2720 
2721   for (unsigned Part = 0; Part < UF; Part++) {
2722     Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
2723     setDebugLocFromInst(Builder, AddrPart);
2724 
    // Note that the current instruction could be at any member index. We need
    // to adjust the address to the member of index 0.
    //
    // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
    //       b = A[i];       // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
    //
    // E.g.  A[i+1] = a;     // Member of index 1
    //       A[i]   = b;     // Member of index 0
    //       A[i+2] = c;     // Member of index 2 (Current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].
2736 
2737     bool InBounds = false;
2738     if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
2739       InBounds = gep->isInBounds();
2740     AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2741     cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2742 
2743     // Cast to the vector pointer type.
2744     unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
2745     Type *PtrTy = VecTy->getPointerTo(AddressSpace);
2746     AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
2747   }
2748 
2749   setDebugLocFromInst(Builder, Instr);
2750   Value *PoisonVec = PoisonValue::get(VecTy);
2751 
2752   Value *MaskForGaps = nullptr;
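  // If the group has gaps and we cannot rely on a scalar epilogue to handle
  // the trailing iterations, mask off the vector lanes that correspond to the
  // gaps so they are not accessed.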
2753   if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
2754     MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2755     assert(MaskForGaps && "Mask for Gaps is required but it is null");
2756   }
2757 
2758   // Vectorize the interleaved load group.
2759   if (isa<LoadInst>(Instr)) {
2760     // For each unroll part, create a wide load for the group.
2761     SmallVector<Value *, 2> NewLoads;
2762     for (unsigned Part = 0; Part < UF; Part++) {
2763       Instruction *NewLoad;
2764       if (BlockInMask || MaskForGaps) {
2765         assert(useMaskedInterleavedAccesses(*TTI) &&
2766                "masked interleaved groups are not allowed.");
2767         Value *GroupMask = MaskForGaps;
2768         if (BlockInMask) {
2769           Value *BlockInMaskPart = State.get(BlockInMask, Part);
2770           Value *ShuffledMask = Builder.CreateShuffleVector(
2771               BlockInMaskPart,
2772               createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2773               "interleaved.mask");
2774           GroupMask = MaskForGaps
2775                           ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2776                                                 MaskForGaps)
2777                           : ShuffledMask;
2778         }
2779         NewLoad =
2780             Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(),
2781                                      GroupMask, PoisonVec, "wide.masked.vec");
2782       }
2783       else
2784         NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
2785                                             Group->getAlign(), "wide.vec");
2786       Group->addMetadata(NewLoad);
2787       NewLoads.push_back(NewLoad);
2788     }
2789 
2790     // For each member in the group, shuffle out the appropriate data from the
2791     // wide loads.
2792     unsigned J = 0;
2793     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2794       Instruction *Member = Group->getMember(I);
2795 
2796       // Skip the gaps in the group.
2797       if (!Member)
2798         continue;
2799 
2800       auto StrideMask =
2801           createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
2802       for (unsigned Part = 0; Part < UF; Part++) {
2803         Value *StridedVec = Builder.CreateShuffleVector(
2804             NewLoads[Part], StrideMask, "strided.vec");
2805 
        // If this member has a different type, cast the result to that type.
2807         if (Member->getType() != ScalarTy) {
2808           assert(!VF.isScalable() && "VF is assumed to be non scalable.");
2809           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2810           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2811         }
2812 
2813         if (Group->isReverse())
2814           StridedVec = reverseVector(StridedVec);
2815 
2816         State.set(VPDefs[J], StridedVec, Part);
2817       }
2818       ++J;
2819     }
2820     return;
2821   }
2822 
  // The sub vector type for the current instruction.
2824   auto *SubVT = VectorType::get(ScalarTy, VF);
2825 
2826   // Vectorize the interleaved store group.
2827   for (unsigned Part = 0; Part < UF; Part++) {
2828     // Collect the stored vector from each member.
2829     SmallVector<Value *, 4> StoredVecs;
2830     for (unsigned i = 0; i < InterleaveFactor; i++) {
      // Interleaved store groups don't allow gaps, so each index has a member.
      assert(Group->getMember(i) &&
             "Failed to get a member from an interleaved store group");
2833 
2834       Value *StoredVec = State.get(StoredValues[i], Part);
2835 
2836       if (Group->isReverse())
2837         StoredVec = reverseVector(StoredVec);
2838 
      // If this member has a different type, cast it to a unified type.
2840 
2841       if (StoredVec->getType() != SubVT)
2842         StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2843 
2844       StoredVecs.push_back(StoredVec);
2845     }
2846 
2847     // Concatenate all vectors into a wide vector.
2848     Value *WideVec = concatenateVectors(Builder, StoredVecs);
2849 
2850     // Interleave the elements in the wide vector.
2851     Value *IVec = Builder.CreateShuffleVector(
2852         WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor),
2853         "interleaved.vec");
2854 
2855     Instruction *NewStoreInstr;
2856     if (BlockInMask) {
2857       Value *BlockInMaskPart = State.get(BlockInMask, Part);
2858       Value *ShuffledMask = Builder.CreateShuffleVector(
2859           BlockInMaskPart,
2860           createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2861           "interleaved.mask");
2862       NewStoreInstr = Builder.CreateMaskedStore(
2863           IVec, AddrParts[Part], Group->getAlign(), ShuffledMask);
2864     }
2865     else
2866       NewStoreInstr =
2867           Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());
2868 
2869     Group->addMetadata(NewStoreInstr);
2870   }
2871 }
2872 
2873 void InnerLoopVectorizer::vectorizeMemoryInstruction(
2874     Instruction *Instr, VPTransformState &State, VPValue *Def, VPValue *Addr,
2875     VPValue *StoredValue, VPValue *BlockInMask) {
2876   // Attempt to issue a wide load.
2877   LoadInst *LI = dyn_cast<LoadInst>(Instr);
2878   StoreInst *SI = dyn_cast<StoreInst>(Instr);
2879 
2880   assert((LI || SI) && "Invalid Load/Store instruction");
2881   assert((!SI || StoredValue) && "No stored value provided for widened store");
2882   assert((!LI || !StoredValue) && "Stored value provided for widened load");
2883 
2884   LoopVectorizationCostModel::InstWidening Decision =
2885       Cost->getWideningDecision(Instr, VF);
2886   assert((Decision == LoopVectorizationCostModel::CM_Widen ||
2887           Decision == LoopVectorizationCostModel::CM_Widen_Reverse ||
2888           Decision == LoopVectorizationCostModel::CM_GatherScatter) &&
2889          "CM decision is not to widen the memory instruction");
2890 
2891   Type *ScalarDataTy = getLoadStoreType(Instr);
2892 
2893   auto *DataTy = VectorType::get(ScalarDataTy, VF);
2894   const Align Alignment = getLoadStoreAlignment(Instr);
2895 
2896   // Determine if the pointer operand of the access is either consecutive or
2897   // reverse consecutive.
2898   bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse);
2899   bool ConsecutiveStride =
2900       Reverse || (Decision == LoopVectorizationCostModel::CM_Widen);
2901   bool CreateGatherScatter =
2902       (Decision == LoopVectorizationCostModel::CM_GatherScatter);
2903 
2904   // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector
2905   // gather/scatter. Otherwise Decision should have been to Scalarize.
2906   assert((ConsecutiveStride || CreateGatherScatter) &&
2907          "The instruction should be scalarized");
2908   (void)ConsecutiveStride;
2909 
2910   VectorParts BlockInMaskParts(UF);
2911   bool isMaskRequired = BlockInMask;
2912   if (isMaskRequired)
2913     for (unsigned Part = 0; Part < UF; ++Part)
2914       BlockInMaskParts[Part] = State.get(BlockInMask, Part);
2915 
2916   const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
2917     // Calculate the pointer for the specific unroll-part.
2918     GetElementPtrInst *PartPtr = nullptr;
2919 
2920     bool InBounds = false;
2921     if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
2922       InBounds = gep->isInBounds();
2923     if (Reverse) {
2924       // If the address is consecutive but reversed, then the
2925       // wide store needs to start at the last vector element.
      // RunTimeVF = VScale * VF.getKnownMinValue()
      // For fixed-width vectors VScale is 1, so
      // RunTimeVF = VF.getKnownMinValue().
2928       Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), VF);
2929       // NumElt = -Part * RunTimeVF
2930       Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF);
2931       // LastLane = 1 - RunTimeVF
2932       Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF);
2933       PartPtr =
2934           cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt));
2935       PartPtr->setIsInBounds(InBounds);
2936       PartPtr = cast<GetElementPtrInst>(
2937           Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane));
2938       PartPtr->setIsInBounds(InBounds);
2939       if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
2940         BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]);
2941     } else {
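      // Each unroll part accesses the next consecutive chunk: advance the
      // pointer by Part * VF elements (evaluated at runtime for scalable VF).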
2942       Value *Increment = createStepForVF(Builder, Builder.getInt32(Part), VF);
2943       PartPtr = cast<GetElementPtrInst>(
2944           Builder.CreateGEP(ScalarDataTy, Ptr, Increment));
2945       PartPtr->setIsInBounds(InBounds);
2946     }
2947 
2948     unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
2949     return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
2950   };
2951 
2952   // Handle Stores:
2953   if (SI) {
2954     setDebugLocFromInst(Builder, SI);
2955 
2956     for (unsigned Part = 0; Part < UF; ++Part) {
2957       Instruction *NewSI = nullptr;
2958       Value *StoredVal = State.get(StoredValue, Part);
2959       if (CreateGatherScatter) {
2960         Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2961         Value *VectorGep = State.get(Addr, Part);
2962         NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
2963                                             MaskPart);
2964       } else {
2965         if (Reverse) {
2966           // If we store to reverse consecutive memory locations, then we need
2967           // to reverse the order of elements in the stored value.
2968           StoredVal = reverseVector(StoredVal);
2969           // We don't want to update the value in the map as it might be used in
2970           // another expression. So don't call resetVectorValue(StoredVal).
2971         }
2972         auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0)));
2973         if (isMaskRequired)
2974           NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
2975                                             BlockInMaskParts[Part]);
2976         else
2977           NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
2978       }
2979       addMetadata(NewSI, SI);
2980     }
2981     return;
2982   }
2983 
2984   // Handle loads.
2985   assert(LI && "Must have a load instruction");
2986   setDebugLocFromInst(Builder, LI);
2987   for (unsigned Part = 0; Part < UF; ++Part) {
2988     Value *NewLI;
2989     if (CreateGatherScatter) {
2990       Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2991       Value *VectorGep = State.get(Addr, Part);
2992       NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart,
2993                                          nullptr, "wide.masked.gather");
2994       addMetadata(NewLI, LI);
2995     } else {
2996       auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0)));
2997       if (isMaskRequired)
2998         NewLI = Builder.CreateMaskedLoad(
2999             VecPtr, Alignment, BlockInMaskParts[Part], PoisonValue::get(DataTy),
3000             "wide.masked.load");
3001       else
3002         NewLI =
3003             Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
3004 
      // Add metadata to the load, but set the vector value to the reversed
      // shuffle.
3006       addMetadata(NewLI, LI);
3007       if (Reverse)
3008         NewLI = reverseVector(NewLI);
3009     }
3010 
3011     State.set(Def, NewLI, Part);
3012   }
3013 }
3014 
3015 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, VPValue *Def,
3016                                                VPUser &User,
3017                                                const VPIteration &Instance,
3018                                                bool IfPredicateInstr,
3019                                                VPTransformState &State) {
3020   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
3021 
3022   // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for
3023   // the first lane and part.
3024   if (isa<NoAliasScopeDeclInst>(Instr))
3025     if (!Instance.isFirstIteration())
3026       return;
3027 
3028   setDebugLocFromInst(Builder, Instr);
3029 
  // Does this instruction return a value?
3031   bool IsVoidRetTy = Instr->getType()->isVoidTy();
3032 
3033   Instruction *Cloned = Instr->clone();
3034   if (!IsVoidRetTy)
3035     Cloned->setName(Instr->getName() + ".cloned");
3036 
3037   State.Builder.SetInsertPoint(Builder.GetInsertBlock(),
3038                                Builder.GetInsertPoint());
3039   // Replace the operands of the cloned instructions with their scalar
3040   // equivalents in the new loop.
3041   for (unsigned op = 0, e = User.getNumOperands(); op != e; ++op) {
3042     auto *Operand = dyn_cast<Instruction>(Instr->getOperand(op));
3043     auto InputInstance = Instance;
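    // Operands not defined inside the loop, or uniform after vectorization,
    // only have a value for the first lane, so read from lane 0 for them.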
3044     if (!Operand || !OrigLoop->contains(Operand) ||
3045         (Cost->isUniformAfterVectorization(Operand, State.VF)))
3046       InputInstance.Lane = VPLane::getFirstLane();
3047     auto *NewOp = State.get(User.getOperand(op), InputInstance);
3048     Cloned->setOperand(op, NewOp);
3049   }
3050   addNewMetadata(Cloned, Instr);
3051 
3052   // Place the cloned scalar in the new loop.
3053   Builder.Insert(Cloned);
3054 
3055   State.set(Def, Cloned, Instance);
3056 
  // If we just cloned a new assumption, add it to the assumption cache.
3058   if (auto *II = dyn_cast<AssumeInst>(Cloned))
3059     AC->registerAssumption(II);
3060 
3061   // End if-block.
3062   if (IfPredicateInstr)
3063     PredicatedInstructions.push_back(Cloned);
3064 }
3065 
3066 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
3067                                                       Value *End, Value *Step,
3068                                                       Instruction *DL) {
3069   BasicBlock *Header = L->getHeader();
3070   BasicBlock *Latch = L->getLoopLatch();
3071   // As we're just creating this loop, it's possible no latch exists
3072   // yet. If so, use the header as this will be a single block loop.
3073   if (!Latch)
3074     Latch = Header;
3075 
3076   IRBuilder<> Builder(&*Header->getFirstInsertionPt());
3077   Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
3078   setDebugLocFromInst(Builder, OldInst);
3079   auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
3080 
3081   Builder.SetInsertPoint(Latch->getTerminator());
3082   setDebugLocFromInst(Builder, OldInst);
3083 
3084   // Create i+1 and fill the PHINode.
3085   //
3086   // If the tail is not folded, we know that End - Start >= Step (either
3087   // statically or through the minimum iteration checks). We also know that both
3088   // Start % Step == 0 and End % Step == 0. We exit the vector loop if %IV +
3089   // %Step == %End. Hence we must exit the loop before %IV + %Step unsigned
3090   // overflows and we can mark the induction increment as NUW.
3091   Value *Next =
3092       Builder.CreateAdd(Induction, Step, "index.next",
3093                         /*NUW=*/!Cost->foldTailByMasking(), /*NSW=*/false);
3094   Induction->addIncoming(Start, L->getLoopPreheader());
3095   Induction->addIncoming(Next, Latch);
3096   // Create the compare.
3097   Value *ICmp = Builder.CreateICmpEQ(Next, End);
3098   Builder.CreateCondBr(ICmp, L->getUniqueExitBlock(), Header);
3099 
3100   // Now we have two terminators. Remove the old one from the block.
3101   Latch->getTerminator()->eraseFromParent();
3102 
3103   return Induction;
3104 }
3105 
3106 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
3107   if (TripCount)
3108     return TripCount;
3109 
3110   assert(L && "Create Trip Count for null loop.");
3111   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3112   // Find the loop boundaries.
3113   ScalarEvolution *SE = PSE.getSE();
3114   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
3115   assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
3116          "Invalid loop count");
3117 
3118   Type *IdxTy = Legal->getWidestInductionType();
3119   assert(IdxTy && "No type for induction");
3120 
  // The exit count might have the type i64 while the phi is i32. This can
  // happen if we have an induction variable that is sign-extended before the
  // compare. The only way we get a backedge-taken count in that case is if the
  // induction variable was signed, and as such it will not overflow, so the
  // truncation is legal.
3126   if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
3127       IdxTy->getPrimitiveSizeInBits())
3128     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
3129   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
3130 
3131   // Get the total trip count from the count by adding 1.
3132   const SCEV *ExitCount = SE->getAddExpr(
3133       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
3134 
3135   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
3136 
3137   // Expand the trip count and place the new instructions in the preheader.
3138   // Notice that the pre-header does not change, only the loop body.
3139   SCEVExpander Exp(*SE, DL, "induction");
3140 
3141   // Count holds the overall loop count (N).
3142   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
3143                                 L->getLoopPreheader()->getTerminator());
3144 
3145   if (TripCount->getType()->isPointerTy())
3146     TripCount =
3147         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
3148                                     L->getLoopPreheader()->getTerminator());
3149 
3150   return TripCount;
3151 }
3152 
3153 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
3154   if (VectorTripCount)
3155     return VectorTripCount;
3156 
3157   Value *TC = getOrCreateTripCount(L);
3158   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3159 
3160   Type *Ty = TC->getType();
3161   // This is where we can make the step a runtime constant.
3162   Value *Step = createStepForVF(Builder, ConstantInt::get(Ty, UF), VF);
3163 
3164   // If the tail is to be folded by masking, round the number of iterations N
3165   // up to a multiple of Step instead of rounding down. This is done by first
3166   // adding Step-1 and then rounding down. Note that it's ok if this addition
3167   // overflows: the vector induction variable will eventually wrap to zero given
3168   // that it starts at zero and its Step is a power of two; the loop will then
3169   // exit, with the last early-exit vector comparison also producing all-true.
3170   if (Cost->foldTailByMasking()) {
3171     assert(isPowerOf2_32(VF.getKnownMinValue() * UF) &&
3172            "VF*UF must be a power of 2 when folding tail by masking");
3173     assert(!VF.isScalable() &&
3174            "Tail folding not yet supported for scalable vectors");
3175     TC = Builder.CreateAdd(
3176         TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up");
3177   }
3178 
3179   // Now we need to generate the expression for the part of the loop that the
3180   // vectorized body will execute. This is equal to N - (N % Step) if scalar
3181   // iterations are not required for correctness, or N - Step, otherwise. Step
3182   // is equal to the vectorization factor (number of SIMD elements) times the
3183   // unroll factor (number of SIMD instructions).
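  // For example, a trip count of 10 with Step = 4 gives a vector trip count
  // of 8 (or 12 when folding the tail, since 10 is first rounded up above).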
3184   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
3185 
3186   // There are two cases where we need to ensure (at least) the last iteration
3187   // runs in the scalar remainder loop. Thus, if the step evenly divides
3188   // the trip count, we set the remainder to be equal to the step. If the step
3189   // does not evenly divide the trip count, no adjustment is necessary since
3190   // there will already be scalar iterations. Note that the minimum iterations
3191   // check ensures that N >= Step. The cases are:
3192   // 1) If there is a non-reversed interleaved group that may speculatively
3193   //    access memory out-of-bounds.
3194   // 2) If any instruction may follow a conditionally taken exit. That is, if
3195   //    the loop contains multiple exiting blocks, or a single exiting block
3196   //    which is not the latch.
3197   if (VF.isVector() && Cost->requiresScalarEpilogue()) {
3198     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
3199     R = Builder.CreateSelect(IsZero, Step, R);
3200   }
3201 
3202   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
3203 
3204   return VectorTripCount;
3205 }
3206 
3207 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
3208                                                    const DataLayout &DL) {
3209   // Verify that V is a vector type with same number of elements as DstVTy.
3210   auto *DstFVTy = cast<FixedVectorType>(DstVTy);
3211   unsigned VF = DstFVTy->getNumElements();
3212   auto *SrcVecTy = cast<FixedVectorType>(V->getType());
  assert((VF == SrcVecTy->getNumElements()) &&
         "Vector dimensions do not match");
3214   Type *SrcElemTy = SrcVecTy->getElementType();
3215   Type *DstElemTy = DstFVTy->getElementType();
3216   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
3217          "Vector elements must have same size");
3218 
3219   // Do a direct cast if element types are castable.
3220   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
3221     return Builder.CreateBitOrPointerCast(V, DstFVTy);
3222   }
3223   // V cannot be directly casted to desired vector type.
3224   // May happen when V is a floating point vector but DstVTy is a vector of
3225   // pointers or vice-versa. Handle this using a two-step bitcast using an
3226   // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float.
3227   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
3228          "Only one type should be a pointer type");
3229   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
3230          "Only one type should be a floating point type");
3231   Type *IntTy =
3232       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
3233   auto *VecIntTy = FixedVectorType::get(IntTy, VF);
3234   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
3235   return Builder.CreateBitOrPointerCast(CastVal, DstFVTy);
3236 }
3237 
3238 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
3239                                                          BasicBlock *Bypass) {
3240   Value *Count = getOrCreateTripCount(L);
3241   // Reuse existing vector loop preheader for TC checks.
3242   // Note that new preheader block is generated for vector loop.
3243   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
3244   IRBuilder<> Builder(TCCheckBlock->getTerminator());
3245 
3246   // Generate code to check if the loop's trip count is less than VF * UF, or
3247   // equal to it in case a scalar epilogue is required; this implies that the
3248   // vector trip count is zero. This check also covers the case where adding one
  // to the backedge-taken count overflowed, leading to an incorrect trip count
3250   // of zero. In this case we will also jump to the scalar loop.
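  // E.g. with VF = 4 and UF = 2: if a scalar epilogue is required we branch
  // to the scalar loop when the trip count is <= 8, otherwise when it is < 8.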
3251   auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE
3252                                           : ICmpInst::ICMP_ULT;
3253 
3254   // If tail is to be folded, vector loop takes care of all iterations.
3255   Value *CheckMinIters = Builder.getFalse();
3256   if (!Cost->foldTailByMasking()) {
3257     Value *Step =
3258         createStepForVF(Builder, ConstantInt::get(Count->getType(), UF), VF);
3259     CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
3260   }
3261   // Create new preheader for vector loop.
3262   LoopVectorPreHeader =
3263       SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
3264                  "vector.ph");
3265 
3266   assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
3267                                DT->getNode(Bypass)->getIDom()) &&
3268          "TC check is expected to dominate Bypass");
3269 
3270   // Update dominator for Bypass & LoopExit.
3271   DT->changeImmediateDominator(Bypass, TCCheckBlock);
3272   DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
3273 
3274   ReplaceInstWithInst(
3275       TCCheckBlock->getTerminator(),
3276       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
3277   LoopBypassBlocks.push_back(TCCheckBlock);
3278 }
3279 
3280 BasicBlock *InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
3281 
3282   BasicBlock *const SCEVCheckBlock =
3283       RTChecks.emitSCEVChecks(L, Bypass, LoopVectorPreHeader, LoopExitBlock);
3284   if (!SCEVCheckBlock)
3285     return nullptr;
3286 
3287   assert(!(SCEVCheckBlock->getParent()->hasOptSize() ||
3288            (OptForSizeBasedOnProfile &&
3289             Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) &&
3290          "Cannot SCEV check stride or overflow when optimizing for size");

  // Update dominator only if this is the first RT check.
3294   if (LoopBypassBlocks.empty()) {
3295     DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
3296     DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
3297   }
3298 
3299   LoopBypassBlocks.push_back(SCEVCheckBlock);
3300   AddedSafetyChecks = true;
3301   return SCEVCheckBlock;
3302 }
3303 
3304 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L,
3305                                                       BasicBlock *Bypass) {
3306   // VPlan-native path does not do any analysis for runtime checks currently.
3307   if (EnableVPlanNativePath)
3308     return nullptr;
3309 
3310   BasicBlock *const MemCheckBlock =
3311       RTChecks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader);
3312 
  // Check if we generated code that checks at runtime whether arrays overlap.
  // We put the checks into a separate block to make the more common case of
  // few elements faster.
3316   if (!MemCheckBlock)
3317     return nullptr;
3318 
3319   if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
3320     assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
3321            "Cannot emit memory checks when optimizing for size, unless forced "
3322            "to vectorize.");
3323     ORE->emit([&]() {
3324       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
3325                                         L->getStartLoc(), L->getHeader())
3326              << "Code-size may be reduced by not forcing "
3327                 "vectorization, or by source-code modifications "
3328                 "eliminating the need for runtime checks "
3329                 "(e.g., adding 'restrict').";
3330     });
3331   }
3332 
3333   LoopBypassBlocks.push_back(MemCheckBlock);
3334 
3335   AddedSafetyChecks = true;
3336 
3337   // We currently don't use LoopVersioning for the actual loop cloning but we
3338   // still use it to add the noalias metadata.
3339   LVer = std::make_unique<LoopVersioning>(
3340       *Legal->getLAI(),
3341       Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
3342       DT, PSE.getSE());
3343   LVer->prepareNoAliasMetadata();
3344   return MemCheckBlock;
3345 }
3346 
3347 Value *InnerLoopVectorizer::emitTransformedIndex(
3348     IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
3349     const InductionDescriptor &ID) const {
3350 
3351   SCEVExpander Exp(*SE, DL, "induction");
3352   auto Step = ID.getStep();
3353   auto StartValue = ID.getStartValue();
3354   assert(Index->getType()->getScalarType() == Step->getType() &&
3355          "Index scalar type does not match StepValue type");
3356 
3357   // Note: the IR at this point is broken. We cannot use SE to create any new
3358   // SCEV and then expand it, hoping that SCEV's simplification will give us
3359   // a more optimal code. Unfortunately, attempt of doing so on invalid IR may
3360   // lead to various SCEV crashes. So all we can do is to use builder and rely
3361   // on InstCombine for future simplifications. Here we handle some trivial
3362   // cases only.
3363   auto CreateAdd = [&B](Value *X, Value *Y) {
3364     assert(X->getType() == Y->getType() && "Types don't match!");
3365     if (auto *CX = dyn_cast<ConstantInt>(X))
3366       if (CX->isZero())
3367         return Y;
3368     if (auto *CY = dyn_cast<ConstantInt>(Y))
3369       if (CY->isZero())
3370         return X;
3371     return B.CreateAdd(X, Y);
3372   };
3373 
3374   // We allow X to be a vector type, in which case Y will potentially be
3375   // splatted into a vector with the same element count.
3376   auto CreateMul = [&B](Value *X, Value *Y) {
3377     assert(X->getType()->getScalarType() == Y->getType() &&
3378            "Types don't match!");
3379     if (auto *CX = dyn_cast<ConstantInt>(X))
3380       if (CX->isOne())
3381         return Y;
3382     if (auto *CY = dyn_cast<ConstantInt>(Y))
3383       if (CY->isOne())
3384         return X;
3385     VectorType *XVTy = dyn_cast<VectorType>(X->getType());
3386     if (XVTy && !isa<VectorType>(Y->getType()))
3387       Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
3388     return B.CreateMul(X, Y);
3389   };
3390 
3391   // Get a suitable insert point for SCEV expansion. For blocks in the vector
3392   // loop, choose the end of the vector loop header (=LoopVectorBody), because
3393   // the DomTree is not kept up-to-date for additional blocks generated in the
3394   // vector loop. By using the header as insertion point, we guarantee that the
3395   // expanded instructions dominate all their uses.
3396   auto GetInsertPoint = [this, &B]() {
3397     BasicBlock *InsertBB = B.GetInsertPoint()->getParent();
3398     if (InsertBB != LoopVectorBody &&
3399         LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB))
3400       return LoopVectorBody->getTerminator();
3401     return &*B.GetInsertPoint();
3402   };
3403 
3404   switch (ID.getKind()) {
3405   case InductionDescriptor::IK_IntInduction: {
3406     assert(!isa<VectorType>(Index->getType()) &&
3407            "Vector indices not supported for integer inductions yet");
3408     assert(Index->getType() == StartValue->getType() &&
3409            "Index type does not match StartValue type");
3410     if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
3411       return B.CreateSub(StartValue, Index);
3412     auto *Offset = CreateMul(
3413         Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()));
3414     return CreateAdd(StartValue, Offset);
3415   }
3416   case InductionDescriptor::IK_PtrInduction: {
3417     assert(isa<SCEVConstant>(Step) &&
3418            "Expected constant step for pointer induction");
3419     return B.CreateGEP(
3420         StartValue->getType()->getPointerElementType(), StartValue,
3421         CreateMul(Index,
3422                   Exp.expandCodeFor(Step, Index->getType()->getScalarType(),
3423                                     GetInsertPoint())));
3424   }
3425   case InductionDescriptor::IK_FpInduction: {
3426     assert(!isa<VectorType>(Index->getType()) &&
3427            "Vector indices not supported for FP inductions yet");
3428     assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
3429     auto InductionBinOp = ID.getInductionBinOp();
3430     assert(InductionBinOp &&
3431            (InductionBinOp->getOpcode() == Instruction::FAdd ||
3432             InductionBinOp->getOpcode() == Instruction::FSub) &&
3433            "Original bin op should be defined for FP induction");
3434 
3435     Value *StepValue = cast<SCEVUnknown>(Step)->getValue();
3436     Value *MulExp = B.CreateFMul(StepValue, Index);
3437     return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
3438                          "induction");
3439   }
3440   case InductionDescriptor::IK_NoInduction:
3441     return nullptr;
3442   }
3443   llvm_unreachable("invalid enum");
3444 }
3445 
3446 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
3447   LoopScalarBody = OrigLoop->getHeader();
3448   LoopVectorPreHeader = OrigLoop->getLoopPreheader();
3449   LoopExitBlock = OrigLoop->getUniqueExitBlock();
3450   assert(LoopExitBlock && "Must have an exit block");
3451   assert(LoopVectorPreHeader && "Invalid loop structure");
3452 
3453   LoopMiddleBlock =
3454       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3455                  LI, nullptr, Twine(Prefix) + "middle.block");
3456   LoopScalarPreHeader =
3457       SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
3458                  nullptr, Twine(Prefix) + "scalar.ph");
3459 
3460   // Set up branch from middle block to the exit and scalar preheader blocks.
3461   // completeLoopSkeleton will update the condition to use an iteration check,
3462   // if required to decide whether to execute the remainder.
3463   BranchInst *BrInst =
3464       BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, Builder.getTrue());
3465   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3466   BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3467   ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
3468 
  // We intentionally don't let SplitBlock update LoopInfo, since
  // LoopVectorBody should belong to a different loop than LoopVectorPreHeader.
  // LoopVectorBody is explicitly added to the correct place a few lines later.
3472   LoopVectorBody =
3473       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3474                  nullptr, nullptr, Twine(Prefix) + "vector.body");
3475 
3476   // Update dominator for loop exit.
3477   DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
3478 
3479   // Create and register the new vector loop.
3480   Loop *Lp = LI->AllocateLoop();
3481   Loop *ParentLoop = OrigLoop->getParentLoop();
3482 
3483   // Insert the new loop into the loop nest and register the new basic blocks
3484   // before calling any utilities such as SCEV that require valid LoopInfo.
3485   if (ParentLoop) {
3486     ParentLoop->addChildLoop(Lp);
3487   } else {
3488     LI->addTopLevelLoop(Lp);
3489   }
3490   Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
3491   return Lp;
3492 }
3493 
3494 void InnerLoopVectorizer::createInductionResumeValues(
3495     Loop *L, Value *VectorTripCount,
3496     std::pair<BasicBlock *, Value *> AdditionalBypass) {
3497   assert(VectorTripCount && L && "Expected valid arguments");
3498   assert(((AdditionalBypass.first && AdditionalBypass.second) ||
3499           (!AdditionalBypass.first && !AdditionalBypass.second)) &&
3500          "Inconsistent information about additional bypass.");
3501   // We are going to resume the execution of the scalar loop.
3502   // Go over all of the induction variables that we found and fix the
3503   // PHIs that are left in the scalar version of the loop.
3504   // The starting values of PHI nodes depend on the counter of the last
3505   // iteration in the vectorized loop.
3506   // If we come from a bypass edge then we need to start from the original
3507   // start value.
3508   for (auto &InductionEntry : Legal->getInductionVars()) {
3509     PHINode *OrigPhi = InductionEntry.first;
3510     InductionDescriptor II = InductionEntry.second;
3511 
    // Create phi nodes to merge from the backedge-taken check block.
3513     PHINode *BCResumeVal =
3514         PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
3515                         LoopScalarPreHeader->getTerminator());
3516     // Copy original phi DL over to the new one.
3517     BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
3518     Value *&EndValue = IVEndValues[OrigPhi];
3519     Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
3520     if (OrigPhi == OldInduction) {
3521       // We know what the end value is.
3522       EndValue = VectorTripCount;
3523     } else {
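      // Otherwise compute the end value from the vector trip count, using the
      // induction's start value and step.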
3524       IRBuilder<> B(L->getLoopPreheader()->getTerminator());
3525 
3526       // Fast-math-flags propagate from the original induction instruction.
3527       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3528         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3529 
3530       Type *StepType = II.getStep()->getType();
3531       Instruction::CastOps CastOp =
3532           CastInst::getCastOpcode(VectorTripCount, true, StepType, true);
3533       Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd");
3534       const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout();
3535       EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
3536       EndValue->setName("ind.end");
3537 
3538       // Compute the end value for the additional bypass (if applicable).
3539       if (AdditionalBypass.first) {
3540         B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt()));
3541         CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true,
3542                                          StepType, true);
3543         CRD =
3544             B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd");
3545         EndValueFromAdditionalBypass =
3546             emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
3547         EndValueFromAdditionalBypass->setName("ind.end");
3548       }
3549     }
3550     // The new PHI merges the original incoming value, in case of a bypass,
3551     // or the value at the end of the vectorized loop.
3552     BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);
3553 
3554     // Fix the scalar body counter (PHI node).
3555     // The old induction's phi node in the scalar body needs the truncated
3556     // value.
3557     for (BasicBlock *BB : LoopBypassBlocks)
3558       BCResumeVal->addIncoming(II.getStartValue(), BB);
3559 
3560     if (AdditionalBypass.first)
3561       BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first,
3562                                             EndValueFromAdditionalBypass);
3563 
3564     OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
3565   }
3566 }
3567 
3568 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L,
3569                                                       MDNode *OrigLoopID) {
3570   assert(L && "Expected valid loop.");
3571 
3572   // The trip counts should be cached by now.
3573   Value *Count = getOrCreateTripCount(L);
3574   Value *VectorTripCount = getOrCreateVectorTripCount(L);
3575 
3576   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3577 
3578   // Add a check in the middle block to see if we have completed
3579   // all of the iterations in the first vector loop.
3580   // If (N - N%VF) == N, then we *don't* need to run the remainder.
3581   // If tail is to be folded, we know we don't need to run the remainder.
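  // As an illustrative sketch (names may differ), the middle block then ends
  // with:
  //   %cmp.n = icmp eq i64 %trip.count, %n.vec
  //   br i1 %cmp.n, label %exit.block, label %scalar.ph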
3582   if (!Cost->foldTailByMasking()) {
3583     Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
3584                                         Count, VectorTripCount, "cmp.n",
3585                                         LoopMiddleBlock->getTerminator());
3586 
3587     // Here we use the same DebugLoc as the scalar loop latch terminator instead
3588     // of the corresponding compare because they may have ended up with
3589     // different line numbers and we want to avoid awkward line stepping while
    // debugging, e.g. if the compare has a line number inside the loop.
3591     CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3592     cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN);
3593   }
3594 
3595   // Get ready to start creating new instructions into the vectorized body.
3596   assert(LoopVectorPreHeader == L->getLoopPreheader() &&
3597          "Inconsistent vector loop preheader");
3598   Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
3599 
3600   Optional<MDNode *> VectorizedLoopID =
3601       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
3602                                       LLVMLoopVectorizeFollowupVectorized});
3603   if (VectorizedLoopID.hasValue()) {
3604     L->setLoopID(VectorizedLoopID.getValue());
3605 
3606     // Do not setAlreadyVectorized if loop attributes have been defined
3607     // explicitly.
3608     return LoopVectorPreHeader;
3609   }
3610 
3611   // Keep all loop hints from the original loop on the vector loop (we'll
3612   // replace the vectorizer-specific hints below).
3613   if (MDNode *LID = OrigLoop->getLoopID())
3614     L->setLoopID(LID);
3615 
3616   LoopVectorizeHints Hints(L, true, *ORE);
3617   Hints.setAlreadyVectorized();
3618 
3619 #ifdef EXPENSIVE_CHECKS
3620   assert(DT->verify(DominatorTree::VerificationLevel::Fast));
3621   LI->verify(*DT);
3622 #endif
3623 
3624   return LoopVectorPreHeader;
3625 }
3626 
3627 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
3628   /*
3629    In this function we generate a new loop. The new loop will contain
3630    the vectorized instructions while the old loop will continue to run the
3631    scalar remainder.
3632 
3633        [ ] <-- loop iteration number check.
3634     /   |
3635    /    v
3636   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
3637   |  /  |
3638   | /   v
3639   ||   [ ]     <-- vector pre header.
3640   |/    |
3641   |     v
3642   |    [  ] \
3643   |    [  ]_|   <-- vector loop.
3644   |     |
3645   |     v
3646   |   -[ ]   <--- middle-block.
3647   |  /  |
3648   | /   v
3649   -|- >[ ]     <--- new preheader.
3650    |    |
3651    |    v
3652    |   [ ] \
3653    |   [ ]_|   <-- old scalar loop to handle remainder.
3654     \   |
3655      \  v
3656       >[ ]     <-- exit block.
3657    ...
3658    */
3659 
3660   // Get the metadata of the original loop before it gets modified.
3661   MDNode *OrigLoopID = OrigLoop->getLoopID();
3662 
3663   // Workaround!  Compute the trip count of the original loop and cache it
3664   // before we start modifying the CFG.  This code has a systemic problem
3665   // wherein it tries to run analysis over partially constructed IR; this is
3666   // wrong, and not simply for SCEV.  The trip count of the original loop
3667   // simply happens to be prone to hitting this in practice.  In theory, we
3668   // can hit the same issue for any SCEV, or ValueTracking query done during
3669   // mutation.  See PR49900.
3670   getOrCreateTripCount(OrigLoop);
3671 
3672   // Create an empty vector loop, and prepare basic blocks for the runtime
3673   // checks.
3674   Loop *Lp = createVectorLoopSkeleton("");
3675 
3676   // Now, compare the new count to zero. If it is zero skip the vector loop and
3677   // jump to the scalar loop. This check also covers the case where the
3678   // backedge-taken count is uint##_max: adding one to it will overflow leading
3679   // to an incorrect trip count of zero. In this (rare) case we will also jump
3680   // to the scalar loop.
3681   emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader);
3682 
3683   // Generate the code to check any assumptions that we've made for SCEV
3684   // expressions.
3685   emitSCEVChecks(Lp, LoopScalarPreHeader);
3686 
3687   // Generate the code that checks in runtime if arrays overlap. We put the
3688   // checks into a separate block to make the more common case of few elements
3689   // faster.
3690   emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
3691 
3692   // Some loops have a single integer induction variable, while other loops
  // don't. One example is C++ iterators, which often have multiple pointer
  // induction variables. The code below also supports the case where there is
  // no single induction variable.
3696   //
  // We try as hard as possible to obtain an induction variable from the
  // original loop. However, if we don't find one that:
3699   //   - is an integer
3700   //   - counts from zero, stepping by one
3701   //   - is the size of the widest induction variable type
3702   // then we create a new one.
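  // For example (illustrative), 'for (i64 i = 0; i < n; ++i)' already provides
  // such a primary induction, whereas a loop driven only by pointer IVs needs
  // a fresh canonical induction to be created below.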
3703   OldInduction = Legal->getPrimaryInduction();
3704   Type *IdxTy = Legal->getWidestInductionType();
3705   Value *StartIdx = ConstantInt::get(IdxTy, 0);
3706   // The loop step is equal to the vectorization factor (num of SIMD elements)
3707   // times the unroll factor (num of SIMD instructions).
3708   Builder.SetInsertPoint(&*Lp->getHeader()->getFirstInsertionPt());
3709   Value *Step = createStepForVF(Builder, ConstantInt::get(IdxTy, UF), VF);
3710   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
3711   Induction =
3712       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
3713                               getDebugLocFromInstOrOperands(OldInduction));
3714 
3715   // Emit phis for the new starting index of the scalar loop.
3716   createInductionResumeValues(Lp, CountRoundDown);
3717 
3718   return completeLoopSkeleton(Lp, OrigLoopID);
3719 }
3720 
3721 // Fix up external users of the induction variable. At this point, we are
3722 // in LCSSA form, with all external PHIs that use the IV having one input value,
3723 // coming from the remainder loop. We need those PHIs to also have a correct
3724 // value for the IV when arriving directly from the middle block.
3725 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3726                                        const InductionDescriptor &II,
3727                                        Value *CountRoundDown, Value *EndValue,
3728                                        BasicBlock *MiddleBlock) {
3729   // There are two kinds of external IV usages - those that use the value
3730   // computed in the last iteration (the PHI) and those that use the penultimate
3731   // value (the value that feeds into the phi from the loop latch).
3732   // We allow both, but they, obviously, have different values.
3733 
3734   assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");
3735 
3736   DenseMap<Value *, Value *> MissingVals;
3737 
3738   // An external user of the last iteration's value should see the value that
3739   // the remainder loop uses to initialize its own IV.
3740   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3741   for (User *U : PostInc->users()) {
3742     Instruction *UI = cast<Instruction>(U);
3743     if (!OrigLoop->contains(UI)) {
3744       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3745       MissingVals[UI] = EndValue;
3746     }
3747   }
3748 
  // An external user of the penultimate value needs to see EndValue - Step.
3750   // The simplest way to get this is to recompute it from the constituent SCEVs,
3751   // that is Start + (Step * (CRD - 1)).
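  // For example (illustrative), for an IV with Start = 0 and Step = 2, such a
  // user sees 0 + 2 * (CRD - 1), i.e. EndValue - Step.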
3752   for (User *U : OrigPhi->users()) {
3753     auto *UI = cast<Instruction>(U);
3754     if (!OrigLoop->contains(UI)) {
3755       const DataLayout &DL =
3756           OrigLoop->getHeader()->getModule()->getDataLayout();
3757       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3758 
3759       IRBuilder<> B(MiddleBlock->getTerminator());
3760 
3761       // Fast-math-flags propagate from the original induction instruction.
3762       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3763         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3764 
3765       Value *CountMinusOne = B.CreateSub(
3766           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3767       Value *CMO =
3768           !II.getStep()->getType()->isIntegerTy()
3769               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3770                              II.getStep()->getType())
3771               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3772       CMO->setName("cast.cmo");
3773       Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II);
3774       Escape->setName("ind.escape");
3775       MissingVals[UI] = Escape;
3776     }
3777   }
3778 
3779   for (auto &I : MissingVals) {
3780     PHINode *PHI = cast<PHINode>(I.first);
3781     // One corner case we have to handle is two IVs "chasing" each-other,
3782     // that is %IV2 = phi [...], [ %IV1, %latch ]
3783     // In this case, if IV1 has an external use, we need to avoid adding both
3784     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3785     // don't already have an incoming value for the middle block.
3786     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3787       PHI->addIncoming(I.second, MiddleBlock);
3788   }
3789 }
3790 
3791 namespace {
3792 
3793 struct CSEDenseMapInfo {
3794   static bool canHandle(const Instruction *I) {
3795     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3796            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3797   }
3798 
3799   static inline Instruction *getEmptyKey() {
3800     return DenseMapInfo<Instruction *>::getEmptyKey();
3801   }
3802 
3803   static inline Instruction *getTombstoneKey() {
3804     return DenseMapInfo<Instruction *>::getTombstoneKey();
3805   }
3806 
3807   static unsigned getHashValue(const Instruction *I) {
3808     assert(canHandle(I) && "Unknown instruction!");
3809     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3810                                                            I->value_op_end()));
3811   }
3812 
3813   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3814     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3815         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3816       return LHS == RHS;
3817     return LHS->isIdenticalTo(RHS);
3818   }
3819 };
3820 
3821 } // end anonymous namespace
3822 
/// Perform CSE of induction variable instructions.
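/// For example (illustrative), two identical 'getelementptr' or
/// 'extractelement' instructions produced while widening induction updates
/// collapse into one; only insert/extract/shuffle/GEP instructions are
/// considered.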
3824 static void cse(BasicBlock *BB) {
  // Perform simple CSE.
3826   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3827   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
3828     Instruction *In = &*I++;
3829 
3830     if (!CSEDenseMapInfo::canHandle(In))
3831       continue;
3832 
3833     // Check if we can replace this instruction with any of the
3834     // visited instructions.
3835     if (Instruction *V = CSEMap.lookup(In)) {
3836       In->replaceAllUsesWith(V);
3837       In->eraseFromParent();
3838       continue;
3839     }
3840 
3841     CSEMap[In] = In;
3842   }
3843 }
3844 
3845 InstructionCost
3846 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF,
3847                                               bool &NeedToScalarize) const {
3848   Function *F = CI->getCalledFunction();
3849   Type *ScalarRetTy = CI->getType();
3850   SmallVector<Type *, 4> Tys, ScalarTys;
3851   for (auto &ArgOp : CI->arg_operands())
3852     ScalarTys.push_back(ArgOp->getType());
3853 
3854   // Estimate cost of scalarized vector call. The source operands are assumed
3855   // to be vectors, so we need to extract individual elements from there,
3856   // execute VF scalar calls, and then gather the result into the vector return
3857   // value.
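  // For example (illustrative numbers only), with VF = 4, a scalar call cost
  // of 10 and a scalarization overhead of 6, the scalarized estimate below is
  // 4 * 10 + 6 = 46; it is only kept if no cheaper vector variant exists.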
3858   InstructionCost ScalarCallCost =
3859       TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput);
3860   if (VF.isScalar())
3861     return ScalarCallCost;
3862 
3863   // Compute corresponding vector type for return value and arguments.
3864   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3865   for (Type *ScalarTy : ScalarTys)
3866     Tys.push_back(ToVectorTy(ScalarTy, VF));
3867 
3868   // Compute costs of unpacking argument values for the scalar calls and
3869   // packing the return values to a vector.
3870   InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
3871 
3872   InstructionCost Cost =
3873       ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
3874 
3875   // If we can't emit a vector call for this function, then the currently found
3876   // cost is the cost we need to return.
3877   NeedToScalarize = true;
3878   VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
3879   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3880 
3881   if (!TLI || CI->isNoBuiltin() || !VecFunc)
3882     return Cost;
3883 
3884   // If the corresponding vector cost is cheaper, return its cost.
3885   InstructionCost VectorCallCost =
3886       TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput);
3887   if (VectorCallCost < Cost) {
3888     NeedToScalarize = false;
3889     Cost = VectorCallCost;
3890   }
3891   return Cost;
3892 }
3893 
3894 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) {
3895   if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy()))
3896     return Elt;
3897   return VectorType::get(Elt, VF);
3898 }
3899 
3900 InstructionCost
3901 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
3902                                                    ElementCount VF) const {
3903   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3904   assert(ID && "Expected intrinsic call!");
3905   Type *RetTy = MaybeVectorizeType(CI->getType(), VF);
3906   FastMathFlags FMF;
3907   if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
3908     FMF = FPMO->getFastMathFlags();
3909 
3910   SmallVector<const Value *> Arguments(CI->arg_begin(), CI->arg_end());
3911   FunctionType *FTy = CI->getCalledFunction()->getFunctionType();
3912   SmallVector<Type *> ParamTys;
3913   std::transform(FTy->param_begin(), FTy->param_end(),
3914                  std::back_inserter(ParamTys),
3915                  [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); });
3916 
3917   IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
3918                                     dyn_cast<IntrinsicInst>(CI));
3919   return TTI.getIntrinsicInstrCost(CostAttrs,
3920                                    TargetTransformInfo::TCK_RecipThroughput);
3921 }
3922 
3923 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3924   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3925   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3926   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3927 }
3928 
3929 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3930   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3931   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3932   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3933 }
3934 
3935 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) {
3936   // For every instruction `I` in MinBWs, truncate the operands, create a
3937   // truncated version of `I` and reextend its result. InstCombine runs
3938   // later and will remove any ext/trunc pairs.
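  // As an illustrative sketch, an i32 add whose result is known to need only
  // 8 bits is rewritten roughly as:
  //   %a.tr   = trunc <4 x i32> %a to <4 x i8>
  //   %b.tr   = trunc <4 x i32> %b to <4 x i8>
  //   %add.tr = add <4 x i8> %a.tr, %b.tr
  //   %res    = zext <4 x i8> %add.tr to <4 x i32>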
3939   SmallPtrSet<Value *, 4> Erased;
3940   for (const auto &KV : Cost->getMinimalBitwidths()) {
3941     // If the value wasn't vectorized, we must maintain the original scalar
3942     // type. The absence of the value from State indicates that it
3943     // wasn't vectorized.
3944     VPValue *Def = State.Plan->getVPValue(KV.first);
3945     if (!State.hasAnyVectorValue(Def))
3946       continue;
3947     for (unsigned Part = 0; Part < UF; ++Part) {
3948       Value *I = State.get(Def, Part);
3949       if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
3950         continue;
3951       Type *OriginalTy = I->getType();
3952       Type *ScalarTruncatedTy =
3953           IntegerType::get(OriginalTy->getContext(), KV.second);
3954       auto *TruncatedTy = FixedVectorType::get(
3955           ScalarTruncatedTy,
3956           cast<FixedVectorType>(OriginalTy)->getNumElements());
3957       if (TruncatedTy == OriginalTy)
3958         continue;
3959 
3960       IRBuilder<> B(cast<Instruction>(I));
3961       auto ShrinkOperand = [&](Value *V) -> Value * {
3962         if (auto *ZI = dyn_cast<ZExtInst>(V))
3963           if (ZI->getSrcTy() == TruncatedTy)
3964             return ZI->getOperand(0);
3965         return B.CreateZExtOrTrunc(V, TruncatedTy);
3966       };
3967 
3968       // The actual instruction modification depends on the instruction type,
3969       // unfortunately.
3970       Value *NewI = nullptr;
3971       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3972         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3973                              ShrinkOperand(BO->getOperand(1)));
3974 
3975         // Any wrapping introduced by shrinking this operation shouldn't be
3976         // considered undefined behavior. So, we can't unconditionally copy
3977         // arithmetic wrapping flags to NewI.
3978         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3979       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3980         NewI =
3981             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3982                          ShrinkOperand(CI->getOperand(1)));
3983       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3984         NewI = B.CreateSelect(SI->getCondition(),
3985                               ShrinkOperand(SI->getTrueValue()),
3986                               ShrinkOperand(SI->getFalseValue()));
3987       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3988         switch (CI->getOpcode()) {
3989         default:
3990           llvm_unreachable("Unhandled cast!");
3991         case Instruction::Trunc:
3992           NewI = ShrinkOperand(CI->getOperand(0));
3993           break;
3994         case Instruction::SExt:
3995           NewI = B.CreateSExtOrTrunc(
3996               CI->getOperand(0),
3997               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3998           break;
3999         case Instruction::ZExt:
4000           NewI = B.CreateZExtOrTrunc(
4001               CI->getOperand(0),
4002               smallestIntegerVectorType(OriginalTy, TruncatedTy));
4003           break;
4004         }
4005       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
4006         auto Elements0 = cast<FixedVectorType>(SI->getOperand(0)->getType())
4007                              ->getNumElements();
4008         auto *O0 = B.CreateZExtOrTrunc(
4009             SI->getOperand(0),
4010             FixedVectorType::get(ScalarTruncatedTy, Elements0));
4011         auto Elements1 = cast<FixedVectorType>(SI->getOperand(1)->getType())
4012                              ->getNumElements();
4013         auto *O1 = B.CreateZExtOrTrunc(
4014             SI->getOperand(1),
4015             FixedVectorType::get(ScalarTruncatedTy, Elements1));
4016 
4017         NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
4018       } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
4019         // Don't do anything with the operands, just extend the result.
4020         continue;
4021       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
4022         auto Elements = cast<FixedVectorType>(IE->getOperand(0)->getType())
4023                             ->getNumElements();
4024         auto *O0 = B.CreateZExtOrTrunc(
4025             IE->getOperand(0),
4026             FixedVectorType::get(ScalarTruncatedTy, Elements));
4027         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
4028         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
4029       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
4030         auto Elements = cast<FixedVectorType>(EE->getOperand(0)->getType())
4031                             ->getNumElements();
4032         auto *O0 = B.CreateZExtOrTrunc(
4033             EE->getOperand(0),
4034             FixedVectorType::get(ScalarTruncatedTy, Elements));
4035         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
4036       } else {
4037         // If we don't know what to do, be conservative and don't do anything.
4038         continue;
4039       }
4040 
4041       // Lastly, extend the result.
4042       NewI->takeName(cast<Instruction>(I));
4043       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
4044       I->replaceAllUsesWith(Res);
4045       cast<Instruction>(I)->eraseFromParent();
4046       Erased.insert(I);
4047       State.reset(Def, Res, Part);
4048     }
4049   }
4050 
4051   // We'll have created a bunch of ZExts that are now parentless. Clean up.
4052   for (const auto &KV : Cost->getMinimalBitwidths()) {
4053     // If the value wasn't vectorized, we must maintain the original scalar
4054     // type. The absence of the value from State indicates that it
4055     // wasn't vectorized.
4056     VPValue *Def = State.Plan->getVPValue(KV.first);
4057     if (!State.hasAnyVectorValue(Def))
4058       continue;
4059     for (unsigned Part = 0; Part < UF; ++Part) {
4060       Value *I = State.get(Def, Part);
4061       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
4062       if (Inst && Inst->use_empty()) {
4063         Value *NewI = Inst->getOperand(0);
4064         Inst->eraseFromParent();
4065         State.reset(Def, NewI, Part);
4066       }
4067     }
4068   }
4069 }
4070 
4071 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
4072   // Insert truncates and extends for any truncated instructions as hints to
4073   // InstCombine.
4074   if (VF.isVector())
4075     truncateToMinimalBitwidths(State);
4076 
4077   // Fix widened non-induction PHIs by setting up the PHI operands.
4078   if (OrigPHIsToFix.size()) {
4079     assert(EnableVPlanNativePath &&
4080            "Unexpected non-induction PHIs for fixup in non VPlan-native path");
4081     fixNonInductionPHIs(State);
4082   }
4083 
4084   // At this point every instruction in the original loop is widened to a
4085   // vector form. Now we need to fix the recurrences in the loop. These PHI
4086   // nodes are currently empty because we did not want to introduce cycles.
4087   // This is the second stage of vectorizing recurrences.
4088   fixCrossIterationPHIs(State);
4089 
4090   // Forget the original basic block.
4091   PSE.getSE()->forgetLoop(OrigLoop);
4092 
4093   // Fix-up external users of the induction variables.
4094   for (auto &Entry : Legal->getInductionVars())
4095     fixupIVUsers(Entry.first, Entry.second,
4096                  getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
4097                  IVEndValues[Entry.first], LoopMiddleBlock);
4098 
4099   fixLCSSAPHIs(State);
4100   for (Instruction *PI : PredicatedInstructions)
4101     sinkScalarOperands(&*PI);
4102 
4103   // Remove redundant induction instructions.
4104   cse(LoopVectorBody);
4105 
4106   // Set/update profile weights for the vector and remainder loops as original
4107   // loop iterations are now distributed among them. Note that original loop
4108   // represented by LoopScalarBody becomes remainder loop after vectorization.
4109   //
  // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
  // end up with a slightly less precise result, but that should be OK since
  // the profile is not inherently precise anyway. Note also that a possible
  // bypass of the vector code caused by legality checks is ignored,
  // optimistically assigning all the weight to the vector loop.
4115   //
  // For scalable vectorization we can't know at compile time how many
  // iterations of the loop are handled in one vector iteration, so instead
  // assume a pessimistic vscale of '1'.
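  // For example (illustrative numbers only), an original loop with a profiled
  // trip count of ~1003 and VF * UF = 8 gets roughly 125 iterations assigned
  // to the vector loop and the remaining few to the scalar remainder loop.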
4119   setProfileInfoAfterUnrolling(
4120       LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody),
4121       LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF);
4122 }
4123 
4124 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
4125   // In order to support recurrences we need to be able to vectorize Phi nodes.
4126   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4127   // stage #2: We now need to fix the recurrences by adding incoming edges to
4128   // the currently empty PHI nodes. At this point every instruction in the
4129   // original loop is widened to a vector form so we can use them to construct
4130   // the incoming edges.
4131   VPBasicBlock *Header = State.Plan->getEntry()->getEntryBasicBlock();
4132   for (VPRecipeBase &R : Header->phis()) {
4133     auto *PhiR = dyn_cast<VPWidenPHIRecipe>(&R);
4134     if (!PhiR)
4135       continue;
4136     auto *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
4137     if (PhiR->getRecurrenceDescriptor()) {
4138       fixReduction(PhiR, State);
4139     } else if (Legal->isFirstOrderRecurrence(OrigPhi))
4140       fixFirstOrderRecurrence(OrigPhi, State);
4141   }
4142 }
4143 
4144 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi,
4145                                                   VPTransformState &State) {
4146   // This is the second phase of vectorizing first-order recurrences. An
4147   // overview of the transformation is described below. Suppose we have the
4148   // following loop.
4149   //
4150   //   for (int i = 0; i < n; ++i)
4151   //     b[i] = a[i] - a[i - 1];
4152   //
4153   // There is a first-order recurrence on "a". For this loop, the shorthand
4154   // scalar IR looks like:
4155   //
4156   //   scalar.ph:
4157   //     s_init = a[-1]
4158   //     br scalar.body
4159   //
4160   //   scalar.body:
4161   //     i = phi [0, scalar.ph], [i+1, scalar.body]
4162   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
4163   //     s2 = a[i]
4164   //     b[i] = s2 - s1
4165   //     br cond, scalar.body, ...
4166   //
  // In this example, s1 is a recurrence because its value depends on the
4168   // previous iteration. In the first phase of vectorization, we created a
4169   // temporary value for s1. We now complete the vectorization and produce the
4170   // shorthand vector IR shown below (for VF = 4, UF = 1).
4171   //
4172   //   vector.ph:
4173   //     v_init = vector(..., ..., ..., a[-1])
4174   //     br vector.body
4175   //
4176   //   vector.body
4177   //     i = phi [0, vector.ph], [i+4, vector.body]
4178   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
4179   //     v2 = a[i, i+1, i+2, i+3];
4180   //     v3 = vector(v1(3), v2(0, 1, 2))
4181   //     b[i, i+1, i+2, i+3] = v2 - v3
4182   //     br cond, vector.body, middle.block
4183   //
4184   //   middle.block:
4185   //     x = v2(3)
4186   //     br scalar.ph
4187   //
4188   //   scalar.ph:
4189   //     s_init = phi [x, middle.block], [a[-1], otherwise]
4190   //     br scalar.body
4191   //
  // After the vector loop completes execution, we extract the next value of
4193   // the recurrence (x) to use as the initial value in the scalar loop.
4194 
4195   // Get the original loop preheader and single loop latch.
4196   auto *Preheader = OrigLoop->getLoopPreheader();
4197   auto *Latch = OrigLoop->getLoopLatch();
4198 
4199   // Get the initial and previous values of the scalar recurrence.
4200   auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
4201   auto *Previous = Phi->getIncomingValueForBlock(Latch);
4202 
4203   auto *IdxTy = Builder.getInt32Ty();
4204   auto *One = ConstantInt::get(IdxTy, 1);
4205 
4206   // Create a vector from the initial value.
4207   auto *VectorInit = ScalarInit;
4208   if (VF.isVector()) {
4209     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4210     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4211     auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
4212     VectorInit = Builder.CreateInsertElement(
4213         PoisonValue::get(VectorType::get(VectorInit->getType(), VF)),
4214         VectorInit, LastIdx, "vector.recur.init");
4215   }
4216 
4217   VPValue *PhiDef = State.Plan->getVPValue(Phi);
4218   VPValue *PreviousDef = State.Plan->getVPValue(Previous);
4219   // We constructed a temporary phi node in the first phase of vectorization.
4220   // This phi node will eventually be deleted.
4221   Builder.SetInsertPoint(cast<Instruction>(State.get(PhiDef, 0)));
4222 
4223   // Create a phi node for the new recurrence. The current value will either be
4224   // the initial value inserted into a vector or loop-varying vector value.
4225   auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
4226   VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
4227 
4228   // Get the vectorized previous value of the last part UF - 1. It appears last
4229   // among all unrolled iterations, due to the order of their construction.
4230   Value *PreviousLastPart = State.get(PreviousDef, UF - 1);
4231 
4232   // Find and set the insertion point after the previous value if it is an
4233   // instruction.
4234   BasicBlock::iterator InsertPt;
4235   // Note that the previous value may have been constant-folded so it is not
4236   // guaranteed to be an instruction in the vector loop.
4237   // FIXME: Loop invariant values do not form recurrences. We should deal with
4238   //        them earlier.
4239   if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart))
4240     InsertPt = LoopVectorBody->getFirstInsertionPt();
4241   else {
4242     Instruction *PreviousInst = cast<Instruction>(PreviousLastPart);
4243     if (isa<PHINode>(PreviousLastPart))
4244       // If the previous value is a phi node, we should insert after all the phi
4245       // nodes in the block containing the PHI to avoid breaking basic block
4246       // verification. Note that the basic block may be different to
4247       // LoopVectorBody, in case we predicate the loop.
4248       InsertPt = PreviousInst->getParent()->getFirstInsertionPt();
4249     else
4250       InsertPt = ++PreviousInst->getIterator();
4251   }
4252   Builder.SetInsertPoint(&*InsertPt);
4253 
4254   // The vector from which to take the initial value for the current iteration
4255   // (actual or unrolled). Initially, this is the vector phi node.
4256   Value *Incoming = VecPhi;
4257 
4258   // Shuffle the current and previous vector and update the vector parts.
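  // For example (illustrative), with VF = 4 the splice of the incoming vector
  // <i0 i1 i2 i3> and the previous part <p0 p1 p2 p3> with offset -1 yields
  // <i3 p0 p1 p2>.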
4259   for (unsigned Part = 0; Part < UF; ++Part) {
4260     Value *PreviousPart = State.get(PreviousDef, Part);
4261     Value *PhiPart = State.get(PhiDef, Part);
4262     auto *Shuffle = VF.isVector()
4263                         ? Builder.CreateVectorSplice(Incoming, PreviousPart, -1)
4264                         : Incoming;
4265     PhiPart->replaceAllUsesWith(Shuffle);
4266     cast<Instruction>(PhiPart)->eraseFromParent();
4267     State.reset(PhiDef, Shuffle, Part);
4268     Incoming = PreviousPart;
4269   }
4270 
4271   // Fix the latch value of the new recurrence in the vector loop.
4272   VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4273 
4274   // Extract the last vector element in the middle block. This will be the
4275   // initial value for the recurrence when jumping to the scalar loop.
4276   auto *ExtractForScalar = Incoming;
4277   if (VF.isVector()) {
4278     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4279     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4280     auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
4281     ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx,
4282                                                     "vector.recur.extract");
4283   }
4284   // Extract the second last element in the middle block if the
4285   // Phi is used outside the loop. We need to extract the phi itself
4286   // and not the last element (the phi update in the current iteration). This
4287   // will be the value when jumping to the exit block from the LoopMiddleBlock,
4288   // when the scalar loop is not run at all.
4289   Value *ExtractForPhiUsedOutsideLoop = nullptr;
4290   if (VF.isVector()) {
4291     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4292     auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2));
4293     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
4294         Incoming, Idx, "vector.recur.extract.for.phi");
4295   } else if (UF > 1)
4296     // When loop is unrolled without vectorizing, initialize
4297     // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value
4298     // of `Incoming`. This is analogous to the vectorized case above: extracting
4299     // the second last element when VF > 1.
4300     ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);
4301 
4302   // Fix the initial value of the original recurrence in the scalar loop.
4303   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
4304   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
4305   for (auto *BB : predecessors(LoopScalarPreHeader)) {
4306     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
4307     Start->addIncoming(Incoming, BB);
4308   }
4309 
4310   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
4311   Phi->setName("scalar.recur");
4312 
4313   // Finally, fix users of the recurrence outside the loop. The users will need
4314   // either the last value of the scalar recurrence or the last value of the
4315   // vector recurrence we extracted in the middle block. Since the loop is in
4316   // LCSSA form, we just need to find all the phi nodes for the original scalar
4317   // recurrence in the exit block, and then add an edge for the middle block.
4318   // Note that LCSSA does not imply single entry when the original scalar loop
4319   // had multiple exiting edges (as we always run the last iteration in the
4320   // scalar epilogue); in that case, the exiting path through middle will be
4321   // dynamically dead and the value picked for the phi doesn't matter.
4322   for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4323     if (any_of(LCSSAPhi.incoming_values(),
4324                [Phi](Value *V) { return V == Phi; }))
4325       LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
4326 }
4327 
4328 void InnerLoopVectorizer::fixReduction(VPWidenPHIRecipe *PhiR,
4329                                        VPTransformState &State) {
4330   PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
  // Get its reduction variable descriptor.
4332   assert(Legal->isReductionVariable(OrigPhi) &&
4333          "Unable to find the reduction variable");
4334   const RecurrenceDescriptor &RdxDesc = *PhiR->getRecurrenceDescriptor();
4335 
4336   RecurKind RK = RdxDesc.getRecurrenceKind();
4337   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
4338   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
4339   setDebugLocFromInst(Builder, ReductionStartValue);
4340   bool IsInLoopReductionPhi = Cost->isInLoopReduction(OrigPhi);
4341 
4342   VPValue *LoopExitInstDef = State.Plan->getVPValue(LoopExitInst);
4343   // This is the vector-clone of the value that leaves the loop.
4344   Type *VecTy = State.get(LoopExitInstDef, 0)->getType();
4345 
4346   // Wrap flags are in general invalid after vectorization, clear them.
4347   clearReductionWrapFlags(RdxDesc, State);
4348 
4349   // Fix the vector-loop phi.
4350 
4351   // Reductions do not have to start at zero. They can start with
4352   // any loop invariant values.
4353   BasicBlock *VectorLoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
4354 
4355   bool IsOrdered = State.VF.isVector() && IsInLoopReductionPhi &&
4356                    Cost->useOrderedReductions(RdxDesc);
4357 
4358   for (unsigned Part = 0; Part < UF; ++Part) {
4359     if (IsOrdered && Part > 0)
4360       break;
4361     Value *VecRdxPhi = State.get(PhiR->getVPSingleValue(), Part);
4362     Value *Val = State.get(PhiR->getBackedgeValue(), Part);
4363     if (IsOrdered)
4364       Val = State.get(PhiR->getBackedgeValue(), UF - 1);
4365 
4366     cast<PHINode>(VecRdxPhi)->addIncoming(Val, VectorLoopLatch);
4367   }
4368 
4369   // Before each round, move the insertion point right between
4370   // the PHIs and the values we are going to write.
4371   // This allows us to write both PHINodes and the extractelement
4372   // instructions.
4373   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4374 
4375   setDebugLocFromInst(Builder, LoopExitInst);
4376 
4377   Type *PhiTy = OrigPhi->getType();
4378   // If tail is folded by masking, the vector value to leave the loop should be
4379   // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
4380   // instead of the former. For an inloop reduction the reduction will already
4381   // be predicated, and does not need to be handled here.
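  // As an illustrative sketch (names may differ), the value leaving the loop
  // becomes:
  //   %sel = select <4 x i1> %mask, <4 x i32> %rdx.next, <4 x i32> %vec.phi
  // and %sel, rather than %rdx.next, is used by the middle block.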
4382   if (Cost->foldTailByMasking() && !IsInLoopReductionPhi) {
4383     for (unsigned Part = 0; Part < UF; ++Part) {
4384       Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
4385       Value *Sel = nullptr;
4386       for (User *U : VecLoopExitInst->users()) {
4387         if (isa<SelectInst>(U)) {
4388           assert(!Sel && "Reduction exit feeding two selects");
4389           Sel = U;
4390         } else
4391           assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
4392       }
4393       assert(Sel && "Reduction exit feeds no select");
4394       State.reset(LoopExitInstDef, Sel, Part);
4395 
4396       // If the target can create a predicated operator for the reduction at no
4397       // extra cost in the loop (for example a predicated vadd), it can be
4398       // cheaper for the select to remain in the loop than be sunk out of it,
4399       // and so use the select value for the phi instead of the old
4400       // LoopExitValue.
4401       if (PreferPredicatedReductionSelect ||
4402           TTI->preferPredicatedReductionSelect(
4403               RdxDesc.getOpcode(), PhiTy,
4404               TargetTransformInfo::ReductionFlags())) {
4405         auto *VecRdxPhi =
4406             cast<PHINode>(State.get(PhiR->getVPSingleValue(), Part));
4407         VecRdxPhi->setIncomingValueForBlock(
4408             LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel);
4409       }
4410     }
4411   }
4412 
4413   // If the vector reduction can be performed in a smaller type, we truncate
4414   // then extend the loop exit value to enable InstCombine to evaluate the
4415   // entire expression in the smaller type.
4416   if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) {
4417     assert(!IsInLoopReductionPhi && "Unexpected truncated inloop reduction!");
4418     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
4419     Builder.SetInsertPoint(
4420         LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
4421     VectorParts RdxParts(UF);
4422     for (unsigned Part = 0; Part < UF; ++Part) {
4423       RdxParts[Part] = State.get(LoopExitInstDef, Part);
4424       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4425       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
4426                                         : Builder.CreateZExt(Trunc, VecTy);
4427       for (Value::user_iterator UI = RdxParts[Part]->user_begin();
4428            UI != RdxParts[Part]->user_end();)
4429         if (*UI != Trunc) {
4430           (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd);
4431           RdxParts[Part] = Extnd;
4432         } else {
4433           ++UI;
4434         }
4435     }
4436     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4437     for (unsigned Part = 0; Part < UF; ++Part) {
4438       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4439       State.reset(LoopExitInstDef, RdxParts[Part], Part);
4440     }
4441   }
4442 
4443   // Reduce all of the unrolled parts into a single vector.
4444   Value *ReducedPartRdx = State.get(LoopExitInstDef, 0);
4445   unsigned Op = RecurrenceDescriptor::getOpcode(RK);
4446 
4447   // The middle block terminator has already been assigned a DebugLoc here (the
4448   // OrigLoop's single latch terminator). We want the whole middle block to
4449   // appear to execute on this line because: (a) it is all compiler generated,
4450   // (b) these instructions are always executed after evaluating the latch
4451   // conditional branch, and (c) other passes may add new predecessors which
4452   // terminate on this line. This is the easiest way to ensure we don't
4453   // accidentally cause an extra step back into the loop while debugging.
4454   setDebugLocFromInst(Builder, LoopMiddleBlock->getTerminator());
4455   if (IsOrdered)
4456     ReducedPartRdx = State.get(LoopExitInstDef, UF - 1);
4457   else {
4458     // Floating-point operations should have some FMF to enable the reduction.
4459     IRBuilderBase::FastMathFlagGuard FMFG(Builder);
4460     Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
4461     for (unsigned Part = 1; Part < UF; ++Part) {
4462       Value *RdxPart = State.get(LoopExitInstDef, Part);
4463       if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
4464         ReducedPartRdx = Builder.CreateBinOp(
4465             (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
4466       } else {
4467         ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
4468       }
4469     }
4470   }
4471 
4472   // Create the reduction after the loop. Note that inloop reductions create the
4473   // target reduction in the loop using a Reduction recipe.
4474   if (VF.isVector() && !IsInLoopReductionPhi) {
4475     ReducedPartRdx =
4476         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx);
4477     // If the reduction can be performed in a smaller type, we need to extend
4478     // the reduction to the wider type before we branch to the original loop.
4479     if (PhiTy != RdxDesc.getRecurrenceType())
4480       ReducedPartRdx = RdxDesc.isSigned()
4481                            ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
4482                            : Builder.CreateZExt(ReducedPartRdx, PhiTy);
4483   }
4484 
4485   // Create a phi node that merges control-flow from the backedge-taken check
4486   // block and the middle block.
4487   PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx",
4488                                         LoopScalarPreHeader->getTerminator());
4489   for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
4490     BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
4491   BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
4492 
4493   // Now, we need to fix the users of the reduction variable
4494   // inside and outside of the scalar remainder loop.
4495 
4496   // We know that the loop is in LCSSA form. We need to update the PHI nodes
4497   // in the exit blocks.  See comment on analogous loop in
  // fixFirstOrderRecurrence for a more complete explanation of the logic.
4499   for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4500     if (any_of(LCSSAPhi.incoming_values(),
4501                [LoopExitInst](Value *V) { return V == LoopExitInst; }))
4502       LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
4503 
4504   // Fix the scalar loop reduction variable with the incoming reduction sum
4505   // from the vector body and from the backedge value.
4506   int IncomingEdgeBlockIdx =
4507       OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4508   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4509   // Pick the other block.
4510   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4511   OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4512   OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4513 }
4514 
4515 void InnerLoopVectorizer::clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
4516                                                   VPTransformState &State) {
4517   RecurKind RK = RdxDesc.getRecurrenceKind();
4518   if (RK != RecurKind::Add && RK != RecurKind::Mul)
4519     return;
4520 
4521   Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
4522   assert(LoopExitInstr && "null loop exit instruction");
4523   SmallVector<Instruction *, 8> Worklist;
4524   SmallPtrSet<Instruction *, 8> Visited;
4525   Worklist.push_back(LoopExitInstr);
4526   Visited.insert(LoopExitInstr);
4527 
4528   while (!Worklist.empty()) {
4529     Instruction *Cur = Worklist.pop_back_val();
4530     if (isa<OverflowingBinaryOperator>(Cur))
4531       for (unsigned Part = 0; Part < UF; ++Part) {
4532         Value *V = State.get(State.Plan->getVPValue(Cur), Part);
4533         cast<Instruction>(V)->dropPoisonGeneratingFlags();
4534       }
4535 
4536     for (User *U : Cur->users()) {
4537       Instruction *UI = cast<Instruction>(U);
4538       if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
4539           Visited.insert(UI).second)
4540         Worklist.push_back(UI);
4541     }
4542   }
4543 }
4544 
4545 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) {
4546   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4547     if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1)
4548       // Some phis were already hand updated by the reduction and recurrence
4549       // code above, leave them alone.
4550       continue;
4551 
4552     auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
4553     // Non-instruction incoming values will have only one value.
4554 
4555     VPLane Lane = VPLane::getFirstLane();
4556     if (isa<Instruction>(IncomingValue) &&
4557         !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue),
4558                                            VF))
4559       Lane = VPLane::getLastLaneForVF(VF);
4560 
4561     // Can be a loop invariant incoming value or the last scalar value to be
4562     // extracted from the vectorized loop.
4563     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4564     Value *lastIncomingValue =
4565         OrigLoop->isLoopInvariant(IncomingValue)
4566             ? IncomingValue
4567             : State.get(State.Plan->getVPValue(IncomingValue),
4568                         VPIteration(UF - 1, Lane));
4569     LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
4570   }
4571 }
4572 
4573 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4574   // The basic block and loop containing the predicated instruction.
4575   auto *PredBB = PredInst->getParent();
4576   auto *VectorLoop = LI->getLoopFor(PredBB);
4577 
4578   // Initialize a worklist with the operands of the predicated instruction.
4579   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4580 
4581   // Holds instructions that we need to analyze again. An instruction may be
4582   // reanalyzed if we don't yet know if we can sink it or not.
4583   SmallVector<Instruction *, 8> InstsToReanalyze;
4584 
4585   // Returns true if a given use occurs in the predicated block. Phi nodes use
4586   // their operands in their corresponding predecessor blocks.
4587   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4588     auto *I = cast<Instruction>(U.getUser());
4589     BasicBlock *BB = I->getParent();
4590     if (auto *Phi = dyn_cast<PHINode>(I))
4591       BB = Phi->getIncomingBlock(
4592           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4593     return BB == PredBB;
4594   };
4595 
4596   // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
4598   // operands are then added to the worklist. The algorithm ends after one pass
4599   // through the worklist doesn't sink a single instruction.
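  // For example (illustrative), if the predicated block contains a store whose
  // address comes from a scalar GEP defined outside the block and used only by
  // that store, the GEP (and then its own single-use operands) is moved into
  // the predicated block on successive passes.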
4600   bool Changed;
4601   do {
4602     // Add the instructions that need to be reanalyzed to the worklist, and
4603     // reset the changed indicator.
4604     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4605     InstsToReanalyze.clear();
4606     Changed = false;
4607 
4608     while (!Worklist.empty()) {
4609       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4610 
4611       // We can't sink an instruction if it is a phi node, is not in the loop,
4612       // or may have side effects.
4613       if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) ||
4614           I->mayHaveSideEffects())
4615         continue;
4616 
4617       // If the instruction is already in PredBB, check if we can sink its
4618       // operands. In that case, VPlan's sinkScalarOperands() succeeded in
4619       // sinking the scalar instruction I, hence it appears in PredBB; but it
4620       // may have failed to sink I's operands (recursively), which we try
4621       // (again) here.
4622       if (I->getParent() == PredBB) {
4623         Worklist.insert(I->op_begin(), I->op_end());
4624         continue;
4625       }
4626 
4627       // It's legal to sink the instruction if all its uses occur in the
4628       // predicated block. Otherwise, there's nothing to do yet, and we may
4629       // need to reanalyze the instruction.
4630       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4631         InstsToReanalyze.push_back(I);
4632         continue;
4633       }
4634 
4635       // Move the instruction to the beginning of the predicated block, and add
      // its operands to the worklist.
4637       I->moveBefore(&*PredBB->getFirstInsertionPt());
4638       Worklist.insert(I->op_begin(), I->op_end());
4639 
4640       // The sinking may have enabled other instructions to be sunk, so we will
4641       // need to iterate.
4642       Changed = true;
4643     }
4644   } while (Changed);
4645 }
4646 
4647 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
4648   for (PHINode *OrigPhi : OrigPHIsToFix) {
4649     VPWidenPHIRecipe *VPPhi =
4650         cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi));
4651     PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
4652     // Make sure the builder has a valid insert point.
4653     Builder.SetInsertPoint(NewPhi);
4654     for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) {
4655       VPValue *Inc = VPPhi->getIncomingValue(i);
4656       VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i);
4657       NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]);
4658     }
4659   }
4660 }
4661 
4662 bool InnerLoopVectorizer::useOrderedReductions(RecurrenceDescriptor &RdxDesc) {
4663   return Cost->useOrderedReductions(RdxDesc);
4664 }
4665 
4666 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, VPValue *VPDef,
4667                                    VPUser &Operands, unsigned UF,
4668                                    ElementCount VF, bool IsPtrLoopInvariant,
4669                                    SmallBitVector &IsIndexLoopInvariant,
4670                                    VPTransformState &State) {
4671   // Construct a vector GEP by widening the operands of the scalar GEP as
4672   // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
4673   // results in a vector of pointers when at least one operand of the GEP
4674   // is vector-typed. Thus, to keep the representation compact, we only use
4675   // vector-typed operands for loop-varying values.
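  // As an illustrative sketch, a GEP with a loop-invariant base pointer and a
  // loop-varying index is widened to something like:
  //   %wide.gep = getelementptr inbounds i32, i32* %base, <4 x i64> %vec.idx
  // which yields a vector of pointers.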
4676 
4677   if (VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
4678     // If we are vectorizing, but the GEP has only loop-invariant operands,
4679     // the GEP we build (by only using vector-typed operands for
4680     // loop-varying values) would be a scalar pointer. Thus, to ensure we
4681     // produce a vector of pointers, we need to either arbitrarily pick an
4682     // operand to broadcast, or broadcast a clone of the original GEP.
4683     // Here, we broadcast a clone of the original.
4684     //
4685     // TODO: If at some point we decide to scalarize instructions having
4686     //       loop-invariant operands, this special case will no longer be
4687     //       required. We would add the scalarization decision to
4688     //       collectLoopScalars() and teach getVectorValue() to broadcast
4689     //       the lane-zero scalar value.
4690     auto *Clone = Builder.Insert(GEP->clone());
4691     for (unsigned Part = 0; Part < UF; ++Part) {
4692       Value *EntryPart = Builder.CreateVectorSplat(VF, Clone);
4693       State.set(VPDef, EntryPart, Part);
4694       addMetadata(EntryPart, GEP);
4695     }
4696   } else {
4697     // If the GEP has at least one loop-varying operand, we are sure to
4698     // produce a vector of pointers. But if we are only unrolling, we want
4699     // to produce a scalar GEP for each unroll part. Thus, the GEP we
4700     // produce with the code below will be scalar (if VF == 1) or vector
4701     // (otherwise). Note that for the unroll-only case, we still maintain
4702     // values in the vector mapping with initVector, as we do for other
4703     // instructions.
4704     for (unsigned Part = 0; Part < UF; ++Part) {
4705       // The pointer operand of the new GEP. If it's loop-invariant, we
4706       // won't broadcast it.
4707       auto *Ptr = IsPtrLoopInvariant
4708                       ? State.get(Operands.getOperand(0), VPIteration(0, 0))
4709                       : State.get(Operands.getOperand(0), Part);
4710 
4711       // Collect all the indices for the new GEP. If any index is
4712       // loop-invariant, we won't broadcast it.
4713       SmallVector<Value *, 4> Indices;
4714       for (unsigned I = 1, E = Operands.getNumOperands(); I < E; I++) {
4715         VPValue *Operand = Operands.getOperand(I);
4716         if (IsIndexLoopInvariant[I - 1])
4717           Indices.push_back(State.get(Operand, VPIteration(0, 0)));
4718         else
4719           Indices.push_back(State.get(Operand, Part));
4720       }
4721 
4722       // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
4723       // but it should be a vector, otherwise.
4724       auto *NewGEP =
4725           GEP->isInBounds()
4726               ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr,
4727                                           Indices)
4728               : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices);
4729       assert((VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
4730              "NewGEP is not a pointer vector");
4731       State.set(VPDef, NewGEP, Part);
4732       addMetadata(NewGEP, GEP);
4733     }
4734   }
4735 }
4736 
4737 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
4738                                               RecurrenceDescriptor *RdxDesc,
4739                                               VPWidenPHIRecipe *PhiR,
4740                                               VPTransformState &State) {
4741   PHINode *P = cast<PHINode>(PN);
4742   if (EnableVPlanNativePath) {
4743     // Currently we enter here in the VPlan-native path for non-induction
4744     // PHIs where all control flow is uniform. We simply widen these PHIs.
4745     // Create a vector phi with no operands - the vector phi operands will be
4746     // set at the end of vector code generation.
4747     Type *VecTy = (State.VF.isScalar())
4748                       ? PN->getType()
4749                       : VectorType::get(PN->getType(), State.VF);
4750     Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4751     State.set(PhiR, VecPhi, 0);
4752     OrigPHIsToFix.push_back(P);
4753 
4754     return;
4755   }
4756 
4757   assert(PN->getParent() == OrigLoop->getHeader() &&
4758          "Non-header phis should have been handled elsewhere");
4759 
4760   VPValue *StartVPV = PhiR->getStartValue();
4761   Value *StartV = StartVPV ? StartVPV->getLiveInIRValue() : nullptr;
4762   // In order to support recurrences we need to be able to vectorize Phi nodes.
4763   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4764   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4765   // this value when we vectorize all of the instructions that use the PHI.
4766   if (RdxDesc || Legal->isFirstOrderRecurrence(P)) {
4767     Value *Iden = nullptr;
4768     bool ScalarPHI =
4769         (State.VF.isScalar()) || Cost->isInLoopReduction(cast<PHINode>(PN));
4770     Type *VecTy =
4771         ScalarPHI ? PN->getType() : VectorType::get(PN->getType(), State.VF);
4772 
4773     if (RdxDesc) {
4774       assert(Legal->isReductionVariable(P) && StartV &&
4775              "RdxDesc should only be set for reduction variables; in that case "
4776              "a StartV is also required");
4777       RecurKind RK = RdxDesc->getRecurrenceKind();
4778       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK)) {
        // MinMax reductions have the start value as their identity.
4780         if (ScalarPHI) {
4781           Iden = StartV;
4782         } else {
4783           IRBuilderBase::InsertPointGuard IPBuilder(Builder);
4784           Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4785           StartV = Iden =
4786               Builder.CreateVectorSplat(State.VF, StartV, "minmax.ident");
4787         }
4788       } else {
4789         Constant *IdenC = RecurrenceDescriptor::getRecurrenceIdentity(
4790             RK, VecTy->getScalarType(), RdxDesc->getFastMathFlags());
4791         Iden = IdenC;
4792 
4793         if (!ScalarPHI) {
4794           Iden = ConstantVector::getSplat(State.VF, IdenC);
4795           IRBuilderBase::InsertPointGuard IPBuilder(Builder);
4796           Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4797           Constant *Zero = Builder.getInt32(0);
4798           StartV = Builder.CreateInsertElement(Iden, StartV, Zero);
4799         }
4800       }
4801     }
4802 
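    // For in-loop (ordered) reductions the partial sums must be combined
    // strictly in sequence, so only a single phi is created for the first
    // unroll part; the loop below exits early for the remaining parts.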
4803     bool IsOrdered = State.VF.isVector() &&
4804                      Cost->isInLoopReduction(cast<PHINode>(PN)) &&
4805                      Cost->useOrderedReductions(*RdxDesc);
4806 
4807     for (unsigned Part = 0; Part < State.UF; ++Part) {
4808       // This is phase one of vectorizing PHIs.
4809       if (Part > 0 && IsOrdered)
4810         return;
4811       Value *EntryPart = PHINode::Create(
4812           VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
4813       State.set(PhiR, EntryPart, Part);
4814       if (StartV) {
4815         // Make sure to add the reduction start value only to the
4816         // first unroll part.
4817         Value *StartVal = (Part == 0) ? StartV : Iden;
4818         cast<PHINode>(EntryPart)->addIncoming(StartVal, LoopVectorPreHeader);
4819       }
4820     }
4821     return;
4822   }
4823 
4824   assert(!Legal->isReductionVariable(P) &&
4825          "reductions should be handled above");
4826 
4827   setDebugLocFromInst(Builder, P);
4828 
4829   // This PHINode must be an induction variable.
4830   // Make sure that we know about it.
4831   assert(Legal->getInductionVars().count(P) && "Not an induction variable");
4832 
4833   InductionDescriptor II = Legal->getInductionVars().lookup(P);
4834   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4835 
4836   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4837   // which can be found from the original scalar operations.
4838   switch (II.getKind()) {
4839   case InductionDescriptor::IK_NoInduction:
4840     llvm_unreachable("Unknown induction");
4841   case InductionDescriptor::IK_IntInduction:
4842   case InductionDescriptor::IK_FpInduction:
4843     llvm_unreachable("Integer/fp induction is handled elsewhere.");
4844   case InductionDescriptor::IK_PtrInduction: {
4845     // Handle the pointer induction variable case.
4846     assert(P->getType()->isPointerTy() && "Unexpected type.");
4847 
4848     if (Cost->isScalarAfterVectorization(P, State.VF)) {
4849       // This is the normalized GEP that starts counting at zero.
4850       Value *PtrInd =
4851           Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType());
4852       // Determine the number of scalars we need to generate for each unroll
4853       // iteration. If the instruction is uniform, we only need to generate the
4854       // first lane. Otherwise, we generate all VF values.
4855       bool IsUniform = Cost->isUniformAfterVectorization(P, State.VF);
4856       unsigned Lanes = IsUniform ? 1 : State.VF.getKnownMinValue();
4857 
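      // For a scalable VF the lanes cannot be enumerated with constants, so
      // build a step vector and a splat of the scalar induction value that
      // are combined per unroll part below.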
4858       bool NeedsVectorIndex = !IsUniform && VF.isScalable();
4859       Value *UnitStepVec = nullptr, *PtrIndSplat = nullptr;
4860       if (NeedsVectorIndex) {
4861         Type *VecIVTy = VectorType::get(PtrInd->getType(), VF);
4862         UnitStepVec = Builder.CreateStepVector(VecIVTy);
4863         PtrIndSplat = Builder.CreateVectorSplat(VF, PtrInd);
4864       }
4865 
4866       for (unsigned Part = 0; Part < UF; ++Part) {
4867         Value *PartStart = createStepForVF(
4868             Builder, ConstantInt::get(PtrInd->getType(), Part), VF);
4869 
4870         if (NeedsVectorIndex) {
4871           Value *PartStartSplat = Builder.CreateVectorSplat(VF, PartStart);
4872           Value *Indices = Builder.CreateAdd(PartStartSplat, UnitStepVec);
4873           Value *GlobalIndices = Builder.CreateAdd(PtrIndSplat, Indices);
4874           Value *SclrGep =
4875               emitTransformedIndex(Builder, GlobalIndices, PSE.getSE(), DL, II);
4876           SclrGep->setName("next.gep");
4877           State.set(PhiR, SclrGep, Part);
4878           // We've cached the whole vector, which means we can support the
4879           // extraction of any lane.
4880           continue;
4881         }
4882 
4883         for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4884           Value *Idx = Builder.CreateAdd(
4885               PartStart, ConstantInt::get(PtrInd->getType(), Lane));
4886           Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4887           Value *SclrGep =
4888               emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II);
4889           SclrGep->setName("next.gep");
4890           State.set(PhiR, SclrGep, VPIteration(Part, Lane));
4891         }
4892       }
4893       return;
4894     }
4895     assert(isa<SCEVConstant>(II.getStep()) &&
4896            "Induction step not a SCEV constant!");
4897     Type *PhiType = II.getStep()->getType();
4898 
4899     // Build a pointer phi
4900     Value *ScalarStartValue = II.getStartValue();
4901     Type *ScStValueType = ScalarStartValue->getType();
4902     PHINode *NewPointerPhi =
4903         PHINode::Create(ScStValueType, 2, "pointer.phi", Induction);
4904     NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader);
4905 
    // A pointer induction, advanced by a GEP in the loop latch.
4907     BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
4908     Instruction *InductionLoc = LoopLatch->getTerminator();
4909     const SCEV *ScalarStep = II.getStep();
4910     SCEVExpander Exp(*PSE.getSE(), DL, "induction");
4911     Value *ScalarStepValue =
4912         Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc);
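    // The phi is advanced once per vector loop iteration, by the scalar step
    // multiplied by VF * UF (the number of elements processed per iteration).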
4913     Value *RuntimeVF = getRuntimeVF(Builder, PhiType, VF);
4914     Value *NumUnrolledElems =
4915         Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF));
4916     Value *InductionGEP = GetElementPtrInst::Create(
4917         ScStValueType->getPointerElementType(), NewPointerPhi,
4918         Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind",
4919         InductionLoc);
4920     NewPointerPhi->addIncoming(InductionGEP, LoopLatch);
4921 
4922     // Create UF many actual address geps that use the pointer
4923     // phi as base and a vectorized version of the step value
4924     // (<step*0, ..., step*N>) as offset.
4925     for (unsigned Part = 0; Part < State.UF; ++Part) {
4926       Type *VecPhiType = VectorType::get(PhiType, State.VF);
4927       Value *StartOffsetScalar =
4928           Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part));
4929       Value *StartOffset =
4930           Builder.CreateVectorSplat(State.VF, StartOffsetScalar);
4931       // Create a vector of consecutive numbers from zero to VF.
4932       StartOffset =
4933           Builder.CreateAdd(StartOffset, Builder.CreateStepVector(VecPhiType));
4934 
4935       Value *GEP = Builder.CreateGEP(
4936           ScStValueType->getPointerElementType(), NewPointerPhi,
4937           Builder.CreateMul(
4938               StartOffset, Builder.CreateVectorSplat(State.VF, ScalarStepValue),
4939               "vector.gep"));
4940       State.set(PhiR, GEP, Part);
4941     }
4942   }
4943   }
4944 }
4945 
4946 /// A helper function for checking whether an integer division-related
4947 /// instruction may divide by zero (in which case it must be predicated if
4948 /// executed conditionally in the scalar code).
4949 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
/// Non-zero divisors that are not compile-time constants will not be
4951 /// converted into multiplication, so we will still end up scalarizing
4952 /// the division, but can do so w/o predication.
4953 static bool mayDivideByZero(Instruction &I) {
4954   assert((I.getOpcode() == Instruction::UDiv ||
4955           I.getOpcode() == Instruction::SDiv ||
4956           I.getOpcode() == Instruction::URem ||
4957           I.getOpcode() == Instruction::SRem) &&
4958          "Unexpected instruction");
4959   Value *Divisor = I.getOperand(1);
4960   auto *CInt = dyn_cast<ConstantInt>(Divisor);
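  // A divisor that is not a compile-time constant, or a constant equal to
  // zero, may divide by zero.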
4961   return !CInt || CInt->isZero();
4962 }
4963 
4964 void InnerLoopVectorizer::widenInstruction(Instruction &I, VPValue *Def,
4965                                            VPUser &User,
4966                                            VPTransformState &State) {
4967   switch (I.getOpcode()) {
4968   case Instruction::Call:
4969   case Instruction::Br:
4970   case Instruction::PHI:
4971   case Instruction::GetElementPtr:
4972   case Instruction::Select:
4973     llvm_unreachable("This instruction is handled by a different recipe.");
4974   case Instruction::UDiv:
4975   case Instruction::SDiv:
4976   case Instruction::SRem:
4977   case Instruction::URem:
4978   case Instruction::Add:
4979   case Instruction::FAdd:
4980   case Instruction::Sub:
4981   case Instruction::FSub:
4982   case Instruction::FNeg:
4983   case Instruction::Mul:
4984   case Instruction::FMul:
4985   case Instruction::FDiv:
4986   case Instruction::FRem:
4987   case Instruction::Shl:
4988   case Instruction::LShr:
4989   case Instruction::AShr:
4990   case Instruction::And:
4991   case Instruction::Or:
4992   case Instruction::Xor: {
4993     // Just widen unops and binops.
4994     setDebugLocFromInst(Builder, &I);
4995 
4996     for (unsigned Part = 0; Part < UF; ++Part) {
4997       SmallVector<Value *, 2> Ops;
4998       for (VPValue *VPOp : User.operands())
4999         Ops.push_back(State.get(VPOp, Part));
5000 
5001       Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
5002 
5003       if (auto *VecOp = dyn_cast<Instruction>(V))
5004         VecOp->copyIRFlags(&I);
5005 
5006       // Use this vector value for all users of the original instruction.
5007       State.set(Def, V, Part);
5008       addMetadata(V, &I);
5009     }
5010 
5011     break;
5012   }
5013   case Instruction::ICmp:
5014   case Instruction::FCmp: {
5015     // Widen compares. Generate vector compares.
5016     bool FCmp = (I.getOpcode() == Instruction::FCmp);
5017     auto *Cmp = cast<CmpInst>(&I);
5018     setDebugLocFromInst(Builder, Cmp);
5019     for (unsigned Part = 0; Part < UF; ++Part) {
5020       Value *A = State.get(User.getOperand(0), Part);
5021       Value *B = State.get(User.getOperand(1), Part);
5022       Value *C = nullptr;
5023       if (FCmp) {
5024         // Propagate fast math flags.
5025         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
5026         Builder.setFastMathFlags(Cmp->getFastMathFlags());
5027         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
5028       } else {
5029         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
5030       }
5031       State.set(Def, C, Part);
5032       addMetadata(C, &I);
5033     }
5034 
5035     break;
5036   }
5037 
5038   case Instruction::ZExt:
5039   case Instruction::SExt:
5040   case Instruction::FPToUI:
5041   case Instruction::FPToSI:
5042   case Instruction::FPExt:
5043   case Instruction::PtrToInt:
5044   case Instruction::IntToPtr:
5045   case Instruction::SIToFP:
5046   case Instruction::UIToFP:
5047   case Instruction::Trunc:
5048   case Instruction::FPTrunc:
5049   case Instruction::BitCast: {
5050     auto *CI = cast<CastInst>(&I);
5051     setDebugLocFromInst(Builder, CI);
5052 
    // Vectorize casts.
5054     Type *DestTy =
5055         (VF.isScalar()) ? CI->getType() : VectorType::get(CI->getType(), VF);
5056 
5057     for (unsigned Part = 0; Part < UF; ++Part) {
5058       Value *A = State.get(User.getOperand(0), Part);
5059       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
5060       State.set(Def, Cast, Part);
5061       addMetadata(Cast, &I);
5062     }
5063     break;
5064   }
5065   default:
5066     // This instruction is not vectorized by simple widening.
5067     LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
5068     llvm_unreachable("Unhandled instruction!");
5069   } // end of switch.
5070 }
5071 
5072 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def,
5073                                                VPUser &ArgOperands,
5074                                                VPTransformState &State) {
5075   assert(!isa<DbgInfoIntrinsic>(I) &&
5076          "DbgInfoIntrinsic should have been dropped during VPlan construction");
5077   setDebugLocFromInst(Builder, &I);
5078 
5079   Module *M = I.getParent()->getParent()->getParent();
5080   auto *CI = cast<CallInst>(&I);
5081 
5082   SmallVector<Type *, 4> Tys;
5083   for (Value *ArgOperand : CI->arg_operands())
5084     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue()));
5085 
5086   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
5087 
  // This flag shows whether we use an intrinsic or a plain call for the
  // vectorized version of the instruction, i.e. whether it is beneficial to
  // perform the intrinsic call rather than a library call.
5091   bool NeedToScalarize = false;
5092   InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
5093   InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0;
5094   bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
5095   assert((UseVectorIntrinsic || !NeedToScalarize) &&
5096          "Instruction should be scalarized elsewhere.");
5097   assert((IntrinsicCost.isValid() || CallCost.isValid()) &&
5098          "Either the intrinsic cost or vector call cost must be valid");
5099 
5100   for (unsigned Part = 0; Part < UF; ++Part) {
5101     SmallVector<Type *, 2> TysForDecl = {CI->getType()};
5102     SmallVector<Value *, 4> Args;
5103     for (auto &I : enumerate(ArgOperands.operands())) {
5104       // Some intrinsics have a scalar argument - don't replace it with a
5105       // vector.
5106       Value *Arg;
5107       if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
5108         Arg = State.get(I.value(), Part);
5109       else {
5110         Arg = State.get(I.value(), VPIteration(0, 0));
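        // If the scalar operand also participates in the intrinsic's
        // overloaded signature, record its type so that the correct
        // declaration is retrieved below.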
5111         if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index()))
5112           TysForDecl.push_back(Arg->getType());
5113       }
5114       Args.push_back(Arg);
5115     }
5116 
5117     Function *VectorF;
5118     if (UseVectorIntrinsic) {
5119       // Use vector version of the intrinsic.
5120       if (VF.isVector())
5121         TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
5122       VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
5123       assert(VectorF && "Can't retrieve vector intrinsic.");
5124     } else {
5125       // Use vector version of the function call.
5126       const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
5127 #ifndef NDEBUG
5128       assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
5129              "Can't create vector function.");
5130 #endif
      VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
    }
    SmallVector<OperandBundleDef, 1> OpBundles;
    CI->getOperandBundlesAsDefs(OpBundles);
    CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);

    if (isa<FPMathOperator>(V))
      V->copyFastMathFlags(CI);

    State.set(Def, V, Part);
    addMetadata(V, &I);
5142   }
5143 }
5144 
5145 void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I, VPValue *VPDef,
5146                                                  VPUser &Operands,
5147                                                  bool InvariantCond,
5148                                                  VPTransformState &State) {
5149   setDebugLocFromInst(Builder, &I);
5150 
  // The condition can be loop invariant but still defined inside the
5152   // loop. This means that we can't just use the original 'cond' value.
5153   // We have to take the 'vectorized' value and pick the first lane.
5154   // Instcombine will make this a no-op.
5155   auto *InvarCond = InvariantCond
5156                         ? State.get(Operands.getOperand(0), VPIteration(0, 0))
5157                         : nullptr;
5158 
5159   for (unsigned Part = 0; Part < UF; ++Part) {
5160     Value *Cond =
5161         InvarCond ? InvarCond : State.get(Operands.getOperand(0), Part);
5162     Value *Op0 = State.get(Operands.getOperand(1), Part);
5163     Value *Op1 = State.get(Operands.getOperand(2), Part);
5164     Value *Sel = Builder.CreateSelect(Cond, Op0, Op1);
5165     State.set(VPDef, Sel, Part);
5166     addMetadata(Sel, &I);
5167   }
5168 }
5169 
5170 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
5171   // We should not collect Scalars more than once per VF. Right now, this
5172   // function is called from collectUniformsAndScalars(), which already does
5173   // this check. Collecting Scalars for VF=1 does not make any sense.
5174   assert(VF.isVector() && Scalars.find(VF) == Scalars.end() &&
5175          "This function should not be visited twice for the same VF");
5176 
5177   SmallSetVector<Instruction *, 8> Worklist;
5178 
5179   // These sets are used to seed the analysis with pointers used by memory
5180   // accesses that will remain scalar.
5181   SmallSetVector<Instruction *, 8> ScalarPtrs;
5182   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
5183   auto *Latch = TheLoop->getLoopLatch();
5184 
5185   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
5186   // The pointer operands of loads and stores will be scalar as long as the
5187   // memory access is not a gather or scatter operation. The value operand of a
5188   // store will remain scalar if the store is scalarized.
5189   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
5190     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
5191     assert(WideningDecision != CM_Unknown &&
5192            "Widening decision should be ready at this moment");
5193     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
5194       if (Ptr == Store->getValueOperand())
5195         return WideningDecision == CM_Scalarize;
5196     assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
5197            "Ptr is neither a value or pointer operand");
5198     return WideningDecision != CM_GatherScatter;
5199   };
5200 
5201   // A helper that returns true if the given value is a bitcast or
5202   // getelementptr instruction contained in the loop.
5203   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
5204     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
5205             isa<GetElementPtrInst>(V)) &&
5206            !TheLoop->isLoopInvariant(V);
5207   };
5208 
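  // A helper that returns true if Ptr is a pointer induction variable whose
  // use by MemAccess will be scalar.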
5209   auto isScalarPtrInduction = [&](Instruction *MemAccess, Value *Ptr) {
5210     if (!isa<PHINode>(Ptr) ||
5211         !Legal->getInductionVars().count(cast<PHINode>(Ptr)))
5212       return false;
5213     auto &Induction = Legal->getInductionVars()[cast<PHINode>(Ptr)];
5214     if (Induction.getKind() != InductionDescriptor::IK_PtrInduction)
5215       return false;
5216     return isScalarUse(MemAccess, Ptr);
5217   };
5218 
5219   // A helper that evaluates a memory access's use of a pointer. If the
5220   // pointer is actually the pointer induction of a loop, it is being
5221   // inserted into Worklist. If the use will be a scalar use, and the
5222   // pointer is only used by memory accesses, we place the pointer in
5223   // ScalarPtrs. Otherwise, the pointer is placed in PossibleNonScalarPtrs.
5224   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
5225     if (isScalarPtrInduction(MemAccess, Ptr)) {
5226       Worklist.insert(cast<Instruction>(Ptr));
5227       Instruction *Update = cast<Instruction>(
5228           cast<PHINode>(Ptr)->getIncomingValueForBlock(Latch));
5229       Worklist.insert(Update);
5230       LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Ptr
5231                         << "\n");
5232       LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Update
5233                         << "\n");
5234       return;
5235     }
5236     // We only care about bitcast and getelementptr instructions contained in
5237     // the loop.
5238     if (!isLoopVaryingBitCastOrGEP(Ptr))
5239       return;
5240 
5241     // If the pointer has already been identified as scalar (e.g., if it was
5242     // also identified as uniform), there's nothing to do.
5243     auto *I = cast<Instruction>(Ptr);
5244     if (Worklist.count(I))
5245       return;
5246 
5247     // If the use of the pointer will be a scalar use, and all users of the
5248     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
5249     // place the pointer in PossibleNonScalarPtrs.
5250     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
5251           return isa<LoadInst>(U) || isa<StoreInst>(U);
5252         }))
5253       ScalarPtrs.insert(I);
5254     else
5255       PossibleNonScalarPtrs.insert(I);
5256   };
5257 
  // We seed the scalars analysis with two classes of instructions: (1)
5259   // instructions marked uniform-after-vectorization and (2) bitcast,
5260   // getelementptr and (pointer) phi instructions used by memory accesses
5261   // requiring a scalar use.
5262   //
5263   // (1) Add to the worklist all instructions that have been identified as
5264   // uniform-after-vectorization.
5265   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
5266 
5267   // (2) Add to the worklist all bitcast and getelementptr instructions used by
5268   // memory accesses requiring a scalar use. The pointer operands of loads and
  // stores will be scalar as long as the memory access is not a gather or
5270   // scatter operation. The value operand of a store will remain scalar if the
5271   // store is scalarized.
5272   for (auto *BB : TheLoop->blocks())
5273     for (auto &I : *BB) {
5274       if (auto *Load = dyn_cast<LoadInst>(&I)) {
5275         evaluatePtrUse(Load, Load->getPointerOperand());
5276       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
5277         evaluatePtrUse(Store, Store->getPointerOperand());
5278         evaluatePtrUse(Store, Store->getValueOperand());
5279       }
5280     }
5281   for (auto *I : ScalarPtrs)
5282     if (!PossibleNonScalarPtrs.count(I)) {
5283       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
5284       Worklist.insert(I);
5285     }
5286 
5287   // Insert the forced scalars.
5288   // FIXME: Currently widenPHIInstruction() often creates a dead vector
5289   // induction variable when the PHI user is scalarized.
5290   auto ForcedScalar = ForcedScalars.find(VF);
5291   if (ForcedScalar != ForcedScalars.end())
5292     for (auto *I : ForcedScalar->second)
5293       Worklist.insert(I);
5294 
5295   // Expand the worklist by looking through any bitcasts and getelementptr
5296   // instructions we've already identified as scalar. This is similar to the
5297   // expansion step in collectLoopUniforms(); however, here we're only
5298   // expanding to include additional bitcasts and getelementptr instructions.
5299   unsigned Idx = 0;
5300   while (Idx != Worklist.size()) {
5301     Instruction *Dst = Worklist[Idx++];
5302     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
5303       continue;
5304     auto *Src = cast<Instruction>(Dst->getOperand(0));
5305     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
5306           auto *J = cast<Instruction>(U);
5307           return !TheLoop->contains(J) || Worklist.count(J) ||
5308                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
5309                   isScalarUse(J, Src));
5310         })) {
5311       Worklist.insert(Src);
5312       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
5313     }
5314   }
5315 
5316   // An induction variable will remain scalar if all users of the induction
5317   // variable and induction variable update remain scalar.
5318   for (auto &Induction : Legal->getInductionVars()) {
5319     auto *Ind = Induction.first;
5320     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5321 
5322     // If tail-folding is applied, the primary induction variable will be used
5323     // to feed a vector compare.
5324     if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
5325       continue;
5326 
5327     // Determine if all users of the induction variable are scalar after
5328     // vectorization.
5329     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5330       auto *I = cast<Instruction>(U);
5331       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
5332     });
5333     if (!ScalarInd)
5334       continue;
5335 
5336     // Determine if all users of the induction variable update instruction are
5337     // scalar after vectorization.
5338     auto ScalarIndUpdate =
5339         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5340           auto *I = cast<Instruction>(U);
5341           return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
5342         });
5343     if (!ScalarIndUpdate)
5344       continue;
5345 
5346     // The induction variable and its update instruction will remain scalar.
5347     Worklist.insert(Ind);
5348     Worklist.insert(IndUpdate);
5349     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
5350     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
5351                       << "\n");
5352   }
5353 
5354   Scalars[VF].insert(Worklist.begin(), Worklist.end());
5355 }
5356 
5357 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I) const {
5358   if (!blockNeedsPredication(I->getParent()))
5359     return false;
5360   switch(I->getOpcode()) {
5361   default:
5362     break;
5363   case Instruction::Load:
5364   case Instruction::Store: {
5365     if (!Legal->isMaskRequired(I))
5366       return false;
5367     auto *Ptr = getLoadStorePointerOperand(I);
5368     auto *Ty = getLoadStoreType(I);
5369     const Align Alignment = getLoadStoreAlignment(I);
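    // The access must be scalarized (and predicated) unless the target can
    // legalize it as a masked contiguous access or a masked gather/scatter.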
5370     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
5371                                 TTI.isLegalMaskedGather(Ty, Alignment))
5372                             : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
5373                                 TTI.isLegalMaskedScatter(Ty, Alignment));
5374   }
5375   case Instruction::UDiv:
5376   case Instruction::SDiv:
5377   case Instruction::SRem:
5378   case Instruction::URem:
5379     return mayDivideByZero(*I);
5380   }
5381   return false;
5382 }
5383 
5384 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
5385     Instruction *I, ElementCount VF) {
5386   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
5387   assert(getWideningDecision(I, VF) == CM_Unknown &&
5388          "Decision should not be set yet.");
5389   auto *Group = getInterleavedAccessGroup(I);
5390   assert(Group && "Must have a group.");
5391 
  // If the instruction's allocated size doesn't equal its type size, it
5393   // requires padding and will be scalarized.
5394   auto &DL = I->getModule()->getDataLayout();
5395   auto *ScalarTy = getLoadStoreType(I);
5396   if (hasIrregularType(ScalarTy, DL))
5397     return false;
5398 
5399   // Check if masking is required.
5400   // A Group may need masking for one of two reasons: it resides in a block that
5401   // needs predication, or it was decided to use masking to deal with gaps.
5402   bool PredicatedAccessRequiresMasking =
5403       Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I);
5404   bool AccessWithGapsRequiresMasking =
5405       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
5406   if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking)
5407     return true;
5408 
5409   // If masked interleaving is required, we expect that the user/target had
5410   // enabled it, because otherwise it either wouldn't have been created or
5411   // it should have been invalidated by the CostModel.
5412   assert(useMaskedInterleavedAccesses(TTI) &&
5413          "Masked interleave-groups for predicated accesses are not enabled.");
5414 
5415   auto *Ty = getLoadStoreType(I);
5416   const Align Alignment = getLoadStoreAlignment(I);
5417   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
5418                           : TTI.isLegalMaskedStore(Ty, Alignment);
5419 }
5420 
5421 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
5422     Instruction *I, ElementCount VF) {
5423   // Get and ensure we have a valid memory instruction.
5424   LoadInst *LI = dyn_cast<LoadInst>(I);
5425   StoreInst *SI = dyn_cast<StoreInst>(I);
5426   assert((LI || SI) && "Invalid memory instruction");
5427 
5428   auto *Ptr = getLoadStorePointerOperand(I);
5429 
5430   // In order to be widened, the pointer should be consecutive, first of all.
5431   if (!Legal->isConsecutivePtr(Ptr))
5432     return false;
5433 
5434   // If the instruction is a store located in a predicated block, it will be
5435   // scalarized.
5436   if (isScalarWithPredication(I))
5437     return false;
5438 
  // If the instruction's allocated size doesn't equal its type size, it
5440   // requires padding and will be scalarized.
5441   auto &DL = I->getModule()->getDataLayout();
5442   auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
5443   if (hasIrregularType(ScalarTy, DL))
5444     return false;
5445 
5446   return true;
5447 }
5448 
5449 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
5450   // We should not collect Uniforms more than once per VF. Right now,
5451   // this function is called from collectUniformsAndScalars(), which
5452   // already does this check. Collecting Uniforms for VF=1 does not make any
5453   // sense.
5454 
5455   assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
5456          "This function should not be visited twice for the same VF");
5457 
  // Visit the list of Uniforms. If we do not find any uniform value, we will
  // not analyze it again. Uniforms.count(VF) will return 1.
5460   Uniforms[VF].clear();
5461 
5462   // We now know that the loop is vectorizable!
5463   // Collect instructions inside the loop that will remain uniform after
5464   // vectorization.
5465 
  // Global values, params and instructions outside of the current loop are
  // out of scope.
5468   auto isOutOfScope = [&](Value *V) -> bool {
5469     Instruction *I = dyn_cast<Instruction>(V);
5470     return (!I || !TheLoop->contains(I));
5471   };
5472 
5473   SetVector<Instruction *> Worklist;
5474   BasicBlock *Latch = TheLoop->getLoopLatch();
5475 
5476   // Instructions that are scalar with predication must not be considered
5477   // uniform after vectorization, because that would create an erroneous
5478   // replicating region where only a single instance out of VF should be formed.
5479   // TODO: optimize such seldom cases if found important, see PR40816.
5480   auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
5481     if (isOutOfScope(I)) {
5482       LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
5483                         << *I << "\n");
5484       return;
5485     }
5486     if (isScalarWithPredication(I)) {
5487       LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
5488                         << *I << "\n");
5489       return;
5490     }
5491     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
5492     Worklist.insert(I);
5493   };
5494 
5495   // Start with the conditional branch. If the branch condition is an
5496   // instruction contained in the loop that is only used by the branch, it is
5497   // uniform.
5498   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
5499   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
5500     addToWorklistIfAllowed(Cmp);
5501 
5502   auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
5503     InstWidening WideningDecision = getWideningDecision(I, VF);
5504     assert(WideningDecision != CM_Unknown &&
5505            "Widening decision should be ready at this moment");
5506 
5507     // A uniform memory op is itself uniform.  We exclude uniform stores
5508     // here as they demand the last lane, not the first one.
5509     if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) {
5510       assert(WideningDecision == CM_Scalarize);
5511       return true;
5512     }
5513 
5514     return (WideningDecision == CM_Widen ||
5515             WideningDecision == CM_Widen_Reverse ||
5516             WideningDecision == CM_Interleave);
5517   };
5518 
5520   // Returns true if Ptr is the pointer operand of a memory access instruction
5521   // I, and I is known to not require scalarization.
5522   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
5523     return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
5524   };
5525 
5526   // Holds a list of values which are known to have at least one uniform use.
5527   // Note that there may be other uses which aren't uniform.  A "uniform use"
5528   // here is something which only demands lane 0 of the unrolled iterations;
5529   // it does not imply that all lanes produce the same value (e.g. this is not
5530   // the usual meaning of uniform)
5531   SetVector<Value *> HasUniformUse;
5532 
5533   // Scan the loop for instructions which are either a) known to have only
5534   // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
5535   for (auto *BB : TheLoop->blocks())
5536     for (auto &I : *BB) {
5537       // If there's no pointer operand, there's nothing to do.
5538       auto *Ptr = getLoadStorePointerOperand(&I);
5539       if (!Ptr)
5540         continue;
5541 
5542       // A uniform memory op is itself uniform.  We exclude uniform stores
5543       // here as they demand the last lane, not the first one.
5544       if (isa<LoadInst>(I) && Legal->isUniformMemOp(I))
5545         addToWorklistIfAllowed(&I);
5546 
5547       if (isUniformDecision(&I, VF)) {
5548         assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check");
5549         HasUniformUse.insert(Ptr);
5550       }
5551     }
5552 
5553   // Add to the worklist any operands which have *only* uniform (e.g. lane 0
5554   // demanding) users.  Since loops are assumed to be in LCSSA form, this
5555   // disallows uses outside the loop as well.
5556   for (auto *V : HasUniformUse) {
5557     if (isOutOfScope(V))
5558       continue;
5559     auto *I = cast<Instruction>(V);
5560     auto UsersAreMemAccesses =
5561       llvm::all_of(I->users(), [&](User *U) -> bool {
5562         return isVectorizedMemAccessUse(cast<Instruction>(U), V);
5563       });
5564     if (UsersAreMemAccesses)
5565       addToWorklistIfAllowed(I);
5566   }
5567 
5568   // Expand Worklist in topological order: whenever a new instruction
  // is added, its users should already be inside Worklist. This ensures that
  // a uniform instruction will only be used by uniform instructions.
5571   unsigned idx = 0;
5572   while (idx != Worklist.size()) {
5573     Instruction *I = Worklist[idx++];
5574 
5575     for (auto OV : I->operand_values()) {
5576       // isOutOfScope operands cannot be uniform instructions.
5577       if (isOutOfScope(OV))
5578         continue;
      // First-order recurrence phis should typically be considered
      // non-uniform.
5581       auto *OP = dyn_cast<PHINode>(OV);
5582       if (OP && Legal->isFirstOrderRecurrence(OP))
5583         continue;
5584       // If all the users of the operand are uniform, then add the
5585       // operand into the uniform worklist.
5586       auto *OI = cast<Instruction>(OV);
5587       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
5588             auto *J = cast<Instruction>(U);
5589             return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
5590           }))
5591         addToWorklistIfAllowed(OI);
5592     }
5593   }
5594 
5595   // For an instruction to be added into Worklist above, all its users inside
5596   // the loop should also be in Worklist. However, this condition cannot be
5597   // true for phi nodes that form a cyclic dependence. We must process phi
5598   // nodes separately. An induction variable will remain uniform if all users
5599   // of the induction variable and induction variable update remain uniform.
5600   // The code below handles both pointer and non-pointer induction variables.
5601   for (auto &Induction : Legal->getInductionVars()) {
5602     auto *Ind = Induction.first;
5603     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5604 
5605     // Determine if all users of the induction variable are uniform after
5606     // vectorization.
5607     auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5608       auto *I = cast<Instruction>(U);
5609       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
5610              isVectorizedMemAccessUse(I, Ind);
5611     });
5612     if (!UniformInd)
5613       continue;
5614 
5615     // Determine if all users of the induction variable update instruction are
5616     // uniform after vectorization.
5617     auto UniformIndUpdate =
5618         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5619           auto *I = cast<Instruction>(U);
5620           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
5621                  isVectorizedMemAccessUse(I, IndUpdate);
5622         });
5623     if (!UniformIndUpdate)
5624       continue;
5625 
5626     // The induction variable and its update instruction will remain uniform.
5627     addToWorklistIfAllowed(Ind);
5628     addToWorklistIfAllowed(IndUpdate);
5629   }
5630 
5631   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
5632 }
5633 
5634 bool LoopVectorizationCostModel::runtimeChecksRequired() {
5635   LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
5636 
5637   if (Legal->getRuntimePointerChecking()->Need) {
5638     reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
5639         "runtime pointer checks needed. Enable vectorization of this "
5640         "loop with '#pragma clang loop vectorize(enable)' when "
5641         "compiling with -Os/-Oz",
5642         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5643     return true;
5644   }
5645 
5646   if (!PSE.getUnionPredicate().getPredicates().empty()) {
5647     reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
5648         "runtime SCEV checks needed. Enable vectorization of this "
5649         "loop with '#pragma clang loop vectorize(enable)' when "
5650         "compiling with -Os/-Oz",
5651         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5652     return true;
5653   }
5654 
5655   // FIXME: Avoid specializing for stride==1 instead of bailing out.
5656   if (!Legal->getLAI()->getSymbolicStrides().empty()) {
5657     reportVectorizationFailure("Runtime stride check for small trip count",
5658         "runtime stride == 1 checks needed. Enable vectorization of "
5659         "this loop without such check by compiling with -Os/-Oz",
5660         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5661     return true;
5662   }
5663 
5664   return false;
5665 }
5666 
5667 ElementCount
5668 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
5669   if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) {
5670     reportVectorizationInfo(
5671         "Disabling scalable vectorization, because target does not "
5672         "support scalable vectors.",
5673         "ScalableVectorsUnsupported", ORE, TheLoop);
5674     return ElementCount::getScalable(0);
5675   }
5676 
5677   if (Hints->isScalableVectorizationDisabled()) {
5678     reportVectorizationInfo("Scalable vectorization is explicitly disabled",
5679                             "ScalableVectorizationDisabled", ORE, TheLoop);
5680     return ElementCount::getScalable(0);
5681   }
5682 
5683   auto MaxScalableVF = ElementCount::getScalable(
5684       std::numeric_limits<ElementCount::ScalarTy>::max());
5685 
5686   // Disable scalable vectorization if the loop contains unsupported reductions.
5687   // Test that the loop-vectorizer can legalize all operations for this MaxVF.
5688   // FIXME: While for scalable vectors this is currently sufficient, this should
5689   // be replaced by a more detailed mechanism that filters out specific VFs,
5690   // instead of invalidating vectorization for a whole set of VFs based on the
5691   // MaxVF.
5692   if (!canVectorizeReductions(MaxScalableVF)) {
5693     reportVectorizationInfo(
5694         "Scalable vectorization not supported for the reduction "
5695         "operations found in this loop.",
5696         "ScalableVFUnfeasible", ORE, TheLoop);
5697     return ElementCount::getScalable(0);
5698   }
5699 
5700   if (Legal->isSafeForAnyVectorWidth())
5701     return MaxScalableVF;
5702 
5703   // Limit MaxScalableVF by the maximum safe dependence distance.
5704   Optional<unsigned> MaxVScale = TTI.getMaxVScale();
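  // A scalable VF of N spans N * vscale elements at runtime, so clamp N to
  // MaxSafeElements / MaxVScale (or to 0 if the maximum vscale is unknown).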
5705   MaxScalableVF = ElementCount::getScalable(
5706       MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);
5707   if (!MaxScalableVF)
5708     reportVectorizationInfo(
5709         "Max legal vector width too small, scalable vectorization "
5710         "unfeasible.",
5711         "ScalableVFUnfeasible", ORE, TheLoop);
5712 
5713   return MaxScalableVF;
5714 }
5715 
5716 FixedScalableVFPair
5717 LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount,
5718                                                  ElementCount UserVF) {
5719   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
5720   unsigned SmallestType, WidestType;
5721   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
5722 
5723   // Get the maximum safe dependence distance in bits computed by LAA.
5724   // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
5725   // the memory accesses that is most restrictive (involved in the smallest
5726   // dependence distance).
5727   unsigned MaxSafeElements =
5728       PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType);
5729 
5730   auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements);
5731   auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements);
5732 
5733   LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF
5734                     << ".\n");
5735   LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF
5736                     << ".\n");
5737 
5738   // First analyze the UserVF, fall back if the UserVF should be ignored.
5739   if (UserVF) {
5740     auto MaxSafeUserVF =
5741         UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;
5742 
5743     if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF))
5744       return UserVF;
5745 
5746     assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));
5747 
5748     // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it
5749     // is better to ignore the hint and let the compiler choose a suitable VF.
5750     if (!UserVF.isScalable()) {
5751       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5752                         << " is unsafe, clamping to max safe VF="
5753                         << MaxSafeFixedVF << ".\n");
5754       ORE->emit([&]() {
5755         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5756                                           TheLoop->getStartLoc(),
5757                                           TheLoop->getHeader())
5758                << "User-specified vectorization factor "
5759                << ore::NV("UserVectorizationFactor", UserVF)
5760                << " is unsafe, clamping to maximum safe vectorization factor "
5761                << ore::NV("VectorizationFactor", MaxSafeFixedVF);
5762       });
5763       return MaxSafeFixedVF;
5764     }
5765 
5766     LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5767                       << " is unsafe. Ignoring scalable UserVF.\n");
5768     ORE->emit([&]() {
5769       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5770                                         TheLoop->getStartLoc(),
5771                                         TheLoop->getHeader())
5772              << "User-specified vectorization factor "
5773              << ore::NV("UserVectorizationFactor", UserVF)
5774              << " is unsafe. Ignoring the hint to let the compiler pick a "
5775                 "suitable VF.";
5776     });
5777   }
5778 
5779   LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
5780                     << " / " << WidestType << " bits.\n");
5781 
5782   FixedScalableVFPair Result(ElementCount::getFixed(1),
5783                              ElementCount::getScalable(0));
5784   if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType,
5785                                            WidestType, MaxSafeFixedVF))
5786     Result.FixedVF = MaxVF;
5787 
5788   if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType,
5789                                            WidestType, MaxSafeScalableVF))
5790     if (MaxVF.isScalable()) {
5791       Result.ScalableVF = MaxVF;
5792       LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
5793                         << "\n");
5794     }
5795 
5796   return Result;
5797 }
5798 
5799 FixedScalableVFPair
5800 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
5801   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to do this since it's still likely to be
    // dynamically uniform if the target can skip.
5804     reportVectorizationFailure(
5805         "Not inserting runtime ptr check for divergent target",
5806         "runtime pointer checks needed. Not enabled for divergent target",
5807         "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
5808     return FixedScalableVFPair::getNone();
5809   }
5810 
5811   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5812   LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
5813   if (TC == 1) {
5814     reportVectorizationFailure("Single iteration (non) loop",
5815         "loop trip count is one, irrelevant for vectorization",
5816         "SingleIterationLoop", ORE, TheLoop);
5817     return FixedScalableVFPair::getNone();
5818   }
5819 
5820   switch (ScalarEpilogueStatus) {
5821   case CM_ScalarEpilogueAllowed:
5822     return computeFeasibleMaxVF(TC, UserVF);
5823   case CM_ScalarEpilogueNotAllowedUsePredicate:
5824     LLVM_FALLTHROUGH;
5825   case CM_ScalarEpilogueNotNeededUsePredicate:
5826     LLVM_DEBUG(
5827         dbgs() << "LV: vector predicate hint/switch found.\n"
5828                << "LV: Not allowing scalar epilogue, creating predicated "
5829                << "vector loop.\n");
5830     break;
5831   case CM_ScalarEpilogueNotAllowedLowTripLoop:
5832     // fallthrough as a special case of OptForSize
5833   case CM_ScalarEpilogueNotAllowedOptSize:
5834     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
5835       LLVM_DEBUG(
5836           dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5837     else
5838       LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5839                         << "count.\n");
5840 
5841     // Bail if runtime checks are required, which are not good when optimising
5842     // for size.
5843     if (runtimeChecksRequired())
5844       return FixedScalableVFPair::getNone();
5845 
5846     break;
5847   }
5848 
5849   // The only loops we can vectorize without a scalar epilogue, are loops with
5850   // a bottom-test and a single exiting block. We'd have to handle the fact
5851   // that not every instruction executes on the last iteration.  This will
5852   // require a lane mask which varies through the vector loop body.  (TODO)
5853   if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5854     // If there was a tail-folding hint/switch, but we can't fold the tail by
5855     // masking, fallback to a vectorization with a scalar epilogue.
5856     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5857       LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5858                            "scalar epilogue instead.\n");
5859       ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5860       return computeFeasibleMaxVF(TC, UserVF);
5861     }
5862     return FixedScalableVFPair::getNone();
5863   }
5864 
5865   // Now try the tail folding
5866 
5867   // Invalidate interleave groups that require an epilogue if we can't mask
5868   // the interleave-group.
5869   if (!useMaskedInterleavedAccesses(TTI)) {
5870     assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5871            "No decisions should have been taken at this point");
5872     // Note: There is no need to invalidate any cost modeling decisions here, as
    // none were taken so far.
5874     InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5875   }
5876 
5877   FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF);
5878   // Avoid tail folding if the trip count is known to be a multiple of any VF
5879   // we chose.
5880   // FIXME: The condition below pessimises the case for fixed-width vectors,
5881   // when scalable VFs are also candidates for vectorization.
5882   if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) {
5883     ElementCount MaxFixedVF = MaxFactors.FixedVF;
5884     assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) &&
5885            "MaxFixedVF must be a power of 2");
5886     unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC
5887                                    : MaxFixedVF.getFixedValue();
5888     ScalarEvolution *SE = PSE.getSE();
5889     const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
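    // The trip count is the backedge-taken count plus one. No scalar tail
    // remains iff the trip count (refined with any loop guards) is an exact
    // multiple of MaxVFtimesIC.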
5890     const SCEV *ExitCount = SE->getAddExpr(
5891         BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
5892     const SCEV *Rem = SE->getURemExpr(
5893         SE->applyLoopGuards(ExitCount, TheLoop),
5894         SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
5895     if (Rem->isZero()) {
5896       // Accept MaxFixedVF if we do not have a tail.
5897       LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
5898       return MaxFactors;
5899     }
5900   }
5901 
5902   // If we don't know the precise trip count, or if the trip count that we
5903   // found modulo the vectorization factor is not zero, try to fold the tail
5904   // by masking.
5905   // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
5906   if (Legal->prepareToFoldTailByMasking()) {
5907     FoldTailByMasking = true;
5908     return MaxFactors;
5909   }
5910 
5911   // If there was a tail-folding hint/switch, but we can't fold the tail by
5912   // masking, fallback to a vectorization with a scalar epilogue.
5913   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5914     LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5915                          "scalar epilogue instead.\n");
5916     ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5917     return MaxFactors;
5918   }
5919 
5920   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
5921     LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
5922     return FixedScalableVFPair::getNone();
5923   }
5924 
5925   if (TC == 0) {
5926     reportVectorizationFailure(
5927         "Unable to calculate the loop count due to complex control flow",
5928         "unable to calculate the loop count due to complex control flow",
5929         "UnknownLoopCountComplexCFG", ORE, TheLoop);
5930     return FixedScalableVFPair::getNone();
5931   }
5932 
5933   reportVectorizationFailure(
5934       "Cannot optimize for size and vectorize at the same time.",
5935       "cannot optimize for size and vectorize at the same time. "
5936       "Enable vectorization of this loop with '#pragma clang loop "
5937       "vectorize(enable)' when compiling with -Os/-Oz",
5938       "NoTailLoopWithOptForSize", ORE, TheLoop);
5939   return FixedScalableVFPair::getNone();
5940 }
5941 
5942 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
5943     unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType,
5944     const ElementCount &MaxSafeVF) {
5945   bool ComputeScalableMaxVF = MaxSafeVF.isScalable();
5946   TypeSize WidestRegister = TTI.getRegisterBitWidth(
5947       ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
5948                            : TargetTransformInfo::RGK_FixedWidthVector);
5949 
5950   // Convenience function to return the minimum of two ElementCounts.
5951   auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) {
5952     assert((LHS.isScalable() == RHS.isScalable()) &&
5953            "Scalable flags must match");
5954     return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS;
5955   };
5956 
5957   // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
  // Note that both WidestRegister and WidestType may not be powers of 2.
5959   auto MaxVectorElementCount = ElementCount::get(
5960       PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType),
5961       ComputeScalableMaxVF);
5962   MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
5963   LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
5964                     << (MaxVectorElementCount * WidestType) << " bits.\n");
5965 
5966   if (!MaxVectorElementCount) {
5967     LLVM_DEBUG(dbgs() << "LV: The target has no "
5968                       << (ComputeScalableMaxVF ? "scalable" : "fixed")
5969                       << " vector registers.\n");
5970     return ElementCount::getFixed(1);
5971   }
5972 
5973   const auto TripCountEC = ElementCount::getFixed(ConstTripCount);
5974   if (ConstTripCount &&
5975       ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) &&
5976       isPowerOf2_32(ConstTripCount)) {
5977     // We need to clamp the VF to be the ConstTripCount. There is no point in
5978     // choosing a higher viable VF as done in the loop below. If
5979     // MaxVectorElementCount is scalable, we only fall back on a fixed VF when
5980     // the TC is less than or equal to the known number of lanes.
5981     LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
5982                       << ConstTripCount << "\n");
5983     return TripCountEC;
5984   }
5985 
5986   ElementCount MaxVF = MaxVectorElementCount;
5987   if (TTI.shouldMaximizeVectorBandwidth() ||
5988       (MaximizeBandwidth && isScalarEpilogueAllowed())) {
5989     auto MaxVectorElementCountMaxBW = ElementCount::get(
5990         PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType),
5991         ComputeScalableMaxVF);
5992     MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
5993 
5994     // Collect all viable vectorization factors larger than the default MaxVF
5995     // (i.e. MaxVectorElementCount).
5996     SmallVector<ElementCount, 8> VFs;
5997     for (ElementCount VS = MaxVectorElementCount * 2;
5998          ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2)
5999       VFs.push_back(VS);
6000 
6001     // For each VF calculate its register usage.
6002     auto RUs = calculateRegisterUsage(VFs);
6003 
6004     // Select the largest VF which doesn't require more registers than existing
6005     // ones.
6006     for (int i = RUs.size() - 1; i >= 0; --i) {
6007       bool Selected = true;
6008       for (auto &pair : RUs[i].MaxLocalUsers) {
6009         unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
6010         if (pair.second > TargetNumRegisters)
6011           Selected = false;
6012       }
6013       if (Selected) {
6014         MaxVF = VFs[i];
6015         break;
6016       }
6017     }
6018     if (ElementCount MinVF =
6019             TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) {
6020       if (ElementCount::isKnownLT(MaxVF, MinVF)) {
6021         LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
6022                           << ") with target's minimum: " << MinVF << '\n');
6023         MaxVF = MinVF;
6024       }
6025     }
6026   }
6027   return MaxVF;
6028 }
6029 
6030 bool LoopVectorizationCostModel::isMoreProfitable(
6031     const VectorizationFactor &A, const VectorizationFactor &B) const {
6032   InstructionCost::CostType CostA = *A.Cost.getValue();
6033   InstructionCost::CostType CostB = *B.Cost.getValue();
6034 
6035   unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop);
6036 
6037   if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking &&
6038       MaxTripCount) {
6039     // If we are folding the tail and the trip count is a known (possibly small)
6040     // constant, the trip count will be rounded up to an integer number of
6041     // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF),
6042     // which we compare directly. When not folding the tail, the total cost will
6043     // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is
6044     // approximated with the per-lane cost below instead of using the tripcount
6045     // as here.
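    // A hypothetical illustration: with MaxTripCount = 10, CostA = 6 at
    // VF A = 4 and CostB = 10 at VF B = 8, RTCostA = 6 * ceil(10/4) = 18 and
    // RTCostB = 10 * ceil(10/8) = 20, so A is preferred even though its
    // per-lane cost is higher.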
6046     int64_t RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue());
6047     int64_t RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue());
6048     return RTCostA < RTCostB;
6049   }
6050 
  // When scalable vectorization is preferred, for now assume vscale may be
  // larger than 1, so that scalable vectorization is slightly favored over
  // fixed-width vectorization.
6054   if (Hints->isScalableVectorizationPreferred())
6055     if (A.Width.isScalable() && !B.Width.isScalable())
6056       return (CostA * B.Width.getKnownMinValue()) <=
6057              (CostB * A.Width.getKnownMinValue());
6058 
6059   // To avoid the need for FP division:
6060   //      (CostA / A.Width) < (CostB / B.Width)
6061   // <=>  (CostA * B.Width) < (CostB * A.Width)
6062   return (CostA * B.Width.getKnownMinValue()) <
6063          (CostB * A.Width.getKnownMinValue());
6064 }
6065 
6066 VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor(
6067     const ElementCountSet &VFCandidates) {
6068   InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first;
6069   LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
6070   assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
6071   assert(VFCandidates.count(ElementCount::getFixed(1)) &&
6072          "Expected Scalar VF to be a candidate");
6073 
6074   const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost);
6075   VectorizationFactor ChosenFactor = ScalarCost;
6076 
6077   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
6078   if (ForceVectorization && VFCandidates.size() > 1) {
6079     // Ignore scalar width, because the user explicitly wants vectorization.
6080     // Initialize cost to max so that VF = 2 is, at least, chosen during cost
6081     // evaluation.
6082     ChosenFactor.Cost = std::numeric_limits<InstructionCost::CostType>::max();
6083   }
6084 
6085   for (const auto &i : VFCandidates) {
6086     // The cost for scalar VF=1 is already calculated, so ignore it.
6087     if (i.isScalar())
6088       continue;
6089 
    // Notice that the vector loop needs to be executed fewer times, so
    // we need to divide the cost of the vector loop by the width of
    // the vector elements.
6093     VectorizationCostTy C = expectedCost(i);
6094 
6095     assert(C.first.isValid() && "Unexpected invalid cost for vector loop");
6096     VectorizationFactor Candidate(i, C.first);
6097     LLVM_DEBUG(
6098         dbgs() << "LV: Vector loop of width " << i << " costs: "
6099                << (*Candidate.Cost.getValue() /
6100                    Candidate.Width.getKnownMinValue())
6101                << (i.isScalable() ? " (assuming a minimum vscale of 1)" : "")
6102                << ".\n");
6103 
6104     if (!C.second && !ForceVectorization) {
6105       LLVM_DEBUG(
6106           dbgs() << "LV: Not considering vector loop of width " << i
6107                  << " because it will not generate any vector instructions.\n");
6108       continue;
6109     }
6110 
6111     // If profitable add it to ProfitableVF list.
6112     if (isMoreProfitable(Candidate, ScalarCost))
6113       ProfitableVFs.push_back(Candidate);
6114 
6115     if (isMoreProfitable(Candidate, ChosenFactor))
6116       ChosenFactor = Candidate;
6117   }
6118 
6119   if (!EnableCondStoresVectorization && NumPredStores) {
6120     reportVectorizationFailure("There are conditional stores.",
6121         "store that is conditionally executed prevents vectorization",
6122         "ConditionalStore", ORE, TheLoop);
6123     ChosenFactor = ScalarCost;
6124   }
6125 
6126   LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() &&
6127                  *ChosenFactor.Cost.getValue() >= *ScalarCost.Cost.getValue())
6128                  dbgs()
6129              << "LV: Vectorization seems to be not beneficial, "
6130              << "but was forced by a user.\n");
6131   LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n");
6132   return ChosenFactor;
6133 }
6134 
6135 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization(
6136     const Loop &L, ElementCount VF) const {
6137   // Cross iteration phis such as reductions need special handling and are
6138   // currently unsupported.
6139   if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) {
6140         return Legal->isFirstOrderRecurrence(&Phi) ||
6141                Legal->isReductionVariable(&Phi);
6142       }))
6143     return false;
6144 
6145   // Phis with uses outside of the loop require special handling and are
6146   // currently unsupported.
6147   for (auto &Entry : Legal->getInductionVars()) {
6148     // Look for uses of the value of the induction at the last iteration.
6149     Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch());
6150     for (User *U : PostInc->users())
6151       if (!L.contains(cast<Instruction>(U)))
6152         return false;
    // Look for uses of the penultimate value of the induction.
6154     for (User *U : Entry.first->users())
6155       if (!L.contains(cast<Instruction>(U)))
6156         return false;
6157   }
6158 
6159   // Induction variables that are widened require special handling that is
6160   // currently not supported.
6161   if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
6162         return !(this->isScalarAfterVectorization(Entry.first, VF) ||
6163                  this->isProfitableToScalarize(Entry.first, VF));
6164       }))
6165     return false;
6166 
6167   return true;
6168 }
6169 
6170 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
6171     const ElementCount VF) const {
6172   // FIXME: We need a much better cost-model to take different parameters such
6173   // as register pressure, code size increase and cost of extra branches into
6174   // account. For now we apply a very crude heuristic and only consider loops
6175   // with vectorization factors larger than a certain value.
6176   // We also consider epilogue vectorization unprofitable for targets that don't
  // consider interleaving beneficial (e.g. MVE).
6178   if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
6179     return false;
6180   if (VF.getFixedValue() >= EpilogueVectorizationMinVF)
6181     return true;
6182   return false;
6183 }
6184 
6185 VectorizationFactor
6186 LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
6187     const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
6188   VectorizationFactor Result = VectorizationFactor::Disabled();
6189   if (!EnableEpilogueVectorization) {
6190     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
6191     return Result;
6192   }
6193 
6194   if (!isScalarEpilogueAllowed()) {
6195     LLVM_DEBUG(
6196         dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
6197                   "allowed.\n";);
6198     return Result;
6199   }
6200 
6201   // FIXME: This can be fixed for scalable vectors later, because at this stage
6202   // the LoopVectorizer will only consider vectorizing a loop with scalable
6203   // vectors when the loop has a hint to enable vectorization for a given VF.
6204   if (MainLoopVF.isScalable()) {
6205     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization for scalable vectors not "
6206                          "yet supported.\n");
6207     return Result;
6208   }
6209 
6210   // Not really a cost consideration, but check for unsupported cases here to
6211   // simplify the logic.
6212   if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
6213     LLVM_DEBUG(
6214         dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
6215                   "not a supported candidate.\n";);
6216     return Result;
6217   }
6218 
6219   if (EpilogueVectorizationForceVF > 1) {
6220     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
6221     if (LVP.hasPlanWithVFs(
6222             {MainLoopVF, ElementCount::getFixed(EpilogueVectorizationForceVF)}))
6223       return {ElementCount::getFixed(EpilogueVectorizationForceVF), 0};
6224     else {
6225       LLVM_DEBUG(
6226           dbgs()
6227               << "LEV: Epilogue vectorization forced factor is not viable.\n";);
6228       return Result;
6229     }
6230   }
6231 
6232   if (TheLoop->getHeader()->getParent()->hasOptSize() ||
6233       TheLoop->getHeader()->getParent()->hasMinSize()) {
6234     LLVM_DEBUG(
6235         dbgs()
6236             << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
6237     return Result;
6238   }
6239 
6240   if (!isEpilogueVectorizationProfitable(MainLoopVF))
6241     return Result;
6242 
6243   for (auto &NextVF : ProfitableVFs)
6244     if (ElementCount::isKnownLT(NextVF.Width, MainLoopVF) &&
6245         (Result.Width.getFixedValue() == 1 ||
6246          isMoreProfitable(NextVF, Result)) &&
6247         LVP.hasPlanWithVFs({MainLoopVF, NextVF.Width}))
6248       Result = NextVF;
6249 
6250   if (Result != VectorizationFactor::Disabled())
6251     LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
6252                       << Result.Width.getFixedValue() << "\n";);
6253   return Result;
6254 }
6255 
6256 std::pair<unsigned, unsigned>
6257 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
6258   unsigned MinWidth = -1U;
6259   unsigned MaxWidth = 8;
6260   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
6261 
6262   // For each block.
6263   for (BasicBlock *BB : TheLoop->blocks()) {
6264     // For each instruction in the loop.
6265     for (Instruction &I : BB->instructionsWithoutDebug()) {
6266       Type *T = I.getType();
6267 
6268       // Skip ignored values.
6269       if (ValuesToIgnore.count(&I))
6270         continue;
6271 
6272       // Only examine Loads, Stores and PHINodes.
6273       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
6274         continue;
6275 
6276       // Examine PHI nodes that are reduction variables. Update the type to
6277       // account for the recurrence type.
6278       if (auto *PN = dyn_cast<PHINode>(&I)) {
6279         if (!Legal->isReductionVariable(PN))
6280           continue;
6281         const RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[PN];
6282         if (PreferInLoopReductions || useOrderedReductions(RdxDesc) ||
6283             TTI.preferInLoopReduction(RdxDesc.getOpcode(),
6284                                       RdxDesc.getRecurrenceType(),
6285                                       TargetTransformInfo::ReductionFlags()))
6286           continue;
6287         T = RdxDesc.getRecurrenceType();
6288       }
6289 
6290       // Examine the stored values.
6291       if (auto *ST = dyn_cast<StoreInst>(&I))
6292         T = ST->getValueOperand()->getType();
6293 
6294       // Ignore loaded pointer types and stored pointer types that are not
6295       // vectorizable.
6296       //
6297       // FIXME: The check here attempts to predict whether a load or store will
6298       //        be vectorized. We only know this for certain after a VF has
6299       //        been selected. Here, we assume that if an access can be
6300       //        vectorized, it will be. We should also look at extending this
6301       //        optimization to non-pointer types.
6302       //
6303       if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
6304           !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
6305         continue;
6306 
6307       MinWidth = std::min(MinWidth,
6308                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
6309       MaxWidth = std::max(MaxWidth,
6310                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
6311     }
6312   }
6313 
6314   return {MinWidth, MaxWidth};
6315 }
6316 
6317 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
6318                                                            unsigned LoopCost) {
6319   // -- The interleave heuristics --
6320   // We interleave the loop in order to expose ILP and reduce the loop overhead.
6321   // There are many micro-architectural considerations that we can't predict
6322   // at this level. For example, frontend pressure (on decode or fetch) due to
6323   // code size, or the number and capabilities of the execution ports.
6324   //
6325   // We use the following heuristics to select the interleave count:
6326   // 1. If the code has reductions, then we interleave to break the cross
6327   // iteration dependency.
6328   // 2. If the loop is really small, then we interleave to reduce the loop
6329   // overhead.
6330   // 3. We don't interleave if we think that we will spill registers to memory
6331   // due to the increased register pressure.
6332 
6333   if (!isScalarEpilogueAllowed())
6334     return 1;
6335 
  // The maximum safe dependence distance was already used to limit the
  // vectorization factor, so do not interleave further.
6337   if (Legal->getMaxSafeDepDistBytes() != -1U)
6338     return 1;
6339 
6340   auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
6341   const bool HasReductions = !Legal->getReductionVars().empty();
6342   // Do not interleave loops with a relatively small known or estimated trip
6343   // count. But we will interleave when InterleaveSmallLoopScalarReduction is
  // enabled, and the code has scalar reductions (HasReductions && VF == 1),
6345   // because with the above conditions interleaving can expose ILP and break
6346   // cross iteration dependences for reductions.
6347   if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
6348       !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
6349     return 1;
6350 
6351   RegisterUsage R = calculateRegisterUsage({VF})[0];
  // These counts are used as divisors below, so clamp each to at least one,
  // i.e. assume every loop has at least one instruction using one register.
6354   for (auto& pair : R.MaxLocalUsers) {
6355     pair.second = std::max(pair.second, 1U);
6356   }
6357 
6358   // We calculate the interleave count using the following formula.
6359   // Subtract the number of loop invariants from the number of available
6360   // registers. These registers are used by all of the interleaved instances.
6361   // Next, divide the remaining registers by the number of registers that is
6362   // required by the loop, in order to estimate how many parallel instances
6363   // fit without causing spills. All of this is rounded down if necessary to be
6364   // a power of two. We want power of two interleave count to simplify any
6365   // addressing operations or alignment considerations.
6366   // We also want power of two interleave counts to ensure that the induction
6367   // variable of the vector loop wraps to zero, when tail is folded by masking;
  // this currently happens when optimizing for size, in which case IC is set
  // to 1 above.
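  // A small illustration with made-up numbers: given 32 registers in a class,
  // 2 of them used by loop-invariant values and a maximum local usage of 7,
  // the basic estimate below is PowerOf2Floor((32 - 2) / 7) = 4 interleaved
  // copies.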
6369   unsigned IC = UINT_MAX;
6370 
6371   for (auto& pair : R.MaxLocalUsers) {
6372     unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
6373     LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
6374                       << " registers of "
6375                       << TTI.getRegisterClassName(pair.first) << " register class\n");
6376     if (VF.isScalar()) {
6377       if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
6378         TargetNumRegisters = ForceTargetNumScalarRegs;
6379     } else {
6380       if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
6381         TargetNumRegisters = ForceTargetNumVectorRegs;
6382     }
6383     unsigned MaxLocalUsers = pair.second;
6384     unsigned LoopInvariantRegs = 0;
6385     if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
6386       LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
6387 
    unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) /
                                   MaxLocalUsers);
6389     // Don't count the induction variable as interleaved.
6390     if (EnableIndVarRegisterHeur) {
6391       TmpIC =
6392           PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
6393                         std::max(1U, (MaxLocalUsers - 1)));
6394     }
6395 
6396     IC = std::min(IC, TmpIC);
6397   }
6398 
6399   // Clamp the interleave ranges to reasonable counts.
6400   unsigned MaxInterleaveCount =
6401       TTI.getMaxInterleaveFactor(VF.getKnownMinValue());
6402 
6403   // Check if the user has overridden the max.
6404   if (VF.isScalar()) {
6405     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
6406       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
6407   } else {
6408     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
6409       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
6410   }
6411 
  // If the trip count is a known or estimated compile-time constant, limit
  // the interleave count to be less than the trip count divided by VF,
  // provided it is at least 1.
6415   //
6416   // For scalable vectors we can't know if interleaving is beneficial. It may
6417   // not be beneficial for small loops if none of the lanes in the second vector
  // iteration is enabled. However, for larger loops, there is likely to be a
6419   // similar benefit as for fixed-width vectors. For now, we choose to leave
6420   // the InterleaveCount as if vscale is '1', although if some information about
6421   // the vector is known (e.g. min vector size), we can make a better decision.
6422   if (BestKnownTC) {
6423     MaxInterleaveCount =
6424         std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
6425     // Make sure MaxInterleaveCount is greater than 0.
6426     MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
6427   }
6428 
6429   assert(MaxInterleaveCount > 0 &&
6430          "Maximum interleave count must be greater than 0");
6431 
  // Clamp the calculated IC to be between 1 and the max interleave count
6433   // that the target and trip count allows.
6434   if (IC > MaxInterleaveCount)
6435     IC = MaxInterleaveCount;
6436   else
6437     // Make sure IC is greater than 0.
6438     IC = std::max(1u, IC);
6439 
6440   assert(IC > 0 && "Interleave count must be greater than 0.");
6441 
6442   // If we did not calculate the cost for VF (because the user selected the VF)
6443   // then we calculate the cost of VF here.
6444   if (LoopCost == 0) {
6445     assert(expectedCost(VF).first.isValid() && "Expected a valid cost");
6446     LoopCost = *expectedCost(VF).first.getValue();
6447   }
6448 
6449   assert(LoopCost && "Non-zero loop cost expected");
6450 
6451   // Interleave if we vectorized this loop and there is a reduction that could
6452   // benefit from interleaving.
6453   if (VF.isVector() && HasReductions) {
6454     LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
6455     return IC;
6456   }
6457 
6458   // Note that if we've already vectorized the loop we will have done the
6459   // runtime check and so interleaving won't require further checks.
6460   bool InterleavingRequiresRuntimePointerCheck =
6461       (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
6462 
6463   // We want to interleave small loops in order to reduce the loop overhead and
6464   // potentially expose ILP opportunities.
6465   LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
6466                     << "LV: IC is " << IC << '\n'
6467                     << "LV: VF is " << VF << '\n');
6468   const bool AggressivelyInterleaveReductions =
6469       TTI.enableAggressiveInterleaving(HasReductions);
6470   if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
6471     // We assume that the cost overhead is 1 and we use the cost model
6472     // to estimate the cost of the loop and interleave until the cost of the
6473     // loop overhead is about 5% of the cost of the loop.
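    // For example (purely hypothetical costs): if SmallLoopCost were 20 and
    // LoopCost were 3, SmallIC would be capped at PowerOf2Floor(20 / 3) = 4.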
6474     unsigned SmallIC =
6475         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
6476 
6477     // Interleave until store/load ports (estimated by max interleave count) are
6478     // saturated.
6479     unsigned NumStores = Legal->getNumStores();
6480     unsigned NumLoads = Legal->getNumLoads();
6481     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
6482     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
6483 
6484     // If we have a scalar reduction (vector reductions are already dealt with
6485     // by this point), we can increase the critical path length if the loop
    // we're interleaving is inside another loop. Limit it to 2 by default, so
    // that the critical path only gets increased by one reduction operation.
6488     if (HasReductions && TheLoop->getLoopDepth() > 1) {
6489       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
6490       SmallIC = std::min(SmallIC, F);
6491       StoresIC = std::min(StoresIC, F);
6492       LoadsIC = std::min(LoadsIC, F);
6493     }
6494 
6495     if (EnableLoadStoreRuntimeInterleave &&
6496         std::max(StoresIC, LoadsIC) > SmallIC) {
6497       LLVM_DEBUG(
6498           dbgs() << "LV: Interleaving to saturate store or load ports.\n");
6499       return std::max(StoresIC, LoadsIC);
6500     }
6501 
6502     // If there are scalar reductions and TTI has enabled aggressive
6503     // interleaving for reductions, we will interleave to expose ILP.
6504     if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
6505         AggressivelyInterleaveReductions) {
6506       LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6507       // Interleave no less than SmallIC but not as aggressive as the normal IC
6508       // to satisfy the rare situation when resources are too limited.
6509       return std::max(IC / 2, SmallIC);
6510     } else {
6511       LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
6512       return SmallIC;
6513     }
6514   }
6515 
6516   // Interleave if this is a large loop (small loops are already dealt with by
6517   // this point) that could benefit from interleaving.
6518   if (AggressivelyInterleaveReductions) {
6519     LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6520     return IC;
6521   }
6522 
6523   LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
6524   return 1;
6525 }
6526 
6527 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
6528 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
6529   // This function calculates the register usage by measuring the highest number
6530   // of values that are alive at a single location. Obviously, this is a very
  // rough estimation. We scan the loop in topological order and
6532   // assign a number to each instruction. We use RPO to ensure that defs are
6533   // met before their users. We assume that each instruction that has in-loop
6534   // users starts an interval. We record every time that an in-loop value is
6535   // used, so we have a list of the first and last occurrences of each
6536   // instruction. Next, we transpose this data structure into a multi map that
6537   // holds the list of intervals that *end* at a specific location. This multi
6538   // map allows us to perform a linear search. We scan the instructions linearly
6539   // and record each time that a new interval starts, by placing it in a set.
6540   // If we find this value in the multi-map then we remove it from the set.
6541   // The max register usage is the maximum size of the set.
6542   // We also search for instructions that are defined outside the loop, but are
6543   // used inside the loop. We need this number separately from the max-interval
6544   // usage number because when we unroll, loop-invariant values do not take
  // more registers.
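  // As a tiny illustration (hypothetical IR): in the straight-line sequence
  //   %a = ...; %b = add %a, 1; %c = mul %a, %b
  // both %a and %b are still open when %c is reached, so the maximum register
  // usage for this snippet would be 2.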
6546   LoopBlocksDFS DFS(TheLoop);
6547   DFS.perform(LI);
6548 
6549   RegisterUsage RU;
6550 
6551   // Each 'key' in the map opens a new interval. The values
6552   // of the map are the index of the 'last seen' usage of the
6553   // instruction that is the key.
6554   using IntervalMap = DenseMap<Instruction *, unsigned>;
6555 
6556   // Maps instruction to its index.
6557   SmallVector<Instruction *, 64> IdxToInstr;
6558   // Marks the end of each interval.
6559   IntervalMap EndPoint;
  // Saves the set of instructions that are used in the loop.
6561   SmallPtrSet<Instruction *, 8> Ends;
6562   // Saves the list of values that are used in the loop but are
6563   // defined outside the loop, such as arguments and constants.
6564   SmallPtrSet<Value *, 8> LoopInvariants;
6565 
6566   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
6567     for (Instruction &I : BB->instructionsWithoutDebug()) {
6568       IdxToInstr.push_back(&I);
6569 
6570       // Save the end location of each USE.
6571       for (Value *U : I.operands()) {
6572         auto *Instr = dyn_cast<Instruction>(U);
6573 
6574         // Ignore non-instruction values such as arguments, constants, etc.
6575         if (!Instr)
6576           continue;
6577 
6578         // If this instruction is outside the loop then record it and continue.
6579         if (!TheLoop->contains(Instr)) {
6580           LoopInvariants.insert(Instr);
6581           continue;
6582         }
6583 
6584         // Overwrite previous end points.
6585         EndPoint[Instr] = IdxToInstr.size();
6586         Ends.insert(Instr);
6587       }
6588     }
6589   }
6590 
6591   // Saves the list of intervals that end with the index in 'key'.
6592   using InstrList = SmallVector<Instruction *, 2>;
6593   DenseMap<unsigned, InstrList> TransposeEnds;
6594 
6595   // Transpose the EndPoints to a list of values that end at each index.
6596   for (auto &Interval : EndPoint)
6597     TransposeEnds[Interval.second].push_back(Interval.first);
6598 
6599   SmallPtrSet<Instruction *, 8> OpenIntervals;
6600   SmallVector<RegisterUsage, 8> RUs(VFs.size());
6601   SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());
6602 
6603   LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
6604 
6605   // A lambda that gets the register usage for the given type and VF.
6606   const auto &TTICapture = TTI;
6607   auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) {
6608     if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty))
6609       return 0;
6610     return *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue();
6611   };
6612 
6613   for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
6614     Instruction *I = IdxToInstr[i];
6615 
6616     // Remove all of the instructions that end at this location.
6617     InstrList &List = TransposeEnds[i];
6618     for (Instruction *ToRemove : List)
6619       OpenIntervals.erase(ToRemove);
6620 
6621     // Ignore instructions that are never used within the loop.
6622     if (!Ends.count(I))
6623       continue;
6624 
6625     // Skip ignored values.
6626     if (ValuesToIgnore.count(I))
6627       continue;
6628 
6629     // For each VF find the maximum usage of registers.
6630     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
6631       // Count the number of live intervals.
6632       SmallMapVector<unsigned, unsigned, 4> RegUsage;
6633 
6634       if (VFs[j].isScalar()) {
        for (auto Inst : OpenIntervals) {
          unsigned ClassID =
              TTI.getRegisterClassForType(false, Inst->getType());
          RegUsage[ClassID] += 1;
        }
6642       } else {
6643         collectUniformsAndScalars(VFs[j]);
6644         for (auto Inst : OpenIntervals) {
6645           // Skip ignored values for VF > 1.
6646           if (VecValuesToIgnore.count(Inst))
6647             continue;
          if (isScalarAfterVectorization(Inst, VFs[j])) {
            unsigned ClassID =
                TTI.getRegisterClassForType(false, Inst->getType());
            RegUsage[ClassID] += 1;
          } else {
            unsigned ClassID =
                TTI.getRegisterClassForType(true, Inst->getType());
            RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
          }
6661         }
6662       }
6663 
      for (auto &pair : RegUsage)
        MaxUsages[j][pair.first] =
            std::max(MaxUsages[j][pair.first], pair.second);
6670     }
6671 
6672     LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
6673                       << OpenIntervals.size() << '\n');
6674 
6675     // Add the current instruction to the list of open intervals.
6676     OpenIntervals.insert(I);
6677   }
6678 
6679   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
6680     SmallMapVector<unsigned, unsigned, 4> Invariant;
6681 
6682     for (auto Inst : LoopInvariants) {
6683       unsigned Usage =
6684           VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
6685       unsigned ClassID =
6686           TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
      Invariant[ClassID] += Usage;
6691     }
6692 
6693     LLVM_DEBUG({
6694       dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
6695       dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
6696              << " item\n";
6697       for (const auto &pair : MaxUsages[i]) {
6698         dbgs() << "LV(REG): RegisterClass: "
6699                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6700                << " registers\n";
6701       }
6702       dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
6703              << " item\n";
6704       for (const auto &pair : Invariant) {
6705         dbgs() << "LV(REG): RegisterClass: "
6706                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6707                << " registers\n";
6708       }
6709     });
6710 
6711     RU.LoopInvariantRegs = Invariant;
6712     RU.MaxLocalUsers = MaxUsages[i];
6713     RUs[i] = RU;
6714   }
6715 
6716   return RUs;
6717 }
6718 
bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I) {
6720   // TODO: Cost model for emulated masked load/store is completely
6721   // broken. This hack guides the cost model to use an artificially
6722   // high enough value to practically disable vectorization with such
6723   // operations, except where previously deployed legality hack allowed
6724   // using very low cost values. This is to avoid regressions coming simply
6725   // from moving "masked load/store" check from legality to cost model.
6726   // Masked Load/Gather emulation was previously never allowed.
6727   // Limited number of Masked Store/Scatter emulation was allowed.
6728   assert(isPredicatedInst(I) &&
6729          "Expecting a scalar emulated instruction");
6730   return isa<LoadInst>(I) ||
6731          (isa<StoreInst>(I) &&
6732           NumPredStores > NumberOfStoresToPredicate);
6733 }
6734 
6735 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
6736   // If we aren't vectorizing the loop, or if we've already collected the
6737   // instructions to scalarize, there's nothing to do. Collection may already
6738   // have occurred if we have a user-selected VF and are now computing the
6739   // expected cost for interleaving.
6740   if (VF.isScalar() || VF.isZero() ||
6741       InstsToScalarize.find(VF) != InstsToScalarize.end())
6742     return;
6743 
  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
6745   // not profitable to scalarize any instructions, the presence of VF in the
6746   // map will indicate that we've analyzed it already.
6747   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
6748 
6749   // Find all the instructions that are scalar with predication in the loop and
6750   // determine if it would be better to not if-convert the blocks they are in.
6751   // If so, we also record the instructions to scalarize.
6752   for (BasicBlock *BB : TheLoop->blocks()) {
6753     if (!blockNeedsPredication(BB))
6754       continue;
6755     for (Instruction &I : *BB)
6756       if (isScalarWithPredication(&I)) {
6757         ScalarCostsTy ScalarCosts;
6758         // Do not apply discount logic if hacked cost is needed
6759         // for emulated masked memrefs.
6760         if (!useEmulatedMaskMemRefHack(&I) &&
6761             computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
6762           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
6763         // Remember that BB will remain after vectorization.
6764         PredicatedBBsAfterVectorization.insert(BB);
6765       }
6766   }
6767 }
6768 
6769 int LoopVectorizationCostModel::computePredInstDiscount(
6770     Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
6771   assert(!isUniformAfterVectorization(PredInst, VF) &&
6772          "Instruction marked uniform-after-vectorization will be predicated");
6773 
6774   // Initialize the discount to zero, meaning that the scalar version and the
6775   // vector version cost the same.
6776   InstructionCost Discount = 0;
6777 
6778   // Holds instructions to analyze. The instructions we visit are mapped in
6779   // ScalarCosts. Those instructions are the ones that would be scalarized if
6780   // we find that the scalar version costs less.
6781   SmallVector<Instruction *, 8> Worklist;
6782 
6783   // Returns true if the given instruction can be scalarized.
6784   auto canBeScalarized = [&](Instruction *I) -> bool {
6785     // We only attempt to scalarize instructions forming a single-use chain
6786     // from the original predicated block that would otherwise be vectorized.
6787     // Although not strictly necessary, we give up on instructions we know will
6788     // already be scalar to avoid traversing chains that are unlikely to be
6789     // beneficial.
6790     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
6791         isScalarAfterVectorization(I, VF))
6792       return false;
6793 
6794     // If the instruction is scalar with predication, it will be analyzed
6795     // separately. We ignore it within the context of PredInst.
6796     if (isScalarWithPredication(I))
6797       return false;
6798 
6799     // If any of the instruction's operands are uniform after vectorization,
6800     // the instruction cannot be scalarized. This prevents, for example, a
6801     // masked load from being scalarized.
6802     //
6803     // We assume we will only emit a value for lane zero of an instruction
6804     // marked uniform after vectorization, rather than VF identical values.
6805     // Thus, if we scalarize an instruction that uses a uniform, we would
6806     // create uses of values corresponding to the lanes we aren't emitting code
6807     // for. This behavior can be changed by allowing getScalarValue to clone
6808     // the lane zero values for uniforms rather than asserting.
6809     for (Use &U : I->operands())
6810       if (auto *J = dyn_cast<Instruction>(U.get()))
6811         if (isUniformAfterVectorization(J, VF))
6812           return false;
6813 
6814     // Otherwise, we can scalarize the instruction.
6815     return true;
6816   };
6817 
6818   // Compute the expected cost discount from scalarizing the entire expression
6819   // feeding the predicated instruction. We currently only consider expressions
6820   // that are single-use instruction chains.
6821   Worklist.push_back(PredInst);
6822   while (!Worklist.empty()) {
6823     Instruction *I = Worklist.pop_back_val();
6824 
6825     // If we've already analyzed the instruction, there's nothing to do.
6826     if (ScalarCosts.find(I) != ScalarCosts.end())
6827       continue;
6828 
6829     // Compute the cost of the vector instruction. Note that this cost already
6830     // includes the scalarization overhead of the predicated instruction.
6831     InstructionCost VectorCost = getInstructionCost(I, VF).first;
6832 
6833     // Compute the cost of the scalarized instruction. This cost is the cost of
6834     // the instruction as if it wasn't if-converted and instead remained in the
6835     // predicated block. We will scale this cost by block probability after
6836     // computing the scalarization overhead.
6837     assert(!VF.isScalable() && "scalable vectors not yet supported.");
6838     InstructionCost ScalarCost =
6839         VF.getKnownMinValue() *
6840         getInstructionCost(I, ElementCount::getFixed(1)).first;
6841 
6842     // Compute the scalarization overhead of needed insertelement instructions
6843     // and phi nodes.
6844     if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
6845       ScalarCost += TTI.getScalarizationOverhead(
6846           cast<VectorType>(ToVectorTy(I->getType(), VF)),
6847           APInt::getAllOnesValue(VF.getKnownMinValue()), true, false);
6848       assert(!VF.isScalable() && "scalable vectors not yet supported.");
6849       ScalarCost +=
6850           VF.getKnownMinValue() *
6851           TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput);
6852     }
6853 
6854     // Compute the scalarization overhead of needed extractelement
6855     // instructions. For each of the instruction's operands, if the operand can
6856     // be scalarized, add it to the worklist; otherwise, account for the
6857     // overhead.
6858     for (Use &U : I->operands())
6859       if (auto *J = dyn_cast<Instruction>(U.get())) {
6860         assert(VectorType::isValidElementType(J->getType()) &&
6861                "Instruction has non-scalar type");
6862         if (canBeScalarized(J))
6863           Worklist.push_back(J);
6864         else if (needsExtract(J, VF)) {
6865           assert(!VF.isScalable() && "scalable vectors not yet supported.");
6866           ScalarCost += TTI.getScalarizationOverhead(
6867               cast<VectorType>(ToVectorTy(J->getType(), VF)),
6868               APInt::getAllOnesValue(VF.getKnownMinValue()), false, true);
6869         }
6870       }
6871 
6872     // Scale the total scalar cost by block probability.
6873     ScalarCost /= getReciprocalPredBlockProb();
6874 
6875     // Compute the discount. A non-negative discount means the vector version
6876     // of the instruction costs more, and scalarizing would be beneficial.
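    // For instance (made-up costs): VectorCost = 8 and a probability-scaled
    // ScalarCost = 5 would add a discount of 3 in favor of scalarization.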
6877     Discount += VectorCost - ScalarCost;
6878     ScalarCosts[I] = ScalarCost;
6879   }
6880 
6881   return *Discount.getValue();
6882 }
6883 
6884 LoopVectorizationCostModel::VectorizationCostTy
6885 LoopVectorizationCostModel::expectedCost(ElementCount VF) {
6886   VectorizationCostTy Cost;
6887 
6888   // For each block.
6889   for (BasicBlock *BB : TheLoop->blocks()) {
6890     VectorizationCostTy BlockCost;
6891 
6892     // For each instruction in the old loop.
6893     for (Instruction &I : BB->instructionsWithoutDebug()) {
6894       // Skip ignored values.
6895       if (ValuesToIgnore.count(&I) ||
6896           (VF.isVector() && VecValuesToIgnore.count(&I)))
6897         continue;
6898 
6899       VectorizationCostTy C = getInstructionCost(&I, VF);
6900 
6901       // Check if we should override the cost.
6902       if (ForceTargetInstructionCost.getNumOccurrences() > 0)
6903         C.first = InstructionCost(ForceTargetInstructionCost);
6904 
6905       BlockCost.first += C.first;
6906       BlockCost.second |= C.second;
6907       LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
6908                         << " for VF " << VF << " For instruction: " << I
6909                         << '\n');
6910     }
6911 
6912     // If we are vectorizing a predicated block, it will have been
6913     // if-converted. This means that the block's instructions (aside from
6914     // stores and instructions that may divide by zero) will now be
6915     // unconditionally executed. For the scalar case, we may not always execute
6916     // the predicated block, if it is an if-else block. Thus, scale the block's
6917     // cost by the probability of executing it. blockNeedsPredication from
6918     // Legal is used so as to not include all blocks in tail folded loops.
6919     if (VF.isScalar() && Legal->blockNeedsPredication(BB))
6920       BlockCost.first /= getReciprocalPredBlockProb();
6921 
6922     Cost.first += BlockCost.first;
6923     Cost.second |= BlockCost.second;
6924   }
6925 
6926   return Cost;
6927 }
6928 
/// Gets the address access SCEV after verifying that the access pattern
/// is loop invariant except for the induction variable dependence.
6931 ///
6932 /// This SCEV can be sent to the Target in order to estimate the address
6933 /// calculation cost.
static const SCEV *getAddressAccessSCEV(Value *Ptr,
                                        LoopVectorizationLegality *Legal,
                                        PredicatedScalarEvolution &PSE,
                                        const Loop *TheLoop) {
6939 
6940   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
6941   if (!Gep)
6942     return nullptr;
6943 
6944   // We are looking for a gep with all loop invariant indices except for one
6945   // which should be an induction variable.
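  // An illustrative (hypothetical) shape this accepts:
  //   getelementptr inbounds [64 x i32], [64 x i32]* %A, i64 %inv, i64 %iv
  // where %inv is loop invariant and %iv is an induction variable.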
6946   auto SE = PSE.getSE();
6947   unsigned NumOperands = Gep->getNumOperands();
6948   for (unsigned i = 1; i < NumOperands; ++i) {
6949     Value *Opd = Gep->getOperand(i);
6950     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
6951         !Legal->isInductionVariable(Opd))
6952       return nullptr;
6953   }
6954 
  // Now we know we have a GEP ptr, %inv, %ind, %inv. Return the Ptr SCEV.
6956   return PSE.getSCEV(Ptr);
6957 }
6958 
6959 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
6960   return Legal->hasStride(I->getOperand(0)) ||
6961          Legal->hasStride(I->getOperand(1));
6962 }
6963 
6964 InstructionCost
6965 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
6966                                                         ElementCount VF) {
6967   assert(VF.isVector() &&
6968          "Scalarization cost of instruction implies vectorization.");
6969   if (VF.isScalable())
6970     return InstructionCost::getInvalid();
6971 
6972   Type *ValTy = getLoadStoreType(I);
6973   auto SE = PSE.getSE();
6974 
6975   unsigned AS = getLoadStoreAddressSpace(I);
6976   Value *Ptr = getLoadStorePointerOperand(I);
6977   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
6978 
6979   // Figure out whether the access is strided and get the stride value
  // if it's known at compile time.
6981   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
6982 
6983   // Get the cost of the scalar memory instruction and address computation.
6984   InstructionCost Cost =
6985       VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
6986 
6987   // Don't pass *I here, since it is scalar but will actually be part of a
6988   // vectorized loop where the user of it is a vectorized instruction.
6989   const Align Alignment = getLoadStoreAlignment(I);
6990   Cost += VF.getKnownMinValue() *
6991           TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
6992                               AS, TTI::TCK_RecipThroughput);
6993 
6994   // Get the overhead of the extractelement and insertelement instructions
6995   // we might create due to scalarization.
6996   Cost += getScalarizationOverhead(I, VF);
6997 
6998   // If we have a predicated load/store, it will need extra i1 extracts and
6999   // conditional branches, but may not be executed for each vector lane. Scale
7000   // the cost by the probability of executing the predicated block.
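  // E.g. with an assumed reciprocal block probability of 2 (a 50% chance of
  // executing the predicated block), the scalarized cost below is halved.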
7001   if (isPredicatedInst(I)) {
7002     Cost /= getReciprocalPredBlockProb();
7003 
7004     // Add the cost of an i1 extract and a branch
7005     auto *Vec_i1Ty =
7006         VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF);
7007     Cost += TTI.getScalarizationOverhead(
7008         Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()),
7009         /*Insert=*/false, /*Extract=*/true);
7010     Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput);
7011 
7012     if (useEmulatedMaskMemRefHack(I))
7013       // Artificially setting to a high enough value to practically disable
7014       // vectorization with such operations.
7015       Cost = 3000000;
7016   }
7017 
7018   return Cost;
7019 }
7020 
7021 InstructionCost
7022 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
7023                                                     ElementCount VF) {
7024   Type *ValTy = getLoadStoreType(I);
7025   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
7026   Value *Ptr = getLoadStorePointerOperand(I);
7027   unsigned AS = getLoadStoreAddressSpace(I);
7028   int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
7029   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
7030 
7031   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
7032          "Stride should be 1 or -1 for consecutive memory access");
7033   const Align Alignment = getLoadStoreAlignment(I);
7034   InstructionCost Cost = 0;
7035   if (Legal->isMaskRequired(I))
7036     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
7037                                       CostKind);
7038   else
7039     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
7040                                 CostKind, I);
7041 
7042   bool Reverse = ConsecutiveStride < 0;
7043   if (Reverse)
7044     Cost +=
7045         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
7046   return Cost;
7047 }
7048 
7049 InstructionCost
7050 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
7051                                                 ElementCount VF) {
7052   assert(Legal->isUniformMemOp(*I));
7053 
7054   Type *ValTy = getLoadStoreType(I);
7055   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
7056   const Align Alignment = getLoadStoreAlignment(I);
7057   unsigned AS = getLoadStoreAddressSpace(I);
7058   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
7059   if (isa<LoadInst>(I)) {
7060     return TTI.getAddressComputationCost(ValTy) +
7061            TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
7062                                CostKind) +
7063            TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
7064   }
7065   StoreInst *SI = cast<StoreInst>(I);
7066 
7067   bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
7068   return TTI.getAddressComputationCost(ValTy) +
7069          TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
7070                              CostKind) +
7071          (isLoopInvariantStoreValue
7072               ? 0
7073               : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
7074                                        VF.getKnownMinValue() - 1));
7075 }
7076 
7077 InstructionCost
7078 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
7079                                                  ElementCount VF) {
7080   Type *ValTy = getLoadStoreType(I);
7081   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
7082   const Align Alignment = getLoadStoreAlignment(I);
7083   const Value *Ptr = getLoadStorePointerOperand(I);
7084 
7085   return TTI.getAddressComputationCost(VectorTy) +
7086          TTI.getGatherScatterOpCost(
7087              I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
7088              TargetTransformInfo::TCK_RecipThroughput, I);
7089 }
7090 
7091 InstructionCost
7092 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
7093                                                    ElementCount VF) {
7094   // TODO: Once we have support for interleaving with scalable vectors
7095   // we can calculate the cost properly here.
7096   if (VF.isScalable())
7097     return InstructionCost::getInvalid();
7098 
7099   Type *ValTy = getLoadStoreType(I);
7100   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
7101   unsigned AS = getLoadStoreAddressSpace(I);
7102 
7103   auto Group = getInterleavedAccessGroup(I);
  assert(Group && "Failed to get an interleaved access group.");
7105 
7106   unsigned InterleaveFactor = Group->getFactor();
7107   auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
7108 
7109   // Holds the indices of existing members in an interleaved load group.
7110   // An interleaved store group doesn't need this as it doesn't allow gaps.
7111   SmallVector<unsigned, 4> Indices;
7112   if (isa<LoadInst>(I)) {
7113     for (unsigned i = 0; i < InterleaveFactor; i++)
7114       if (Group->getMember(i))
7115         Indices.push_back(i);
7116   }
7117 
7118   // Calculate the cost of the whole interleaved group.
7119   bool UseMaskForGaps =
7120       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
7121   InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
7122       I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
7123       AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);
7124 
7125   if (Group->isReverse()) {
7126     // TODO: Add support for reversed masked interleaved access.
7127     assert(!Legal->isMaskRequired(I) &&
7128            "Reverse masked interleaved access not supported.");
7129     Cost +=
7130         Group->getNumMembers() *
7131         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
7132   }
7133   return Cost;
7134 }
7135 
7136 InstructionCost LoopVectorizationCostModel::getReductionPatternCost(
7137     Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
7138   // Early exit for no inloop reductions
7139   if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
7140     return InstructionCost::getInvalid();
7141   auto *VectorTy = cast<VectorType>(Ty);
7142 
  // We are looking for one of the following patterns, and finding the minimal
  // acceptable cost:
7144   //  reduce(mul(ext(A), ext(B))) or
7145   //  reduce(mul(A, B)) or
7146   //  reduce(ext(A)) or
7147   //  reduce(A).
7148   // The basic idea is that we walk down the tree to do that, finding the root
7149   // reduction instruction in InLoopReductionImmediateChains. From there we find
7150   // the pattern of mul/ext and test the cost of the entire pattern vs the cost
7151   // of the components. If the reduction cost is lower then we return it for the
7152   // reduction instruction and 0 for the other instructions in the pattern. If
  // it is not, we return an invalid cost specifying the original cost method
7154   // should be used.
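  // A hypothetical scalar chain matching reduce(mul(ext(A), ext(B))):
  //   %ea = sext i8 %a to i32
  //   %eb = sext i8 %b to i32
  //   %m  = mul i32 %ea, %eb
  //   %sum.next = add i32 %sum, %m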
7155   Instruction *RetI = I;
7156   if ((RetI->getOpcode() == Instruction::SExt ||
7157        RetI->getOpcode() == Instruction::ZExt)) {
7158     if (!RetI->hasOneUser())
7159       return InstructionCost::getInvalid();
7160     RetI = RetI->user_back();
7161   }
7162   if (RetI->getOpcode() == Instruction::Mul &&
7163       RetI->user_back()->getOpcode() == Instruction::Add) {
7164     if (!RetI->hasOneUser())
7165       return InstructionCost::getInvalid();
7166     RetI = RetI->user_back();
7167   }
7168 
7169   // Test if the found instruction is a reduction, and if not return an invalid
7170   // cost specifying the parent to use the original cost modelling.
7171   if (!InLoopReductionImmediateChains.count(RetI))
7172     return InstructionCost::getInvalid();
7173 
7174   // Find the reduction this chain is a part of and calculate the basic cost of
7175   // the reduction on its own.
7176   Instruction *LastChain = InLoopReductionImmediateChains[RetI];
7177   Instruction *ReductionPhi = LastChain;
7178   while (!isa<PHINode>(ReductionPhi))
7179     ReductionPhi = InLoopReductionImmediateChains[ReductionPhi];
7180 
7181   const RecurrenceDescriptor &RdxDesc =
7182       Legal->getReductionVars()[cast<PHINode>(ReductionPhi)];
7183   InstructionCost BaseCost = TTI.getArithmeticReductionCost(
7184       RdxDesc.getOpcode(), VectorTy, false, CostKind);
7185 
  // Get the operand that was not part of the reduction chain and match it to
  // one of the patterns, returning the better cost if one is found.
7188   Instruction *RedOp = RetI->getOperand(1) == LastChain
7189                            ? dyn_cast<Instruction>(RetI->getOperand(0))
7190                            : dyn_cast<Instruction>(RetI->getOperand(1));
7191 
7192   VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
7193 
7194   if (RedOp && (isa<SExtInst>(RedOp) || isa<ZExtInst>(RedOp)) &&
7195       !TheLoop->isLoopInvariant(RedOp)) {
7196     bool IsUnsigned = isa<ZExtInst>(RedOp);
7197     auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
7198     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
7199         /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
7200         CostKind);
7201 
7202     InstructionCost ExtCost =
7203         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
7204                              TTI::CastContextHint::None, CostKind, RedOp);
7205     if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
7206       return I == RetI ? *RedCost.getValue() : 0;
7207   } else if (RedOp && RedOp->getOpcode() == Instruction::Mul) {
7208     Instruction *Mul = RedOp;
7209     Instruction *Op0 = dyn_cast<Instruction>(Mul->getOperand(0));
7210     Instruction *Op1 = dyn_cast<Instruction>(Mul->getOperand(1));
7211     if (Op0 && Op1 && (isa<SExtInst>(Op0) || isa<ZExtInst>(Op0)) &&
7212         Op0->getOpcode() == Op1->getOpcode() &&
7213         Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
7214         !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
7215       bool IsUnsigned = isa<ZExtInst>(Op0);
7216       auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
7217       // reduce(mul(ext, ext))
7218       InstructionCost ExtCost =
7219           TTI.getCastInstrCost(Op0->getOpcode(), VectorTy, ExtType,
7220                                TTI::CastContextHint::None, CostKind, Op0);
7221       InstructionCost MulCost =
7222           TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind);
7223 
7224       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
7225           /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
7226           CostKind);
7227 
7228       if (RedCost.isValid() && RedCost < ExtCost * 2 + MulCost + BaseCost)
7229         return I == RetI ? *RedCost.getValue() : 0;
7230     } else {
7231       InstructionCost MulCost =
7232           TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind);
7233 
7234       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
7235           /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy,
7236           CostKind);
7237 
7238       if (RedCost.isValid() && RedCost < MulCost + BaseCost)
7239         return I == RetI ? *RedCost.getValue() : 0;
7240     }
7241   }
7242 
7243   return I == RetI ? BaseCost : InstructionCost::getInvalid();
7244 }
7245 
7246 InstructionCost
7247 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
7248                                                      ElementCount VF) {
7249   // Calculate scalar cost only. Vectorization cost should be ready at this
7250   // moment.
7251   if (VF.isScalar()) {
7252     Type *ValTy = getLoadStoreType(I);
7253     const Align Alignment = getLoadStoreAlignment(I);
7254     unsigned AS = getLoadStoreAddressSpace(I);
7255 
7256     return TTI.getAddressComputationCost(ValTy) +
7257            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
7258                                TTI::TCK_RecipThroughput, I);
7259   }
7260   return getWideningCost(I, VF);
7261 }
7262 
7263 LoopVectorizationCostModel::VectorizationCostTy
7264 LoopVectorizationCostModel::getInstructionCost(Instruction *I,
7265                                                ElementCount VF) {
7266   // If we know that this instruction will remain uniform, check the cost of
7267   // the scalar version.
7268   if (isUniformAfterVectorization(I, VF))
7269     VF = ElementCount::getFixed(1);
7270 
7271   if (VF.isVector() && isProfitableToScalarize(I, VF))
7272     return VectorizationCostTy(InstsToScalarize[VF][I], false);
7273 
7274   // Forced scalars do not have any scalarization overhead.
7275   auto ForcedScalar = ForcedScalars.find(VF);
7276   if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
7277     auto InstSet = ForcedScalar->second;
7278     if (InstSet.count(I))
7279       return VectorizationCostTy(
7280           (getInstructionCost(I, ElementCount::getFixed(1)).first *
7281            VF.getKnownMinValue()),
7282           false);
7283   }
7284 
7285   Type *VectorTy;
7286   InstructionCost C = getInstructionCost(I, VF, VectorTy);
7287 
7288   bool TypeNotScalarized =
7289       VF.isVector() && VectorTy->isVectorTy() &&
7290       TTI.getNumberOfParts(VectorTy) < VF.getKnownMinValue();
7291   return VectorizationCostTy(C, TypeNotScalarized);
7292 }
7293 
7294 InstructionCost
7295 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
7296                                                      ElementCount VF) const {
7297 
7298   if (VF.isScalable())
7299     return InstructionCost::getInvalid();
7300 
7301   if (VF.isScalar())
7302     return 0;
7303 
7304   InstructionCost Cost = 0;
7305   Type *RetTy = ToVectorTy(I->getType(), VF);
7306   if (!RetTy->isVoidTy() &&
7307       (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
7308     Cost += TTI.getScalarizationOverhead(
7309         cast<VectorType>(RetTy), APInt::getAllOnesValue(VF.getKnownMinValue()),
7310         true, false);
7311 
7312   // Some targets keep addresses scalar.
7313   if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
7314     return Cost;
7315 
7316   // Some targets support efficient element stores.
7317   if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
7318     return Cost;
7319 
7320   // Collect operands to consider.
7321   CallInst *CI = dyn_cast<CallInst>(I);
7322   Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands();
7323 
7324   // Skip operands that do not require extraction/scalarization and do not incur
7325   // any overhead.
7326   SmallVector<Type *> Tys;
7327   for (auto *V : filterExtractingOperands(Ops, VF))
7328     Tys.push_back(MaybeVectorizeType(V->getType(), VF));
7329   return Cost + TTI.getOperandsScalarizationOverhead(
7330                     filterExtractingOperands(Ops, VF), Tys);
7331 }
7332 
7333 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
7334   if (VF.isScalar())
7335     return;
7336   NumPredStores = 0;
7337   for (BasicBlock *BB : TheLoop->blocks()) {
7338     // For each instruction in the old loop.
7339     for (Instruction &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
7341       if (!Ptr)
7342         continue;
7343 
7344       // TODO: We should generate better code and update the cost model for
7345       // predicated uniform stores. Today they are treated as any other
7346       // predicated store (see added test cases in
7347       // invariant-store-vectorization.ll).
7348       if (isa<StoreInst>(&I) && isScalarWithPredication(&I))
7349         NumPredStores++;
7350 
7351       if (Legal->isUniformMemOp(I)) {
7352         // TODO: Avoid replicating loads and stores instead of
7353         // relying on instcombine to remove them.
7354         // Load: Scalar load + broadcast
7355         // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
7356         InstructionCost Cost = getUniformMemOpCost(&I, VF);
7357         setWideningDecision(&I, VF, CM_Scalarize, Cost);
7358         continue;
7359       }
7360 
7361       // We assume that widening is the best solution when possible.
7362       if (memoryInstructionCanBeWidened(&I, VF)) {
7363         InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
        int ConsecutiveStride =
            Legal->isConsecutivePtr(getLoadStorePointerOperand(&I));
7366         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
7367                "Expected consecutive stride.");
7368         InstWidening Decision =
7369             ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
7370         setWideningDecision(&I, VF, Decision, Cost);
7371         continue;
7372       }
7373 
7374       // Choose between Interleaving, Gather/Scatter or Scalarization.
7375       InstructionCost InterleaveCost = InstructionCost::getInvalid();
7376       unsigned NumAccesses = 1;
7377       if (isAccessInterleaved(&I)) {
7378         auto Group = getInterleavedAccessGroup(&I);
        assert(Group && "Failed to get an interleaved access group.");
7380 
7381         // Make one decision for the whole group.
7382         if (getWideningDecision(&I, VF) != CM_Unknown)
7383           continue;
7384 
7385         NumAccesses = Group->getNumMembers();
7386         if (interleavedAccessCanBeWidened(&I, VF))
7387           InterleaveCost = getInterleaveGroupCost(&I, VF);
7388       }
7389 
7390       InstructionCost GatherScatterCost =
7391           isLegalGatherOrScatter(&I)
7392               ? getGatherScatterCost(&I, VF) * NumAccesses
7393               : InstructionCost::getInvalid();
7394 
7395       InstructionCost ScalarizationCost =
7396           getMemInstScalarizationCost(&I, VF) * NumAccesses;
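      // Note: the gather/scatter and scalarization costs above are per-member
      // costs scaled by the number of group members, so all three alternatives
      // below are compared at the granularity of the whole interleave group.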
7397 
      // Choose the best option for the current VF, record this decision and
      // use it during vectorization.
7400       InstructionCost Cost;
7401       InstWidening Decision;
7402       if (InterleaveCost <= GatherScatterCost &&
7403           InterleaveCost < ScalarizationCost) {
7404         Decision = CM_Interleave;
7405         Cost = InterleaveCost;
7406       } else if (GatherScatterCost < ScalarizationCost) {
7407         Decision = CM_GatherScatter;
7408         Cost = GatherScatterCost;
7409       } else {
7410         assert(!VF.isScalable() &&
7411                "We cannot yet scalarise for scalable vectors");
7412         Decision = CM_Scalarize;
7413         Cost = ScalarizationCost;
7414       }
      // If the instruction belongs to an interleave group, the whole group
      // receives the same decision. The cost is computed for the whole group,
      // but it will actually be assigned to a single member instruction.
7418       if (auto Group = getInterleavedAccessGroup(&I))
7419         setWideningDecision(Group, VF, Decision, Cost);
7420       else
7421         setWideningDecision(&I, VF, Decision, Cost);
7422     }
7423   }
7424 
7425   // Make sure that any load of address and any other address computation
7426   // remains scalar unless there is gather/scatter support. This avoids
7427   // inevitable extracts into address registers, and also has the benefit of
7428   // activating LSR more, since that pass can't optimize vectorized
7429   // addresses.
7430   if (TTI.prefersVectorizedAddressing())
7431     return;
7432 
7433   // Start with all scalar pointer uses.
7434   SmallPtrSet<Instruction *, 8> AddrDefs;
7435   for (BasicBlock *BB : TheLoop->blocks())
7436     for (Instruction &I : *BB) {
7437       Instruction *PtrDef =
7438         dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
7439       if (PtrDef && TheLoop->contains(PtrDef) &&
7440           getWideningDecision(&I, VF) != CM_GatherScatter)
7441         AddrDefs.insert(PtrDef);
7442     }
7443 
7444   // Add all instructions used to generate the addresses.
7445   SmallVector<Instruction *, 4> Worklist;
7446   append_range(Worklist, AddrDefs);
7447   while (!Worklist.empty()) {
7448     Instruction *I = Worklist.pop_back_val();
7449     for (auto &Op : I->operands())
7450       if (auto *InstOp = dyn_cast<Instruction>(Op))
7451         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
7452             AddrDefs.insert(InstOp).second)
7453           Worklist.push_back(InstOp);
7454   }
7455 
7456   for (auto *I : AddrDefs) {
7457     if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by the
      // cost functions, but since that would require finding out whether the
      // loaded value is involved in an address computation, the decision is
      // instead overridden here, where that information is available.
7462       InstWidening Decision = getWideningDecision(I, VF);
7463       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
7464         // Scalarize a widened load of address.
7465         setWideningDecision(
7466             I, VF, CM_Scalarize,
7467             (VF.getKnownMinValue() *
7468              getMemoryInstructionCost(I, ElementCount::getFixed(1))));
7469       else if (auto Group = getInterleavedAccessGroup(I)) {
7470         // Scalarize an interleave group of address loads.
7471         for (unsigned I = 0; I < Group->getFactor(); ++I) {
7472           if (Instruction *Member = Group->getMember(I))
7473             setWideningDecision(
7474                 Member, VF, CM_Scalarize,
7475                 (VF.getKnownMinValue() *
7476                  getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
7477         }
7478       }
7479     } else
7480       // Make sure I gets scalarized and a cost estimate without
7481       // scalarization overhead.
7482       ForcedScalars[VF].insert(I);
7483   }
7484 }
7485 
7486 InstructionCost
7487 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
7488                                                Type *&VectorTy) {
7489   Type *RetTy = I->getType();
7490   if (canTruncateToMinimalBitwidth(I, VF))
7491     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
7492   auto SE = PSE.getSE();
7493   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
7494 
7495   auto hasSingleCopyAfterVectorization = [this](Instruction *I,
7496                                                 ElementCount VF) -> bool {
7497     if (VF.isScalar())
7498       return true;
7499 
7500     auto Scalarized = InstsToScalarize.find(VF);
7501     assert(Scalarized != InstsToScalarize.end() &&
7502            "VF not yet analyzed for scalarization profitability");
7503     return !Scalarized->second.count(I) &&
7504            llvm::all_of(I->users(), [&](User *U) {
7505              auto *UI = cast<Instruction>(U);
7506              return !Scalarized->second.count(UI);
7507            });
7508   };
7509   (void) hasSingleCopyAfterVectorization;
7510 
7511   if (isScalarAfterVectorization(I, VF)) {
    // With the exception of GEPs and PHIs, after scalarization there should
    // only be one copy of the instruction generated in the loop. This is
    // because the VF is either 1, or any instructions that need scalarizing
    // have already been dealt with by the time we get here. As a result, we
    // don't have to multiply the instruction cost by VF.
7517     assert(I->getOpcode() == Instruction::GetElementPtr ||
7518            I->getOpcode() == Instruction::PHI ||
7519            (I->getOpcode() == Instruction::BitCast &&
7520             I->getType()->isPointerTy()) ||
7521            hasSingleCopyAfterVectorization(I, VF));
7522     VectorTy = RetTy;
7523   } else
7524     VectorTy = ToVectorTy(RetTy, VF);
7525 
7526   // TODO: We need to estimate the cost of intrinsic calls.
7527   switch (I->getOpcode()) {
7528   case Instruction::GetElementPtr:
7529     // We mark this instruction as zero-cost because the cost of GEPs in
7530     // vectorized code depends on whether the corresponding memory instruction
7531     // is scalarized or not. Therefore, we handle GEPs with the memory
7532     // instruction cost.
7533     return 0;
7534   case Instruction::Br: {
    // In cases of scalarized and predicated instructions, there will be VF
    // predicated blocks in the vectorized loop. Each branch around these
    // blocks also requires an extract of its vector compare i1 element.
7538     bool ScalarPredicatedBB = false;
7539     BranchInst *BI = cast<BranchInst>(I);
7540     if (VF.isVector() && BI->isConditional() &&
7541         (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
7542          PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
7543       ScalarPredicatedBB = true;
7544 
7545     if (ScalarPredicatedBB) {
7546       // Return cost for branches around scalarized and predicated blocks.
7547       assert(!VF.isScalable() && "scalable vectors not yet supported.");
7548       auto *Vec_i1Ty =
7549           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
7550       return (TTI.getScalarizationOverhead(
7551                   Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()),
7552                   false, true) +
7553               (TTI.getCFInstrCost(Instruction::Br, CostKind) *
7554                VF.getKnownMinValue()));
7555     } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
7556       // The back-edge branch will remain, as will all scalar branches.
7557       return TTI.getCFInstrCost(Instruction::Br, CostKind);
7558     else
7559       // This branch will be eliminated by if-conversion.
7560       return 0;
7561     // Note: We currently assume zero cost for an unconditional branch inside
7562     // a predicated block since it will become a fall-through, although we
7563     // may decide in the future to call TTI for all branches.
7564   }
7565   case Instruction::PHI: {
7566     auto *Phi = cast<PHINode>(I);
7567 
7568     // First-order recurrences are replaced by vector shuffles inside the loop.
7569     // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
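    // The cost is modelled here as extracting a single-element subvector at
    // position VF - 1 from the vector value of the previous iteration.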
7570     if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi))
7571       return TTI.getShuffleCost(
7572           TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy),
7573           None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1));
7574 
7575     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
7576     // converted into select instructions. We require N - 1 selects per phi
7577     // node, where N is the number of incoming values.
7578     if (VF.isVector() && Phi->getParent() != TheLoop->getHeader())
7579       return (Phi->getNumIncomingValues() - 1) *
7580              TTI.getCmpSelInstrCost(
7581                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
7582                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
7583                  CmpInst::BAD_ICMP_PREDICATE, CostKind);
7584 
7585     return TTI.getCFInstrCost(Instruction::PHI, CostKind);
7586   }
7587   case Instruction::UDiv:
7588   case Instruction::SDiv:
7589   case Instruction::URem:
7590   case Instruction::SRem:
7591     // If we have a predicated instruction, it may not be executed for each
7592     // vector lane. Get the scalarization cost and scale this amount by the
7593     // probability of executing the predicated block. If the instruction is not
7594     // predicated, we fall through to the next case.
7595     if (VF.isVector() && isScalarWithPredication(I)) {
7596       InstructionCost Cost = 0;
7597 
7598       // These instructions have a non-void type, so account for the phi nodes
7599       // that we will create. This cost is likely to be zero. The phi node
7600       // cost, if any, should be scaled by the block probability because it
7601       // models a copy at the end of each predicated block.
7602       Cost += VF.getKnownMinValue() *
7603               TTI.getCFInstrCost(Instruction::PHI, CostKind);
7604 
7605       // The cost of the non-predicated instruction.
7606       Cost += VF.getKnownMinValue() *
7607               TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);
7608 
7609       // The cost of insertelement and extractelement instructions needed for
7610       // scalarization.
7611       Cost += getScalarizationOverhead(I, VF);
7612 
7613       // Scale the cost by the probability of executing the predicated blocks.
7614       // This assumes the predicated block for each vector lane is equally
7615       // likely.
7616       return Cost / getReciprocalPredBlockProb();
7617     }
7618     LLVM_FALLTHROUGH;
7619   case Instruction::Add:
7620   case Instruction::FAdd:
7621   case Instruction::Sub:
7622   case Instruction::FSub:
7623   case Instruction::Mul:
7624   case Instruction::FMul:
7625   case Instruction::FDiv:
7626   case Instruction::FRem:
7627   case Instruction::Shl:
7628   case Instruction::LShr:
7629   case Instruction::AShr:
7630   case Instruction::And:
7631   case Instruction::Or:
7632   case Instruction::Xor: {
    // Since we will replace the stride by 1, the multiplication should go away.
7634     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
7635       return 0;
7636 
7637     // Detect reduction patterns
7638     InstructionCost RedCost;
7639     if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7640             .isValid())
7641       return RedCost;
7642 
    // Certain instructions can be cheaper to vectorize if they have a constant
    // second vector operand. One example of this is shifts on x86.
7645     Value *Op2 = I->getOperand(1);
7646     TargetTransformInfo::OperandValueProperties Op2VP;
7647     TargetTransformInfo::OperandValueKind Op2VK =
7648         TTI.getOperandInfo(Op2, Op2VP);
7649     if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
7650       Op2VK = TargetTransformInfo::OK_UniformValue;
7651 
7652     SmallVector<const Value *, 4> Operands(I->operand_values());
7653     return TTI.getArithmeticInstrCost(
7654         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7655         Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
7656   }
7657   case Instruction::FNeg: {
7658     return TTI.getArithmeticInstrCost(
7659         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7660         TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None,
7661         TargetTransformInfo::OP_None, I->getOperand(0), I);
7662   }
7663   case Instruction::Select: {
7664     SelectInst *SI = cast<SelectInst>(I);
7665     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
7666     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
7667 
7668     const Value *Op0, *Op1;
7669     using namespace llvm::PatternMatch;
7670     if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
7671                         match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
7672       // select x, y, false --> x & y
7673       // select x, true, y --> x | y
7674       TTI::OperandValueProperties Op1VP = TTI::OP_None;
7675       TTI::OperandValueProperties Op2VP = TTI::OP_None;
7676       TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP);
7677       TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP);
7678       assert(Op0->getType()->getScalarSizeInBits() == 1 &&
7679               Op1->getType()->getScalarSizeInBits() == 1);
7680 
7681       SmallVector<const Value *, 2> Operands{Op0, Op1};
7682       return TTI.getArithmeticInstrCost(
7683           match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And, VectorTy,
7684           CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I);
7685     }
7686 
7687     Type *CondTy = SI->getCondition()->getType();
7688     if (!ScalarCond)
7689       CondTy = VectorType::get(CondTy, VF);
7690     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy,
7691                                   CmpInst::BAD_ICMP_PREDICATE, CostKind, I);
7692   }
7693   case Instruction::ICmp:
7694   case Instruction::FCmp: {
7695     Type *ValTy = I->getOperand(0)->getType();
7696     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
7697     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
7698       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
7699     VectorTy = ToVectorTy(ValTy, VF);
7700     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr,
7701                                   CmpInst::BAD_ICMP_PREDICATE, CostKind, I);
7702   }
7703   case Instruction::Store:
7704   case Instruction::Load: {
7705     ElementCount Width = VF;
7706     if (Width.isVector()) {
7707       InstWidening Decision = getWideningDecision(I, Width);
7708       assert(Decision != CM_Unknown &&
7709              "CM decision should be taken at this point");
7710       if (Decision == CM_Scalarize)
7711         Width = ElementCount::getFixed(1);
7712     }
7713     VectorTy = ToVectorTy(getLoadStoreType(I), Width);
7714     return getMemoryInstructionCost(I, VF);
7715   }
7716   case Instruction::BitCast:
7717     if (I->getType()->isPointerTy())
7718       return 0;
7719     LLVM_FALLTHROUGH;
7720   case Instruction::ZExt:
7721   case Instruction::SExt:
7722   case Instruction::FPToUI:
7723   case Instruction::FPToSI:
7724   case Instruction::FPExt:
7725   case Instruction::PtrToInt:
7726   case Instruction::IntToPtr:
7727   case Instruction::SIToFP:
7728   case Instruction::UIToFP:
7729   case Instruction::Trunc:
7730   case Instruction::FPTrunc: {
7731     // Computes the CastContextHint from a Load/Store instruction.
7732     auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
7733       assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
7734              "Expected a load or a store!");
7735 
7736       if (VF.isScalar() || !TheLoop->contains(I))
7737         return TTI::CastContextHint::Normal;
7738 
7739       switch (getWideningDecision(I, VF)) {
7740       case LoopVectorizationCostModel::CM_GatherScatter:
7741         return TTI::CastContextHint::GatherScatter;
7742       case LoopVectorizationCostModel::CM_Interleave:
7743         return TTI::CastContextHint::Interleave;
7744       case LoopVectorizationCostModel::CM_Scalarize:
7745       case LoopVectorizationCostModel::CM_Widen:
7746         return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked
7747                                         : TTI::CastContextHint::Normal;
7748       case LoopVectorizationCostModel::CM_Widen_Reverse:
7749         return TTI::CastContextHint::Reversed;
7750       case LoopVectorizationCostModel::CM_Unknown:
7751         llvm_unreachable("Instr did not go through cost modelling?");
7752       }
7753 
7754       llvm_unreachable("Unhandled case!");
7755     };
7756 
7757     unsigned Opcode = I->getOpcode();
7758     TTI::CastContextHint CCH = TTI::CastContextHint::None;
7759     // For Trunc, the context is the only user, which must be a StoreInst.
7760     if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
7761       if (I->hasOneUse())
7762         if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
7763           CCH = ComputeCCH(Store);
7764     }
7765     // For Z/Sext, the context is the operand, which must be a LoadInst.
7766     else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
7767              Opcode == Instruction::FPExt) {
7768       if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
7769         CCH = ComputeCCH(Load);
7770     }
7771 
7772     // We optimize the truncation of induction variables having constant
7773     // integer steps. The cost of these truncations is the same as the scalar
7774     // operation.
7775     if (isOptimizableIVTruncate(I, VF)) {
7776       auto *Trunc = cast<TruncInst>(I);
7777       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
7778                                   Trunc->getSrcTy(), CCH, CostKind, Trunc);
7779     }
7780 
7781     // Detect reduction patterns
7782     InstructionCost RedCost;
7783     if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7784             .isValid())
7785       return RedCost;
7786 
7787     Type *SrcScalarTy = I->getOperand(0)->getType();
7788     Type *SrcVecTy =
7789         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
7790     if (canTruncateToMinimalBitwidth(I, VF)) {
      // This cast is going to be shrunk. This may remove the cast or turn it
      // into a slightly different cast. For example, if MinBW == 16,
      // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
7794       //
7795       // Calculate the modified src and dest types.
7796       Type *MinVecTy = VectorTy;
7797       if (Opcode == Instruction::Trunc) {
7798         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
7799         VectorTy =
7800             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7801       } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
7802         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
7803         VectorTy =
7804             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7805       }
7806     }
7807 
7808     return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
7809   }
7810   case Instruction::Call: {
7811     bool NeedToScalarize;
7812     CallInst *CI = cast<CallInst>(I);
7813     InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
7814     if (getVectorIntrinsicIDForCall(CI, TLI)) {
7815       InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF);
7816       return std::min(CallCost, IntrinsicCost);
7817     }
7818     return CallCost;
7819   }
7820   case Instruction::ExtractValue:
7821     return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput);
7822   default:
7823     // This opcode is unknown. Assume that it is the same as 'mul'.
7824     return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
7825   } // end of switch.
7826 }
7827 
7828 char LoopVectorize::ID = 0;
7829 
7830 static const char lv_name[] = "Loop Vectorization";
7831 
7832 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
7833 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
7834 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
7835 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
7836 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
7837 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
7838 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
7839 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
7840 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
7841 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
7842 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
7843 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
7844 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
7845 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
7846 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
7847 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
7848 
7849 namespace llvm {
7850 
7851 Pass *createLoopVectorizePass() { return new LoopVectorize(); }
7852 
7853 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
7854                               bool VectorizeOnlyWhenForced) {
7855   return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
7856 }
7857 
7858 } // end namespace llvm
7859 
7860 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
7861   // Check if the pointer operand of a load or store instruction is
7862   // consecutive.
7863   if (auto *Ptr = getLoadStorePointerOperand(Inst))
7864     return Legal->isConsecutivePtr(Ptr);
7865   return false;
7866 }
7867 
7868 void LoopVectorizationCostModel::collectValuesToIgnore() {
7869   // Ignore ephemeral values.
7870   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
7871 
7872   // Ignore type-promoting instructions we identified during reduction
7873   // detection.
7874   for (auto &Reduction : Legal->getReductionVars()) {
7875     RecurrenceDescriptor &RedDes = Reduction.second;
7876     const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
7877     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7878   }
7879   // Ignore type-casting instructions we identified during induction
7880   // detection.
7881   for (auto &Induction : Legal->getInductionVars()) {
7882     InductionDescriptor &IndDes = Induction.second;
7883     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
7884     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7885   }
7886 }
7887 
7888 void LoopVectorizationCostModel::collectInLoopReductions() {
7889   for (auto &Reduction : Legal->getReductionVars()) {
7890     PHINode *Phi = Reduction.first;
7891     RecurrenceDescriptor &RdxDesc = Reduction.second;
7892 
7893     // We don't collect reductions that are type promoted (yet).
7894     if (RdxDesc.getRecurrenceType() != Phi->getType())
7895       continue;
7896 
7897     // If the target would prefer this reduction to happen "in-loop", then we
7898     // want to record it as such.
7899     unsigned Opcode = RdxDesc.getOpcode();
7900     if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) &&
7901         !TTI.preferInLoopReduction(Opcode, Phi->getType(),
7902                                    TargetTransformInfo::ReductionFlags()))
7903       continue;
7904 
7905     // Check that we can correctly put the reductions into the loop, by
7906     // finding the chain of operations that leads from the phi to the loop
7907     // exit value.
7908     SmallVector<Instruction *, 4> ReductionOperations =
7909         RdxDesc.getReductionOpChain(Phi, TheLoop);
7910     bool InLoop = !ReductionOperations.empty();
7911     if (InLoop) {
7912       InLoopReductionChains[Phi] = ReductionOperations;
7913       // Add the elements to InLoopReductionImmediateChains for cost modelling.
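      // E.g., for a chain Phi -> Add1 -> Add2 this records Add1 -> Phi and
      // Add2 -> Add1, so each link can be walked back towards the phi.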
7914       Instruction *LastChain = Phi;
7915       for (auto *I : ReductionOperations) {
7916         InLoopReductionImmediateChains[I] = LastChain;
7917         LastChain = I;
7918       }
7919     }
7920     LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
7921                       << " reduction for phi: " << *Phi << "\n");
7922   }
7923 }
7924 
// TODO: we could return a pair of values that specify the max VF and
// min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
// `buildVPlans(VF, VF)`. We cannot do that yet because VPlan currently
// doesn't have a cost model that can choose which plan to execute if
// more than one is generated.
7930 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
7931                                  LoopVectorizationCostModel &CM) {
7932   unsigned WidestType;
7933   std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
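  // E.g., 256-bit wide vector registers and a widest scalar type of 32 bits
  // yield a VF of 8.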
7934   return WidestVectorRegBits / WidestType;
7935 }
7936 
7937 VectorizationFactor
7938 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
7939   assert(!UserVF.isScalable() && "scalable vectors not yet supported");
7940   ElementCount VF = UserVF;
  // Outer loop handling: outer loops may require CFG and instruction level
  // transformations before even evaluating whether vectorization is profitable.
7943   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
7944   // the vectorization pipeline.
7945   if (!OrigLoop->isInnermost()) {
7946     // If the user doesn't provide a vectorization factor, determine a
7947     // reasonable one.
7948     if (UserVF.isZero()) {
7949       VF = ElementCount::getFixed(determineVPlanVF(
7950           TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
7951               .getFixedSize(),
7952           CM));
7953       LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
7954 
7955       // Make sure we have a VF > 1 for stress testing.
7956       if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
7957         LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
7958                           << "overriding computed VF.\n");
7959         VF = ElementCount::getFixed(4);
7960       }
7961     }
7962     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7963     assert(isPowerOf2_32(VF.getKnownMinValue()) &&
7964            "VF needs to be a power of two");
7965     LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
7966                       << "VF " << VF << " to build VPlans.\n");
7967     buildVPlans(VF, VF);
7968 
7969     // For VPlan build stress testing, we bail out after VPlan construction.
7970     if (VPlanBuildStressTest)
7971       return VectorizationFactor::Disabled();
7972 
7973     return {VF, 0 /*Cost*/};
7974   }
7975 
7976   LLVM_DEBUG(
7977       dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
7978                 "VPlan-native path.\n");
7979   return VectorizationFactor::Disabled();
7980 }
7981 
7982 Optional<VectorizationFactor>
7983 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
7984   assert(OrigLoop->isInnermost() && "Inner loop expected.");
7985   FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
  if (!MaxFactors) // Cases that should not be vectorized nor interleaved.
7987     return None;
7988 
7989   // Invalidate interleave groups if all blocks of loop will be predicated.
7990   if (CM.blockNeedsPredication(OrigLoop->getHeader()) &&
7991       !useMaskedInterleavedAccesses(*TTI)) {
7992     LLVM_DEBUG(
7993         dbgs()
7994         << "LV: Invalidate all interleaved groups due to fold-tail by masking "
7995            "which requires masked-interleaved support.\n");
7996     if (CM.InterleaveInfo.invalidateGroups())
7997       // Invalidating interleave groups also requires invalidating all decisions
7998       // based on them, which includes widening decisions and uniform and scalar
7999       // values.
8000       CM.invalidateCostModelingDecisions();
8001   }
8002 
8003   ElementCount MaxUserVF =
8004       UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF;
8005   bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF);
8006   if (!UserVF.isZero() && UserVFIsLegal) {
8007     LLVM_DEBUG(dbgs() << "LV: Using " << (UserVFIsLegal ? "user" : "max")
8008                       << " VF " << UserVF << ".\n");
8009     assert(isPowerOf2_32(UserVF.getKnownMinValue()) &&
8010            "VF needs to be a power of two");
8011     // Collect the instructions (and their associated costs) that will be more
8012     // profitable to scalarize.
8013     CM.selectUserVectorizationFactor(UserVF);
8014     CM.collectInLoopReductions();
8015     buildVPlansWithVPRecipes(UserVF, UserVF);
8016     LLVM_DEBUG(printPlans(dbgs()));
8017     return {{UserVF, 0}};
8018   }
8019 
8020   // Populate the set of Vectorization Factor Candidates.
8021   ElementCountSet VFCandidates;
8022   for (auto VF = ElementCount::getFixed(1);
8023        ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2)
8024     VFCandidates.insert(VF);
8025   for (auto VF = ElementCount::getScalable(1);
8026        ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2)
8027     VFCandidates.insert(VF);
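  // The candidates are thus the powers of two 1, 2, 4, ... up to the maximum
  // fixed-width VF, plus vscale x 1, vscale x 2, ... up to the maximum
  // scalable VF.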
8028 
8029   for (const auto &VF : VFCandidates) {
8030     // Collect Uniform and Scalar instructions after vectorization with VF.
8031     CM.collectUniformsAndScalars(VF);
8032 
8033     // Collect the instructions (and their associated costs) that will be more
8034     // profitable to scalarize.
8035     if (VF.isVector())
8036       CM.collectInstsToScalarize(VF);
8037   }
8038 
8039   CM.collectInLoopReductions();
8040   buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF);
8041   buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF);
8042 
8043   LLVM_DEBUG(printPlans(dbgs()));
8044   if (!MaxFactors.hasVector())
8045     return VectorizationFactor::Disabled();
8046 
8047   // Select the optimal vectorization factor.
8048   auto SelectedVF = CM.selectVectorizationFactor(VFCandidates);
8049 
8050   // Check if it is profitable to vectorize with runtime checks.
8051   unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks();
8052   if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) {
8053     bool PragmaThresholdReached =
8054         NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
8055     bool ThresholdReached =
8056         NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
8057     if ((ThresholdReached && !Hints.allowReordering()) ||
8058         PragmaThresholdReached) {
8059       ORE->emit([&]() {
8060         return OptimizationRemarkAnalysisAliasing(
8061                    DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(),
8062                    OrigLoop->getHeader())
8063                << "loop not vectorized: cannot prove it is safe to reorder "
8064                   "memory operations";
8065       });
8066       LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
8067       Hints.emitRemarkWithHints();
8068       return VectorizationFactor::Disabled();
8069     }
8070   }
8071   return SelectedVF;
8072 }
8073 
8074 void LoopVectorizationPlanner::setBestPlan(ElementCount VF, unsigned UF) {
8075   LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF
8076                     << '\n');
8077   BestVF = VF;
8078   BestUF = UF;
8079 
8080   erase_if(VPlans, [VF](const VPlanPtr &Plan) {
8081     return !Plan->hasVF(VF);
8082   });
  assert(VPlans.size() == 1 && "Best VF does not have a single VPlan.");
8084 }
8085 
8086 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV,
8087                                            DominatorTree *DT) {
8088   // Perform the actual loop transformation.
8089 
8090   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
8091   assert(BestVF.hasValue() && "Vectorization Factor is missing");
8092   assert(VPlans.size() == 1 && "Not a single VPlan to execute.");
8093 
8094   VPTransformState State{
8095       *BestVF, BestUF, LI, DT, ILV.Builder, &ILV, VPlans.front().get()};
8096   State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
8097   State.TripCount = ILV.getOrCreateTripCount(nullptr);
8098   State.CanonicalIV = ILV.Induction;
8099 
8100   ILV.printDebugTracesAtStart();
8101 
8102   //===------------------------------------------------===//
8103   //
  // Notice: any optimization or new instruction that goes
  // into the code below should also be implemented in
  // the cost-model.
8107   //
8108   //===------------------------------------------------===//
8109 
8110   // 2. Copy and widen instructions from the old loop into the new loop.
8111   VPlans.front()->execute(&State);
8112 
8113   // 3. Fix the vectorized code: take care of header phi's, live-outs,
8114   //    predication, updating analyses.
8115   ILV.fixVectorizedLoop(State);
8116 
8117   ILV.printDebugTracesAtEnd();
8118 }
8119 
8120 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
8121 void LoopVectorizationPlanner::printPlans(raw_ostream &O) {
8122   for (const auto &Plan : VPlans)
8123     if (PrintVPlansInDotFormat)
8124       Plan->printDOT(O);
8125     else
8126       Plan->print(O);
8127 }
8128 #endif
8129 
8130 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
8131     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
8132 
  // We create new control-flow for the vectorized loop, so the original exit
  // conditions will be dead after vectorization if they are only used by the
  // terminators of their exiting blocks.
8136   SmallVector<BasicBlock*> ExitingBlocks;
8137   OrigLoop->getExitingBlocks(ExitingBlocks);
8138   for (auto *BB : ExitingBlocks) {
8139     auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
8140     if (!Cmp || !Cmp->hasOneUse())
8141       continue;
8142 
8143     // TODO: we should introduce a getUniqueExitingBlocks on Loop
8144     if (!DeadInstructions.insert(Cmp).second)
8145       continue;
8146 
    // An operand of the icmp is often a dead trunc, used by IndUpdate.
8148     // TODO: can recurse through operands in general
8149     for (Value *Op : Cmp->operands()) {
8150       if (isa<TruncInst>(Op) && Op->hasOneUse())
8151           DeadInstructions.insert(cast<Instruction>(Op));
8152     }
8153   }
8154 
8155   // We create new "steps" for induction variable updates to which the original
8156   // induction variables map. An original update instruction will be dead if
8157   // all its users except the induction variable are dead.
8158   auto *Latch = OrigLoop->getLoopLatch();
8159   for (auto &Induction : Legal->getInductionVars()) {
8160     PHINode *Ind = Induction.first;
8161     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
8162 
    // If the tail is to be folded by masking, the primary induction variable,
    // if it exists, isn't dead: it will be used for masking. Don't kill it.
8165     if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction())
8166       continue;
8167 
8168     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
8169           return U == Ind || DeadInstructions.count(cast<Instruction>(U));
8170         }))
8171       DeadInstructions.insert(IndUpdate);
8172 
    // We also record as "Dead" the type-casting instructions we had identified
    // during induction analysis. We don't need any handling for them in the
8175     // vectorized loop because we have proven that, under a proper runtime
8176     // test guarding the vectorized loop, the value of the phi, and the casted
8177     // value of the phi, are the same. The last instruction in this casting chain
8178     // will get its scalar/vector/widened def from the scalar/vector/widened def
8179     // of the respective phi node. Any other casts in the induction def-use chain
8180     // have no other uses outside the phi update chain, and will be ignored.
8181     InductionDescriptor &IndDes = Induction.second;
8182     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
8183     DeadInstructions.insert(Casts.begin(), Casts.end());
8184   }
8185 }
8186 
8187 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }
8188 
8189 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
8190 
8191 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
8192                                         Instruction::BinaryOps BinOp) {
8193   // When unrolling and the VF is 1, we only need to add a simple scalar.
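  // E.g., with StartIdx == 2 and Step == 1 this produces Val + 2 (or the
  // corresponding floating-point operation selected by BinOp).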
8194   Type *Ty = Val->getType();
8195   assert(!Ty->isVectorTy() && "Val must be a scalar");
8196 
8197   if (Ty->isFloatingPointTy()) {
8198     Constant *C = ConstantFP::get(Ty, (double)StartIdx);
8199 
8200     // Floating-point operations inherit FMF via the builder's flags.
8201     Value *MulOp = Builder.CreateFMul(C, Step);
8202     return Builder.CreateBinOp(BinOp, Val, MulOp);
8203   }
8204   Constant *C = ConstantInt::get(Ty, StartIdx);
8205   return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
8206 }
8207 
8208 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
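  // Append "llvm.loop.unroll.runtime.disable" to the loop's metadata, unless
  // unroll-disable metadata is already present. The resulting loop ID is a
  // self-referential node, roughly of the form (illustrative):
  //   !0 = distinct !{!0, <existing operands>, !1}
  //   !1 = !{!"llvm.loop.unroll.runtime.disable"}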
8209   SmallVector<Metadata *, 4> MDs;
8210   // Reserve first location for self reference to the LoopID metadata node.
8211   MDs.push_back(nullptr);
8212   bool IsUnrollMetadata = false;
8213   MDNode *LoopID = L->getLoopID();
8214   if (LoopID) {
8215     // First find existing loop unrolling disable metadata.
8216     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
8217       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
8218       if (MD) {
8219         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
8220         IsUnrollMetadata =
8221             S && S->getString().startswith("llvm.loop.unroll.disable");
8222       }
8223       MDs.push_back(LoopID->getOperand(i));
8224     }
8225   }
8226 
8227   if (!IsUnrollMetadata) {
8228     // Add runtime unroll disable metadata.
8229     LLVMContext &Context = L->getHeader()->getContext();
8230     SmallVector<Metadata *, 1> DisableOperands;
8231     DisableOperands.push_back(
8232         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
8233     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
8234     MDs.push_back(DisableNode);
8235     MDNode *NewLoopID = MDNode::get(Context, MDs);
8236     // Set operand 0 to refer to the loop id itself.
8237     NewLoopID->replaceOperandWith(0, NewLoopID);
8238     L->setLoopID(NewLoopID);
8239   }
8240 }
8241 
8242 //===--------------------------------------------------------------------===//
8243 // EpilogueVectorizerMainLoop
8244 //===--------------------------------------------------------------------===//
8245 
8246 /// This function is partially responsible for generating the control flow
8247 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
8248 BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
8249   MDNode *OrigLoopID = OrigLoop->getLoopID();
8250   Loop *Lp = createVectorLoopSkeleton("");
8251 
8252   // Generate the code to check the minimum iteration count of the vector
8253   // epilogue (see below).
8254   EPI.EpilogueIterationCountCheck =
8255       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true);
8256   EPI.EpilogueIterationCountCheck->setName("iter.check");
8257 
8258   // Generate the code to check any assumptions that we've made for SCEV
8259   // expressions.
8260   EPI.SCEVSafetyCheck = emitSCEVChecks(Lp, LoopScalarPreHeader);
8261 
8262   // Generate the code that checks at runtime if arrays overlap. We put the
8263   // checks into a separate block to make the more common case of few elements
8264   // faster.
8265   EPI.MemSafetyCheck = emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
8266 
8267   // Generate the iteration count check for the main loop, *after* the check
8268   // for the epilogue loop, so that the path-length is shorter for the case
8269   // that goes directly through the vector epilogue. The longer-path length for
8270   // the main loop is compensated for, by the gain from vectorizing the larger
8271   // trip count. Note: the branch will get updated later on when we vectorize
8272   // the epilogue.
8273   EPI.MainLoopIterationCountCheck =
8274       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false);
8275 
8276   // Generate the induction variable.
8277   OldInduction = Legal->getPrimaryInduction();
8278   Type *IdxTy = Legal->getWidestInductionType();
8279   Value *StartIdx = ConstantInt::get(IdxTy, 0);
8280   Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
8281   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
8282   EPI.VectorTripCount = CountRoundDown;
8283   Induction =
8284       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
8285                               getDebugLocFromInstOrOperands(OldInduction));
8286 
  // Skip induction resume value creation here; the resume values will be
  // created in the second pass. If we created them here, they wouldn't be used
  // anyway, because the VPlan in the second pass still contains the inductions
  // from the original loop.
8291 
8292   return completeLoopSkeleton(Lp, OrigLoopID);
8293 }
8294 
8295 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
8296   LLVM_DEBUG({
8297     dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
8298            << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue()
8299            << ", Main Loop UF:" << EPI.MainLoopUF
8300            << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue()
8301            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8302   });
8303 }
8304 
8305 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
8306   DEBUG_WITH_TYPE(VerboseDebug, {
8307     dbgs() << "intermediate fn:\n" << *Induction->getFunction() << "\n";
8308   });
8309 }
8310 
8311 BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(
8312     Loop *L, BasicBlock *Bypass, bool ForEpilogue) {
8313   assert(L && "Expected valid Loop.");
8314   assert(Bypass && "Expected valid bypass basic block.");
8315   unsigned VFactor =
8316       ForEpilogue ? EPI.EpilogueVF.getKnownMinValue() : VF.getKnownMinValue();
8317   unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF;
8318   Value *Count = getOrCreateTripCount(L);
8319   // Reuse existing vector loop preheader for TC checks.
8320   // Note that new preheader block is generated for vector loop.
8321   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
8322   IRBuilder<> Builder(TCCheckBlock->getTerminator());
8323 
8324   // Generate code to check if the loop's trip count is less than VF * UF of the
8325   // main vector loop.
8326   auto P =
8327       Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
8328 
8329   Value *CheckMinIters = Builder.CreateICmp(
8330       P, Count, ConstantInt::get(Count->getType(), VFactor * UFactor),
8331       "min.iters.check");
8332 
8333   if (!ForEpilogue)
8334     TCCheckBlock->setName("vector.main.loop.iter.check");
8335 
8336   // Create new preheader for vector loop.
8337   LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
8338                                    DT, LI, nullptr, "vector.ph");
8339 
8340   if (ForEpilogue) {
8341     assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
8342                                  DT->getNode(Bypass)->getIDom()) &&
8343            "TC check is expected to dominate Bypass");
8344 
8345     // Update dominator for Bypass & LoopExit.
8346     DT->changeImmediateDominator(Bypass, TCCheckBlock);
8347     DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
8348 
8349     LoopBypassBlocks.push_back(TCCheckBlock);
8350 
8351     // Save the trip count so we don't have to regenerate it in the
8352     // vec.epilog.iter.check. This is safe to do because the trip count
8353     // generated here dominates the vector epilog iter check.
8354     EPI.TripCount = Count;
8355   }
8356 
8357   ReplaceInstWithInst(
8358       TCCheckBlock->getTerminator(),
8359       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8360 
8361   return TCCheckBlock;
8362 }
8363 
8364 //===--------------------------------------------------------------------===//
8365 // EpilogueVectorizerEpilogueLoop
8366 //===--------------------------------------------------------------------===//
8367 
8368 /// This function is partially responsible for generating the control flow
8369 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
8370 BasicBlock *
8371 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
8372   MDNode *OrigLoopID = OrigLoop->getLoopID();
8373   Loop *Lp = createVectorLoopSkeleton("vec.epilog.");
8374 
  // Now, compare the remaining count; if there aren't enough iterations to
  // execute the vectorized epilogue, skip to the scalar part.
8377   BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader;
8378   VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check");
8379   LoopVectorPreHeader =
8380       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
8381                  LI, nullptr, "vec.epilog.ph");
8382   emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader,
8383                                           VecEpilogueIterationCountCheck);
8384 
8385   // Adjust the control flow taking the state info from the main loop
8386   // vectorization into account.
8387   assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck &&
8388          "expected this to be saved from the previous pass.");
8389   EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith(
8390       VecEpilogueIterationCountCheck, LoopVectorPreHeader);
8391 
8392   DT->changeImmediateDominator(LoopVectorPreHeader,
8393                                EPI.MainLoopIterationCountCheck);
8394 
8395   EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
8396       VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8397 
8398   if (EPI.SCEVSafetyCheck)
8399     EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith(
8400         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8401   if (EPI.MemSafetyCheck)
8402     EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith(
8403         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8404 
8405   DT->changeImmediateDominator(
8406       VecEpilogueIterationCountCheck,
8407       VecEpilogueIterationCountCheck->getSinglePredecessor());
8408 
8409   DT->changeImmediateDominator(LoopScalarPreHeader,
8410                                EPI.EpilogueIterationCountCheck);
8411   DT->changeImmediateDominator(LoopExitBlock, EPI.EpilogueIterationCountCheck);
8412 
8413   // Keep track of bypass blocks, as they feed start values to the induction
8414   // phis in the scalar loop preheader.
8415   if (EPI.SCEVSafetyCheck)
8416     LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck);
8417   if (EPI.MemSafetyCheck)
8418     LoopBypassBlocks.push_back(EPI.MemSafetyCheck);
8419   LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck);
8420 
  // Generate a resume induction for the vector epilogue and put it in the
  // vector epilogue preheader.
8423   Type *IdxTy = Legal->getWidestInductionType();
8424   PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
8425                                          LoopVectorPreHeader->getFirstNonPHI());
8426   EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
8427   EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
8428                            EPI.MainLoopIterationCountCheck);
8429 
8430   // Generate the induction variable.
8431   OldInduction = Legal->getPrimaryInduction();
8432   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
8433   Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
8434   Value *StartIdx = EPResumeVal;
8435   Induction =
8436       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
8437                               getDebugLocFromInstOrOperands(OldInduction));
8438 
8439   // Generate induction resume values. These variables save the new starting
8440   // indexes for the scalar loop. They are used to test if there are any tail
8441   // iterations left once the vector loop has completed.
  // Note that when the vectorized epilogue is skipped due to the iteration
  // count check, the resume value for the induction variable comes from the
  // trip count of the main vector loop; hence the AdditionalBypass argument.
8446   createInductionResumeValues(Lp, CountRoundDown,
8447                               {VecEpilogueIterationCountCheck,
8448                                EPI.VectorTripCount} /* AdditionalBypass */);
8449 
8450   AddRuntimeUnrollDisableMetaData(Lp);
8451   return completeLoopSkeleton(Lp, OrigLoopID);
8452 }
8453 
8454 BasicBlock *
8455 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
8456     Loop *L, BasicBlock *Bypass, BasicBlock *Insert) {
8457 
8458   assert(EPI.TripCount &&
8459          "Expected trip count to have been safed in the first pass.");
8460   assert(
8461       (!isa<Instruction>(EPI.TripCount) ||
8462        DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
8463       "saved trip count does not dominate insertion point.");
8464   Value *TC = EPI.TripCount;
8465   IRBuilder<> Builder(Insert->getTerminator());
8466   Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");
8467 
  // Generate code to check if the loop's remaining trip count is less than
  // (or equal to, when a scalar epilogue is required) VF * UF of the vector
  // epilogue loop.
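  // For example, with an original trip count of 1000 and a main loop VF * UF
  // of 64, the main vector loop covers 960 iterations, leaving a remaining
  // count of 40 to be compared against the epilogue loop's VF * UF here.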
8470   auto P =
8471       Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
8472 
8473   Value *CheckMinIters = Builder.CreateICmp(
8474       P, Count,
8475       ConstantInt::get(Count->getType(),
8476                        EPI.EpilogueVF.getKnownMinValue() * EPI.EpilogueUF),
8477       "min.epilog.iters.check");
8478 
8479   ReplaceInstWithInst(
8480       Insert->getTerminator(),
8481       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8482 
8483   LoopBypassBlocks.push_back(Insert);
8484   return Insert;
8485 }
8486 
8487 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
8488   LLVM_DEBUG({
8489     dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
8490            << "Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue()
8491            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8492   });
8493 }
8494 
8495 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
8496   DEBUG_WITH_TYPE(VerboseDebug, {
8497     dbgs() << "final fn:\n" << *Induction->getFunction() << "\n";
8498   });
8499 }
8500 
8501 bool LoopVectorizationPlanner::getDecisionAndClampRange(
8502     const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
8503   assert(!Range.isEmpty() && "Trying to test an empty VF range.");
8504   bool PredicateAtRangeStart = Predicate(Range.Start);
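  // Illustration: for Range = [4, 32) where the predicate holds for VF = 4
  // and VF = 8 but not for VF = 16, the loop below clamps Range.End to 16 and
  // the function returns true, the predicate's value at Range.Start.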
8505 
8506   for (ElementCount TmpVF = Range.Start * 2;
8507        ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
8508     if (Predicate(TmpVF) != PredicateAtRangeStart) {
8509       Range.End = TmpVF;
8510       break;
8511     }
8512 
8513   return PredicateAtRangeStart;
8514 }
8515 
8516 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
8517 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
8518 /// of VF's starting at a given VF and extending it as much as possible. Each
8519 /// vectorization decision can potentially shorten this sub-range during
8520 /// buildVPlan().
8521 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
8522                                            ElementCount MaxVF) {
8523   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
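  // For example, with MinVF = 4 and MaxVF = 16, the first call may return a
  // plan for the sub-range [4, 8); the next iteration then starts at VF = 8,
  // and so on until the whole range up to and including MaxVF is covered.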
8524   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8525     VFRange SubRange = {VF, MaxVFPlusOne};
8526     VPlans.push_back(buildVPlan(SubRange));
8527     VF = SubRange.End;
8528   }
8529 }
8530 
8531 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
8532                                          VPlanPtr &Plan) {
8533   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
8534 
8535   // Look for cached value.
8536   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
8537   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
8538   if (ECEntryIt != EdgeMaskCache.end())
8539     return ECEntryIt->second;
8540 
8541   VPValue *SrcMask = createBlockInMask(Src, Plan);
8542 
8543   // The terminator has to be a branch inst!
8544   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
8545   assert(BI && "Unexpected terminator found");
8546 
8547   if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
8548     return EdgeMaskCache[Edge] = SrcMask;
8549 
8550   // If source is an exiting block, we know the exit edge is dynamically dead
8551   // in the vector loop, and thus we don't need to restrict the mask.  Avoid
8552   // adding uses of an otherwise potentially dead instruction.
8553   if (OrigLoop->isLoopExiting(Src))
8554     return EdgeMaskCache[Edge] = SrcMask;
8555 
8556   VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition());
8557   assert(EdgeMask && "No Edge Mask found for condition");
8558 
8559   if (BI->getSuccessor(0) != Dst)
8560     EdgeMask = Builder.createNot(EdgeMask);
8561 
8562   if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND.
8563     // The condition is 'SrcMask && EdgeMask', which is equivalent to
8564     // 'select i1 SrcMask, i1 EdgeMask, i1 false'.
8565     // The select version does not introduce new UB if SrcMask is false and
8566     // EdgeMask is poison. Using 'and' here introduces undefined behavior.
8567     VPValue *False = Plan->getOrAddVPValue(
8568         ConstantInt::getFalse(BI->getCondition()->getType()));
8569     EdgeMask = Builder.createSelect(SrcMask, EdgeMask, False);
8570   }
8571 
8572   return EdgeMaskCache[Edge] = EdgeMask;
8573 }
8574 
8575 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
8576   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
8577 
8578   // Look for cached value.
8579   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
8580   if (BCEntryIt != BlockMaskCache.end())
8581     return BCEntryIt->second;
8582 
8583   // All-one mask is modelled as no-mask following the convention for masked
8584   // load/store/gather/scatter. Initialize BlockMask to no-mask.
8585   VPValue *BlockMask = nullptr;
8586 
8587   if (OrigLoop->getHeader() == BB) {
8588     if (!CM.blockNeedsPredication(BB))
8589       return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
8590 
8591     // Create the block in mask as the first non-phi instruction in the block.
8592     VPBuilder::InsertPointGuard Guard(Builder);
8593     auto NewInsertionPoint = Builder.getInsertBlock()->getFirstNonPhi();
8594     Builder.setInsertPoint(Builder.getInsertBlock(), NewInsertionPoint);
8595 
8596     // Introduce the early-exit compare IV <= BTC to form header block mask.
8597     // This is used instead of IV < TC because TC may wrap, unlike BTC.
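    // For example, with an i8 IV and a trip count of 256, TC wraps to 0 while
    // BTC is 255, so "IV <= BTC" remains a correct lane-enable condition.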
8598     // Start by constructing the desired canonical IV.
8599     VPValue *IV = nullptr;
8600     if (Legal->getPrimaryInduction())
8601       IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction());
8602     else {
8603       auto IVRecipe = new VPWidenCanonicalIVRecipe();
8604       Builder.getInsertBlock()->insert(IVRecipe, NewInsertionPoint);
8605       IV = IVRecipe->getVPSingleValue();
8606     }
8607     VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
8608     bool TailFolded = !CM.isScalarEpilogueAllowed();
8609 
8610     if (TailFolded && CM.TTI.emitGetActiveLaneMask()) {
8611       // While ActiveLaneMask is a binary op that consumes the loop tripcount
8612       // as a second argument, we only pass the IV here and extract the
8613       // tripcount from the transform state where codegen of the VP instructions
      // happens.
8615       BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV});
8616     } else {
8617       BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
8618     }
8619     return BlockMaskCache[BB] = BlockMask;
8620   }
8621 
8622   // This is the block mask. We OR all incoming edges.
8623   for (auto *Predecessor : predecessors(BB)) {
8624     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
8625     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
8626       return BlockMaskCache[BB] = EdgeMask;
8627 
8628     if (!BlockMask) { // BlockMask has its initialized nullptr value.
8629       BlockMask = EdgeMask;
8630       continue;
8631     }
8632 
8633     BlockMask = Builder.createOr(BlockMask, EdgeMask);
8634   }
8635 
8636   return BlockMaskCache[BB] = BlockMask;
8637 }
8638 
8639 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I,
8640                                                 ArrayRef<VPValue *> Operands,
8641                                                 VFRange &Range,
8642                                                 VPlanPtr &Plan) {
8643   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
8644          "Must be called with either a load or store");
8645 
8646   auto willWiden = [&](ElementCount VF) -> bool {
8647     if (VF.isScalar())
8648       return false;
8649     LoopVectorizationCostModel::InstWidening Decision =
8650         CM.getWideningDecision(I, VF);
8651     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
8652            "CM decision should be taken at this point.");
8653     if (Decision == LoopVectorizationCostModel::CM_Interleave)
8654       return true;
8655     if (CM.isScalarAfterVectorization(I, VF) ||
8656         CM.isProfitableToScalarize(I, VF))
8657       return false;
8658     return Decision != LoopVectorizationCostModel::CM_Scalarize;
8659   };
8660 
8661   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8662     return nullptr;
8663 
8664   VPValue *Mask = nullptr;
8665   if (Legal->isMaskRequired(I))
8666     Mask = createBlockInMask(I->getParent(), Plan);
8667 
8668   if (LoadInst *Load = dyn_cast<LoadInst>(I))
8669     return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask);
8670 
8671   StoreInst *Store = cast<StoreInst>(I);
8672   return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0],
8673                                             Mask);
8674 }
8675 
8676 VPWidenIntOrFpInductionRecipe *
8677 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi,
8678                                            ArrayRef<VPValue *> Operands) const {
8679   // Check if this is an integer or fp induction. If so, build the recipe that
8680   // produces its scalar and vector values.
8681   InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
8682   if (II.getKind() == InductionDescriptor::IK_IntInduction ||
8683       II.getKind() == InductionDescriptor::IK_FpInduction) {
8684     assert(II.getStartValue() ==
8685            Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8686     const SmallVectorImpl<Instruction *> &Casts = II.getCastInsts();
8687     return new VPWidenIntOrFpInductionRecipe(
8688         Phi, Operands[0], Casts.empty() ? nullptr : Casts.front());
8689   }
8690 
8691   return nullptr;
8692 }
8693 
8694 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate(
8695     TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range,
8696     VPlan &Plan) const {
8697   // Optimize the special case where the source is a constant integer
8698   // induction variable. Notice that we can only optimize the 'trunc' case
8699   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
8700   // (c) other casts depend on pointer size.
8701 
8702   // Determine whether \p K is a truncation based on an induction variable that
8703   // can be optimized.
8704   auto isOptimizableIVTruncate =
8705       [&](Instruction *K) -> std::function<bool(ElementCount)> {
8706     return [=](ElementCount VF) -> bool {
8707       return CM.isOptimizableIVTruncate(K, VF);
8708     };
8709   };
8710 
8711   if (LoopVectorizationPlanner::getDecisionAndClampRange(
8712           isOptimizableIVTruncate(I), Range)) {
8713 
8714     InductionDescriptor II =
8715         Legal->getInductionVars().lookup(cast<PHINode>(I->getOperand(0)));
8716     VPValue *Start = Plan.getOrAddVPValue(II.getStartValue());
8717     return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)),
8718                                              Start, nullptr, I);
8719   }
8720   return nullptr;
8721 }
8722 
8723 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi,
8724                                                 ArrayRef<VPValue *> Operands,
8725                                                 VPlanPtr &Plan) {
8726   // If all incoming values are equal, the incoming VPValue can be used directly
8727   // instead of creating a new VPBlendRecipe.
8728   VPValue *FirstIncoming = Operands[0];
8729   if (all_of(Operands, [FirstIncoming](const VPValue *Inc) {
8730         return FirstIncoming == Inc;
8731       })) {
8732     return Operands[0];
8733   }
8734 
8735   // We know that all PHIs in non-header blocks are converted into selects, so
8736   // we don't have to worry about the insertion order and we can just use the
8737   // builder. At this point we generate the predication tree. There may be
8738   // duplications since this is a simple recursive scan, but future
8739   // optimizations will clean it up.
8740   SmallVector<VPValue *, 2> OperandsWithMask;
8741   unsigned NumIncoming = Phi->getNumIncomingValues();
8742 
8743   for (unsigned In = 0; In < NumIncoming; In++) {
8744     VPValue *EdgeMask =
8745       createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
8746     assert((EdgeMask || NumIncoming == 1) &&
8747            "Multiple predecessors with one having a full mask");
8748     OperandsWithMask.push_back(Operands[In]);
8749     if (EdgeMask)
8750       OperandsWithMask.push_back(EdgeMask);
8751   }
8752   return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask));
8753 }
8754 
8755 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
8756                                                    ArrayRef<VPValue *> Operands,
8757                                                    VFRange &Range) const {
8758 
8759   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8760       [this, CI](ElementCount VF) { return CM.isScalarWithPredication(CI); },
8761       Range);
8762 
8763   if (IsPredicated)
8764     return nullptr;
8765 
8766   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8767   if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
8768              ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
8769              ID == Intrinsic::pseudoprobe ||
8770              ID == Intrinsic::experimental_noalias_scope_decl))
8771     return nullptr;
8772 
8773   auto willWiden = [&](ElementCount VF) -> bool {
8774     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    // The following case may be scalarized depending on the VF.
    // The flag shows whether we use an intrinsic or a plain call for the
    // vectorized version of the instruction, i.e. whether it is beneficial
    // to use the intrinsic call compared to the library call.
8779     bool NeedToScalarize = false;
8780     InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
8781     InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0;
8782     bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
8783     assert((IntrinsicCost.isValid() || CallCost.isValid()) &&
8784            "Either the intrinsic cost or vector call cost must be valid");
8785     return UseVectorIntrinsic || !NeedToScalarize;
8786   };
8787 
8788   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8789     return nullptr;
8790 
8791   ArrayRef<VPValue *> Ops = Operands.take_front(CI->getNumArgOperands());
8792   return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end()));
8793 }
8794 
8795 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
8796   assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
8797          !isa<StoreInst>(I) && "Instruction should have been handled earlier");
  // The instruction should be widened, unless it is scalar after
  // vectorization, scalarization is profitable, or it is predicated.
8800   auto WillScalarize = [this, I](ElementCount VF) -> bool {
8801     return CM.isScalarAfterVectorization(I, VF) ||
8802            CM.isProfitableToScalarize(I, VF) || CM.isScalarWithPredication(I);
8803   };
8804   return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
8805                                                              Range);
8806 }
8807 
8808 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
8809                                            ArrayRef<VPValue *> Operands) const {
8810   auto IsVectorizableOpcode = [](unsigned Opcode) {
8811     switch (Opcode) {
8812     case Instruction::Add:
8813     case Instruction::And:
8814     case Instruction::AShr:
8815     case Instruction::BitCast:
8816     case Instruction::FAdd:
8817     case Instruction::FCmp:
8818     case Instruction::FDiv:
8819     case Instruction::FMul:
8820     case Instruction::FNeg:
8821     case Instruction::FPExt:
8822     case Instruction::FPToSI:
8823     case Instruction::FPToUI:
8824     case Instruction::FPTrunc:
8825     case Instruction::FRem:
8826     case Instruction::FSub:
8827     case Instruction::ICmp:
8828     case Instruction::IntToPtr:
8829     case Instruction::LShr:
8830     case Instruction::Mul:
8831     case Instruction::Or:
8832     case Instruction::PtrToInt:
8833     case Instruction::SDiv:
8834     case Instruction::Select:
8835     case Instruction::SExt:
8836     case Instruction::Shl:
8837     case Instruction::SIToFP:
8838     case Instruction::SRem:
8839     case Instruction::Sub:
8840     case Instruction::Trunc:
8841     case Instruction::UDiv:
8842     case Instruction::UIToFP:
8843     case Instruction::URem:
8844     case Instruction::Xor:
8845     case Instruction::ZExt:
8846       return true;
8847     }
8848     return false;
8849   };
8850 
8851   if (!IsVectorizableOpcode(I->getOpcode()))
8852     return nullptr;
8853 
8854   // Success: widen this instruction.
8855   return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end()));
8856 }
8857 
8858 void VPRecipeBuilder::fixHeaderPhis() {
8859   BasicBlock *OrigLatch = OrigLoop->getLoopLatch();
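  // Add the value incoming from the loop latch to each header phi recipe
  // recorded in PhisToFix. This is deferred until all recipes have been
  // created, because the recipe producing the backedge value may not have
  // existed yet when the phi recipe itself was built.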
8860   for (VPWidenPHIRecipe *R : PhisToFix) {
8861     auto *PN = cast<PHINode>(R->getUnderlyingValue());
8862     VPRecipeBase *IncR =
8863         getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch)));
8864     R->addOperand(IncR->getVPSingleValue());
8865   }
8866 }
8867 
8868 VPBasicBlock *VPRecipeBuilder::handleReplication(
8869     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
8870     VPlanPtr &Plan) {
8871   bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
8872       [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
8873       Range);
8874 
8875   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8876       [&](ElementCount VF) { return CM.isPredicatedInst(I); }, Range);
8877 
8878   auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
8879                                        IsUniform, IsPredicated);
8880   setRecipe(I, Recipe);
8881   Plan->addVPValue(I, Recipe);
8882 
8883   // Find if I uses a predicated instruction. If so, it will use its scalar
8884   // value. Avoid hoisting the insert-element which packs the scalar value into
8885   // a vector value, as that happens iff all users use the vector value.
8886   for (VPValue *Op : Recipe->operands()) {
8887     auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef());
8888     if (!PredR)
8889       continue;
8890     auto *RepR =
8891         cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef());
8892     assert(RepR->isPredicated() &&
8893            "expected Replicate recipe to be predicated");
8894     RepR->setAlsoPack(false);
8895   }
8896 
  // Finalize the recipe for Instr; handle the non-predicated case first.
8898   if (!IsPredicated) {
8899     LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
8900     VPBB->appendRecipe(Recipe);
8901     return VPBB;
8902   }
8903   LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
8904   assert(VPBB->getSuccessors().empty() &&
8905          "VPBB has successors when handling predicated replication.");
8906   // Record predicated instructions for above packing optimizations.
8907   VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
8908   VPBlockUtils::insertBlockAfter(Region, VPBB);
8909   auto *RegSucc = new VPBasicBlock();
8910   VPBlockUtils::insertBlockAfter(RegSucc, Region);
8911   return RegSucc;
8912 }
8913 
8914 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
8915                                                       VPRecipeBase *PredRecipe,
8916                                                       VPlanPtr &Plan) {
8917   // Instructions marked for predication are replicated and placed under an
8918   // if-then construct to prevent side-effects.
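  // The resulting replicate region is a triangle: the <name>.entry block
  // (holding a VPBranchOnMaskRecipe) either branches to <name>.if, which
  // holds the replicated instruction, or falls through directly to
  // <name>.continue, where a VPPredInstPHIRecipe merges the predicated value
  // when the instruction produces a result. Here <name> is "pred." followed
  // by the instruction's opcode name.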
8919 
8920   // Generate recipes to compute the block mask for this region.
8921   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
8922 
8923   // Build the triangular if-then region.
8924   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
8925   assert(Instr->getParent() && "Predicated instruction not in any basic block");
8926   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
8927   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
8928   auto *PHIRecipe = Instr->getType()->isVoidTy()
8929                         ? nullptr
8930                         : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr));
8931   if (PHIRecipe) {
8932     Plan->removeVPValueFor(Instr);
8933     Plan->addVPValue(Instr, PHIRecipe);
8934   }
8935   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
8936   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
8937   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
8938 
8939   // Note: first set Entry as region entry and then connect successors starting
8940   // from it in order, to propagate the "parent" of each VPBasicBlock.
8941   VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
8942   VPBlockUtils::connectBlocks(Pred, Exit);
8943 
8944   return Region;
8945 }
8946 
8947 VPRecipeOrVPValueTy
8948 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
8949                                         ArrayRef<VPValue *> Operands,
8950                                         VFRange &Range, VPlanPtr &Plan) {
8951   // First, check for specific widening recipes that deal with calls, memory
8952   // operations, inductions and Phi nodes.
8953   if (auto *CI = dyn_cast<CallInst>(Instr))
8954     return toVPRecipeResult(tryToWidenCall(CI, Operands, Range));
8955 
8956   if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8957     return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan));
8958 
8959   VPRecipeBase *Recipe;
8960   if (auto Phi = dyn_cast<PHINode>(Instr)) {
8961     if (Phi->getParent() != OrigLoop->getHeader())
8962       return tryToBlend(Phi, Operands, Plan);
8963     if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands)))
8964       return toVPRecipeResult(Recipe);
8965 
8966     if (Legal->isReductionVariable(Phi)) {
8967       RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
8968       assert(RdxDesc.getRecurrenceStartValue() ==
8969              Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8970       VPValue *StartV = Operands[0];
8971 
8972       auto *PhiRecipe = new VPWidenPHIRecipe(Phi, RdxDesc, *StartV);
8973       PhisToFix.push_back(PhiRecipe);
8974       // Record the incoming value from the backedge, so we can add the incoming
8975       // value from the backedge after all recipes have been created.
8976       recordRecipeOf(cast<Instruction>(
8977           Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch())));
8978       return toVPRecipeResult(PhiRecipe);
8979     }
8980 
8981     return toVPRecipeResult(new VPWidenPHIRecipe(Phi));
8982   }
8983 
8984   if (isa<TruncInst>(Instr) &&
8985       (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands,
8986                                                Range, *Plan)))
8987     return toVPRecipeResult(Recipe);
8988 
8989   if (!shouldWiden(Instr, Range))
8990     return nullptr;
8991 
8992   if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
8993     return toVPRecipeResult(new VPWidenGEPRecipe(
8994         GEP, make_range(Operands.begin(), Operands.end()), OrigLoop));
8995 
8996   if (auto *SI = dyn_cast<SelectInst>(Instr)) {
8997     bool InvariantCond =
8998         PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
8999     return toVPRecipeResult(new VPWidenSelectRecipe(
9000         *SI, make_range(Operands.begin(), Operands.end()), InvariantCond));
9001   }
9002 
9003   return toVPRecipeResult(tryToWiden(Instr, Operands));
9004 }
9005 
9006 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
9007                                                         ElementCount MaxVF) {
9008   assert(OrigLoop->isInnermost() && "Inner loop expected.");
9009 
9010   // Collect instructions from the original loop that will become trivially dead
9011   // in the vectorized loop. We don't need to vectorize these instructions. For
9012   // example, original induction update instructions can become dead because we
9013   // separately emit induction "steps" when generating code for the new loop.
9014   // Similarly, we create a new latch condition when setting up the structure
9015   // of the new loop, so the old one can become dead.
9016   SmallPtrSet<Instruction *, 4> DeadInstructions;
9017   collectTriviallyDeadInstructions(DeadInstructions);
9018 
9019   // Add assume instructions we need to drop to DeadInstructions, to prevent
9020   // them from being added to the VPlan.
  // TODO: We only need to drop assumes in blocks that get flattened. If the
9022   // control flow is preserved, we should keep them.
9023   auto &ConditionalAssumes = Legal->getConditionalAssumes();
9024   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
9025 
9026   MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
9027   // Dead instructions do not need sinking. Remove them from SinkAfter.
9028   for (Instruction *I : DeadInstructions)
9029     SinkAfter.erase(I);
9030 
9031   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
9032   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
9033     VFRange SubRange = {VF, MaxVFPlusOne};
9034     VPlans.push_back(
9035         buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
9036     VF = SubRange.End;
9037   }
9038 }
9039 
9040 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
9041     VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
9042     const MapVector<Instruction *, Instruction *> &SinkAfter) {
9043 
9044   SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
9045 
9046   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
9047 
9048   // ---------------------------------------------------------------------------
9049   // Pre-construction: record ingredients whose recipes we'll need to further
9050   // process after constructing the initial VPlan.
9051   // ---------------------------------------------------------------------------
9052 
9053   // Mark instructions we'll need to sink later and their targets as
9054   // ingredients whose recipe we'll need to record.
9055   for (auto &Entry : SinkAfter) {
9056     RecipeBuilder.recordRecipeOf(Entry.first);
9057     RecipeBuilder.recordRecipeOf(Entry.second);
9058   }
9059   for (auto &Reduction : CM.getInLoopReductionChains()) {
9060     PHINode *Phi = Reduction.first;
9061     RecurKind Kind = Legal->getReductionVars()[Phi].getRecurrenceKind();
9062     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9063 
9064     RecipeBuilder.recordRecipeOf(Phi);
9065     for (auto &R : ReductionOperations) {
9066       RecipeBuilder.recordRecipeOf(R);
      // For min/max reductions, where we have a pair of icmp/select, we also
9068       // need to record the ICmp recipe, so it can be removed later.
9069       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
9070         RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
9071     }
9072   }
9073 
9074   // For each interleave group which is relevant for this (possibly trimmed)
9075   // Range, add it to the set of groups to be later applied to the VPlan and add
9076   // placeholders for its members' Recipes which we'll be replacing with a
9077   // single VPInterleaveRecipe.
9078   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
9079     auto applyIG = [IG, this](ElementCount VF) -> bool {
9080       return (VF.isVector() && // Query is illegal for VF == 1
9081               CM.getWideningDecision(IG->getInsertPos(), VF) ==
9082                   LoopVectorizationCostModel::CM_Interleave);
9083     };
9084     if (!getDecisionAndClampRange(applyIG, Range))
9085       continue;
9086     InterleaveGroups.insert(IG);
9087     for (unsigned i = 0; i < IG->getFactor(); i++)
9088       if (Instruction *Member = IG->getMember(i))
9089         RecipeBuilder.recordRecipeOf(Member);
9090   };
9091 
9092   // ---------------------------------------------------------------------------
9093   // Build initial VPlan: Scan the body of the loop in a topological order to
9094   // visit each basic block after having visited its predecessor basic blocks.
9095   // ---------------------------------------------------------------------------
9096 
9097   // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
9098   auto Plan = std::make_unique<VPlan>();
9099   VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
9100   Plan->setEntry(VPBB);
9101 
9102   // Scan the body of the loop in a topological order to visit each basic block
9103   // after having visited its predecessor basic blocks.
9104   LoopBlocksDFS DFS(OrigLoop);
9105   DFS.perform(LI);
9106 
9107   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
9108     // Relevant instructions from basic block BB will be grouped into VPRecipe
9109     // ingredients and fill a new VPBasicBlock.
9110     unsigned VPBBsForBB = 0;
9111     auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
9112     VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
9113     VPBB = FirstVPBBForBB;
9114     Builder.setInsertPoint(VPBB);
9115 
9116     // Introduce each ingredient into VPlan.
    // TODO: Model and preserve debug intrinsics in VPlan.
9118     for (Instruction &I : BB->instructionsWithoutDebug()) {
9119       Instruction *Instr = &I;
9120 
9121       // First filter out irrelevant instructions, to ensure no recipes are
9122       // built for them.
9123       if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
9124         continue;
9125 
9126       SmallVector<VPValue *, 4> Operands;
9127       auto *Phi = dyn_cast<PHINode>(Instr);
9128       if (Phi && Phi->getParent() == OrigLoop->getHeader()) {
9129         Operands.push_back(Plan->getOrAddVPValue(
9130             Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())));
9131       } else {
9132         auto OpRange = Plan->mapToVPValues(Instr->operands());
9133         Operands = {OpRange.begin(), OpRange.end()};
9134       }
9135       if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe(
9136               Instr, Operands, Range, Plan)) {
9137         // If Instr can be simplified to an existing VPValue, use it.
9138         if (RecipeOrValue.is<VPValue *>()) {
9139           auto *VPV = RecipeOrValue.get<VPValue *>();
9140           Plan->addVPValue(Instr, VPV);
9141           // If the re-used value is a recipe, register the recipe for the
9142           // instruction, in case the recipe for Instr needs to be recorded.
9143           if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef()))
9144             RecipeBuilder.setRecipe(Instr, R);
9145           continue;
9146         }
9147         // Otherwise, add the new recipe.
9148         VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>();
9149         for (auto *Def : Recipe->definedValues()) {
9150           auto *UV = Def->getUnderlyingValue();
9151           Plan->addVPValue(UV, Def);
9152         }
9153 
9154         RecipeBuilder.setRecipe(Instr, Recipe);
9155         VPBB->appendRecipe(Recipe);
9156         continue;
9157       }
9158 
      // Otherwise, if all widening options failed, the instruction is to be
      // replicated. This may create a successor for VPBB.
9161       VPBasicBlock *NextVPBB =
9162           RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan);
9163       if (NextVPBB != VPBB) {
9164         VPBB = NextVPBB;
9165         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
9166                                     : "");
9167       }
9168     }
9169   }
9170 
9171   RecipeBuilder.fixHeaderPhis();
9172 
  // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks
  // may also be empty, such as the last one (VPBB), reflecting original
  // basic blocks with no recipes.
9176   VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
9177   assert(PreEntry->empty() && "Expecting empty pre-entry block.");
9178   VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
9179   VPBlockUtils::disconnectBlocks(PreEntry, Entry);
9180   delete PreEntry;
9181 
9182   // ---------------------------------------------------------------------------
9183   // Transform initial VPlan: Apply previously taken decisions, in order, to
9184   // bring the VPlan to its final state.
9185   // ---------------------------------------------------------------------------
9186 
9187   // Apply Sink-After legal constraints.
9188   for (auto &Entry : SinkAfter) {
9189     VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
9190     VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
9191 
9192     auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * {
9193       auto *Region =
9194           dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent());
9195       if (Region && Region->isReplicator()) {
9196         assert(Region->getNumSuccessors() == 1 &&
9197                Region->getNumPredecessors() == 1 && "Expected SESE region!");
9198         assert(R->getParent()->size() == 1 &&
9199                "A recipe in an original replicator region must be the only "
9200                "recipe in its block");
9201         return Region;
9202       }
9203       return nullptr;
9204     };
9205     auto *TargetRegion = GetReplicateRegion(Target);
9206     auto *SinkRegion = GetReplicateRegion(Sink);
9207     if (!SinkRegion) {
9208       // If the sink source is not a replicate region, sink the recipe directly.
9209       if (TargetRegion) {
9210         // The target is in a replication region, make sure to move Sink to
9211         // the block after it, not into the replication region itself.
9212         VPBasicBlock *NextBlock =
9213             cast<VPBasicBlock>(TargetRegion->getSuccessors().front());
9214         Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi());
9215       } else
9216         Sink->moveAfter(Target);
9217       continue;
9218     }
9219 
9220     // The sink source is in a replicate region. Unhook the region from the CFG.
9221     auto *SinkPred = SinkRegion->getSinglePredecessor();
9222     auto *SinkSucc = SinkRegion->getSingleSuccessor();
9223     VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion);
9224     VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc);
9225     VPBlockUtils::connectBlocks(SinkPred, SinkSucc);
9226 
9227     if (TargetRegion) {
9228       // The target recipe is also in a replicate region, move the sink region
9229       // after the target region.
9230       auto *TargetSucc = TargetRegion->getSingleSuccessor();
9231       VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc);
9232       VPBlockUtils::connectBlocks(TargetRegion, SinkRegion);
9233       VPBlockUtils::connectBlocks(SinkRegion, TargetSucc);
9234     } else {
9235       // The sink source is in a replicate region, we need to move the whole
9236       // replicate region, which should only contain a single recipe in the main
9237       // block.
9238       auto *SplitBlock =
9239           Target->getParent()->splitAt(std::next(Target->getIterator()));
9240 
9241       auto *SplitPred = SplitBlock->getSinglePredecessor();
9242 
9243       VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock);
9244       VPBlockUtils::connectBlocks(SplitPred, SinkRegion);
9245       VPBlockUtils::connectBlocks(SinkRegion, SplitBlock);
9246       if (VPBB == SplitPred)
9247         VPBB = SplitBlock;
9248     }
9249   }
9250 
9251   // Interleave memory: for each Interleave Group we marked earlier as relevant
9252   // for this VPlan, replace the Recipes widening its memory instructions with a
9253   // single VPInterleaveRecipe at its insertion point.
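  // For example, for a factor-2 group of loads A[2*i] and A[2*i+1], the two
  // widen-memory recipes created earlier are replaced by a single
  // VPInterleaveRecipe that defines both member values at the group's
  // insertion point.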
9254   for (auto IG : InterleaveGroups) {
9255     auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
9256         RecipeBuilder.getRecipe(IG->getInsertPos()));
9257     SmallVector<VPValue *, 4> StoredValues;
9258     for (unsigned i = 0; i < IG->getFactor(); ++i)
9259       if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i)))
9260         StoredValues.push_back(Plan->getOrAddVPValue(SI->getOperand(0)));
9261 
9262     auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues,
9263                                         Recipe->getMask());
9264     VPIG->insertBefore(Recipe);
9265     unsigned J = 0;
9266     for (unsigned i = 0; i < IG->getFactor(); ++i)
9267       if (Instruction *Member = IG->getMember(i)) {
9268         if (!Member->getType()->isVoidTy()) {
9269           VPValue *OriginalV = Plan->getVPValue(Member);
9270           Plan->removeVPValueFor(Member);
9271           Plan->addVPValue(Member, VPIG->getVPValue(J));
9272           OriginalV->replaceAllUsesWith(VPIG->getVPValue(J));
9273           J++;
9274         }
9275         RecipeBuilder.getRecipe(Member)->eraseFromParent();
9276       }
9277   }
9278 
9279   // Adjust the recipes for any inloop reductions.
9280   if (Range.Start.isVector())
9281     adjustRecipesForInLoopReductions(Plan, RecipeBuilder);
9282 
9283   // Finally, if tail is folded by masking, introduce selects between the phi
9284   // and the live-out instruction of each reduction, at the end of the latch.
9285   if (CM.foldTailByMasking() && !Legal->getReductionVars().empty()) {
9286     Builder.setInsertPoint(VPBB);
9287     auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
9288     for (auto &Reduction : Legal->getReductionVars()) {
9289       if (CM.isInLoopReduction(Reduction.first))
9290         continue;
9291       VPValue *Phi = Plan->getOrAddVPValue(Reduction.first);
9292       VPValue *Red = Plan->getOrAddVPValue(Reduction.second.getLoopExitInstr());
9293       Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi});
9294     }
9295   }
9296 
9297   VPlanTransforms::sinkScalarOperands(*Plan);
9298 
9299   std::string PlanName;
9300   raw_string_ostream RSO(PlanName);
9301   ElementCount VF = Range.Start;
9302   Plan->addVF(VF);
9303   RSO << "Initial VPlan for VF={" << VF;
9304   for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) {
9305     Plan->addVF(VF);
9306     RSO << "," << VF;
9307   }
9308   RSO << "},UF>=1";
9309   RSO.flush();
9310   Plan->setName(PlanName);
9311 
9312   return Plan;
9313 }
9314 
9315 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
  // Outer loop handling: outer loops may require CFG and instruction-level
  // transformations before even evaluating whether vectorization is
  // profitable.
9318   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
9319   // the vectorization pipeline.
9320   assert(!OrigLoop->isInnermost());
9321   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
9322 
9323   // Create new empty VPlan
9324   auto Plan = std::make_unique<VPlan>();
9325 
9326   // Build hierarchical CFG
9327   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
9328   HCFGBuilder.buildHierarchicalCFG();
9329 
9330   for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
9331        VF *= 2)
9332     Plan->addVF(VF);
9333 
9334   if (EnableVPlanPredication) {
9335     VPlanPredicator VPP(*Plan);
9336     VPP.predicate();
9337 
9338     // Avoid running transformation to recipes until masked code generation in
9339     // VPlan-native path is in place.
9340     return Plan;
9341   }
9342 
9343   SmallPtrSet<Instruction *, 1> DeadInstructions;
9344   VPlanTransforms::VPInstructionsToVPRecipes(OrigLoop, Plan,
9345                                              Legal->getInductionVars(),
9346                                              DeadInstructions, *PSE.getSE());
9347   return Plan;
9348 }
9349 
9350 // Adjust the recipes for any inloop reductions. The chain of instructions
9351 // leading from the loop exit instr to the phi need to be converted to
9352 // reductions, with one operand being vector and the other being the scalar
9353 // reduction chain.
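// For example, an integer add reduction with the chain
//   phi -> add.1 -> add.2 (loop exit value)
// has each add replaced by a VPReductionRecipe whose chain operand is the
// previous link in the chain (the phi or the previous reduction) and whose
// vector operand is the widened other operand of the add.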
9354 void LoopVectorizationPlanner::adjustRecipesForInLoopReductions(
9355     VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder) {
9356   for (auto &Reduction : CM.getInLoopReductionChains()) {
9357     PHINode *Phi = Reduction.first;
9358     RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
9359     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9360 
    // ReductionOperations are ordered top-down from the phi's use to the
    // LoopExitValue. We keep track of the previous item (the Chain) to tell
9363     // which of the two operands will remain scalar and which will be reduced.
9364     // For minmax the chain will be the select instructions.
9365     Instruction *Chain = Phi;
9366     for (Instruction *R : ReductionOperations) {
9367       VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
9368       RecurKind Kind = RdxDesc.getRecurrenceKind();
9369 
9370       VPValue *ChainOp = Plan->getVPValue(Chain);
9371       unsigned FirstOpId;
9372       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9373         assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
9374                "Expected to replace a VPWidenSelectSC");
9375         FirstOpId = 1;
9376       } else {
9377         assert(isa<VPWidenRecipe>(WidenRecipe) &&
9378                "Expected to replace a VPWidenSC");
9379         FirstOpId = 0;
9380       }
9381       unsigned VecOpId =
9382           R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
9383       VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));
9384 
9385       auto *CondOp = CM.foldTailByMasking()
9386                          ? RecipeBuilder.createBlockInMask(R->getParent(), Plan)
9387                          : nullptr;
9388       VPReductionRecipe *RedRecipe = new VPReductionRecipe(
9389           &RdxDesc, R, ChainOp, VecOp, CondOp, TTI);
9390       WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
9391       Plan->removeVPValueFor(R);
9392       Plan->addVPValue(R, RedRecipe);
9393       WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator());
9395       WidenRecipe->eraseFromParent();
9396 
9397       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9398         VPRecipeBase *CompareRecipe =
9399             RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
9400         assert(isa<VPWidenRecipe>(CompareRecipe) &&
9401                "Expected to replace a VPWidenSC");
9402         assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 &&
9403                "Expected no remaining users");
9404         CompareRecipe->eraseFromParent();
9405       }
9406       Chain = R;
9407     }
9408   }
9409 }
9410 
9411 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
9412 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
9413                                VPSlotTracker &SlotTracker) const {
9414   O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
9415   IG->getInsertPos()->printAsOperand(O, false);
9416   O << ", ";
9417   getAddr()->printAsOperand(O, SlotTracker);
9418   VPValue *Mask = getMask();
9419   if (Mask) {
9420     O << ", ";
9421     Mask->printAsOperand(O, SlotTracker);
9422   }
9423   for (unsigned i = 0; i < IG->getFactor(); ++i)
9424     if (Instruction *I = IG->getMember(i))
9425       O << "\n" << Indent << "  " << VPlanIngredient(I) << " " << i;
9426 }
9427 #endif
9428 
9429 void VPWidenCallRecipe::execute(VPTransformState &State) {
9430   State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this,
9431                                   *this, State);
9432 }
9433 
9434 void VPWidenSelectRecipe::execute(VPTransformState &State) {
9435   State.ILV->widenSelectInstruction(*cast<SelectInst>(getUnderlyingInstr()),
9436                                     this, *this, InvariantCond, State);
9437 }
9438 
9439 void VPWidenRecipe::execute(VPTransformState &State) {
9440   State.ILV->widenInstruction(*getUnderlyingInstr(), this, *this, State);
9441 }
9442 
9443 void VPWidenGEPRecipe::execute(VPTransformState &State) {
9444   State.ILV->widenGEP(cast<GetElementPtrInst>(getUnderlyingInstr()), this,
9445                       *this, State.UF, State.VF, IsPtrLoopInvariant,
9446                       IsIndexLoopInvariant, State);
9447 }
9448 
9449 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
9450   assert(!State.Instance && "Int or FP induction being replicated.");
9451   State.ILV->widenIntOrFpInduction(IV, getStartValue()->getLiveInIRValue(),
9452                                    getTruncInst(), getVPValue(0),
9453                                    getCastValue(), State);
9454 }
9455 
9456 void VPWidenPHIRecipe::execute(VPTransformState &State) {
9457   State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), RdxDesc,
9458                                  this, State);
9459 }
9460 
9461 void VPBlendRecipe::execute(VPTransformState &State) {
9462   State.ILV->setDebugLocFromInst(State.Builder, Phi);
9463   // We know that all PHIs in non-header blocks are converted into
9464   // selects, so we don't have to worry about the insertion order and we
9465   // can just use the builder.
9466   // At this point we generate the predication tree. There may be
9467   // duplications since this is a simple recursive scan, but future
9468   // optimizations will clean it up.
9469 
9470   unsigned NumIncoming = getNumIncomingValues();
9471 
9472   // Generate a sequence of selects of the form:
9473   // SELECT(Mask3, In3,
9474   //        SELECT(Mask2, In2,
9475   //               SELECT(Mask1, In1,
9476   //                      In0)))
  // Note that Mask0 is never used: lanes for which no path reaches this phi
  // are essentially undef and are taken from In0.
9479   InnerLoopVectorizer::VectorParts Entry(State.UF);
9480   for (unsigned In = 0; In < NumIncoming; ++In) {
9481     for (unsigned Part = 0; Part < State.UF; ++Part) {
9482       // We might have single edge PHIs (blocks) - use an identity
9483       // 'select' for the first PHI operand.
9484       Value *In0 = State.get(getIncomingValue(In), Part);
9485       if (In == 0)
9486         Entry[Part] = In0; // Initialize with the first incoming value.
9487       else {
9488         // Select between the current value and the previous incoming edge
9489         // based on the incoming mask.
9490         Value *Cond = State.get(getMask(In), Part);
9491         Entry[Part] =
9492             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
9493       }
9494     }
9495   }
9496   for (unsigned Part = 0; Part < State.UF; ++Part)
9497     State.set(this, Entry[Part], Part);
9498 }
9499 
9500 void VPInterleaveRecipe::execute(VPTransformState &State) {
9501   assert(!State.Instance && "Interleave group being replicated.");
9502   State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(),
9503                                       getStoredValues(), getMask());
9504 }
9505 
9506 void VPReductionRecipe::execute(VPTransformState &State) {
9507   assert(!State.Instance && "Reduction being replicated.");
9508   Value *PrevInChain = State.get(getChainOp(), 0);
9509   for (unsigned Part = 0; Part < State.UF; ++Part) {
9510     RecurKind Kind = RdxDesc->getRecurrenceKind();
9511     bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc);
9512     Value *NewVecOp = State.get(getVecOp(), Part);
9513     if (VPValue *Cond = getCondOp()) {
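      // Replace masked-off lanes with the reduction's identity value (e.g. 0
      // for an integer add), so they do not affect the reduced result.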
9514       Value *NewCond = State.get(Cond, Part);
9515       VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
9516       Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
9517           Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags());
9518       Constant *IdenVec =
9519           ConstantVector::getSplat(VecTy->getElementCount(), Iden);
9520       Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
9521       NewVecOp = Select;
9522     }
9523     Value *NewRed;
9524     Value *NextInChain;
9525     if (IsOrdered) {
9526       NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp,
9527                                       PrevInChain);
9528       PrevInChain = NewRed;
9529     } else {
9530       PrevInChain = State.get(getChainOp(), Part);
9531       NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
9532     }
9533     if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9534       NextInChain =
9535           createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
9536                          NewRed, PrevInChain);
9537     } else if (IsOrdered)
9538       NextInChain = NewRed;
9539     else {
9540       NextInChain = State.Builder.CreateBinOp(
9541           (Instruction::BinaryOps)getUnderlyingInstr()->getOpcode(), NewRed,
9542           PrevInChain);
9543     }
9544     State.set(this, NextInChain, Part);
9545   }
9546 }
9547 
9548 void VPReplicateRecipe::execute(VPTransformState &State) {
9549   if (State.Instance) { // Generate a single instance.
9550     assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
9551     State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this,
9552                                     *State.Instance, IsPredicated, State);
9553     // Insert scalar instance packing it into a vector.
9554     if (AlsoPack && State.VF.isVector()) {
9555       // If we're constructing lane 0, initialize to start from poison.
9556       if (State.Instance->Lane.isFirstLane()) {
9557         assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
9558         Value *Poison = PoisonValue::get(
9559             VectorType::get(getUnderlyingValue()->getType(), State.VF));
9560         State.set(this, Poison, State.Instance->Part);
9561       }
9562       State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
9563     }
9564     return;
9565   }
9566 
  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane for
  // each of the UF parts.
9570   unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
9571   assert((!State.VF.isScalable() || IsUniform) &&
9572          "Can't scalarize a scalable vector");
9573   for (unsigned Part = 0; Part < State.UF; ++Part)
9574     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
9575       State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this,
9576                                       VPIteration(Part, Lane), IsPredicated,
9577                                       State);
9578 }
9579 
9580 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
9581   assert(State.Instance && "Branch on Mask works only on single instance.");
9582 
9583   unsigned Part = State.Instance->Part;
9584   unsigned Lane = State.Instance->Lane.getKnownLane();
9585 
9586   Value *ConditionBit = nullptr;
9587   VPValue *BlockInMask = getMask();
9588   if (BlockInMask) {
9589     ConditionBit = State.get(BlockInMask, Part);
9590     if (ConditionBit->getType()->isVectorTy())
9591       ConditionBit = State.Builder.CreateExtractElement(
9592           ConditionBit, State.Builder.getInt32(Lane));
9593   } else // Block in mask is all-one.
9594     ConditionBit = State.Builder.getTrue();
9595 
9596   // Replace the temporary unreachable terminator with a new conditional branch,
9597   // whose two destinations will be set later when they are created.
9598   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
9599   assert(isa<UnreachableInst>(CurrentTerminator) &&
9600          "Expected to replace unreachable terminator with conditional branch.");
9601   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
9602   CondBr->setSuccessor(0, nullptr);
9603   ReplaceInstWithInst(CurrentTerminator, CondBr);
9604 }
9605 
9606 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
9607   assert(State.Instance && "Predicated instruction PHI works per instance.");
9608   Instruction *ScalarPredInst =
9609       cast<Instruction>(State.get(getOperand(0), *State.Instance));
9610   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
9611   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
9612   assert(PredicatingBB && "Predicated block has no single predecessor.");
9613   assert(isa<VPReplicateRecipe>(getOperand(0)) &&
9614          "operand must be VPReplicateRecipe");
9615 
9616   // By current pack/unpack logic we need to generate only a single phi node: if
9617   // a vector value for the predicated instruction exists at this point it means
9618   // the instruction has vector users only, and a phi for the vector value is
9619   // needed. In this case the recipe of the predicated instruction is marked to
9620   // also do that packing, thereby "hoisting" the insert-element sequence.
9621   // Otherwise, a phi node for the scalar value is needed.
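  // Illustrative shape of the vector case (names are invented): if the
  // predicated block produced
  //   %v.new = insertelement <4 x i32> %v.old, i32 %s, i32 <Lane>
  // then the merge block gets
  //   %v.phi = phi <4 x i32> [ %v.old, %predicating ], [ %v.new, %predicated ]
  // and %v.phi replaces %v.old as the vector the next predicated lane inserts
  // into.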
9622   unsigned Part = State.Instance->Part;
9623   if (State.hasVectorValue(getOperand(0), Part)) {
9624     Value *VectorValue = State.get(getOperand(0), Part);
9625     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
9626     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
9627     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
9628     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
9629     if (State.hasVectorValue(this, Part))
9630       State.reset(this, VPhi, Part);
9631     else
9632       State.set(this, VPhi, Part);
9633     // NOTE: Currently we need to update the value of the operand, so the next
9634     // predicated iteration inserts its generated value in the correct vector.
9635     State.reset(getOperand(0), VPhi, Part);
9636   } else {
9637     Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType();
9638     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
9639     Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
9640                      PredicatingBB);
9641     Phi->addIncoming(ScalarPredInst, PredicatedBB);
9642     if (State.hasScalarValue(this, *State.Instance))
9643       State.reset(this, Phi, *State.Instance);
9644     else
9645       State.set(this, Phi, *State.Instance);
9646     // NOTE: Currently we need to update the value of the operand, so the next
9647     // predicated iteration inserts its generated value in the correct vector.
9648     State.reset(getOperand(0), Phi, *State.Instance);
9649   }
9650 }
9651 
9652 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
9653   VPValue *StoredValue = isStore() ? getStoredValue() : nullptr;
9654   State.ILV->vectorizeMemoryInstruction(
9655       &Ingredient, State, StoredValue ? nullptr : getVPSingleValue(), getAddr(),
9656       StoredValue, getMask());
9657 }
9658 
9659 // Determine how to lower the scalar epilogue, which depends on 1) optimising
9660 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
9661 // predication, and 4) a TTI hook that analyses whether the loop is suitable
9662 // for predication.
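// For example (illustrative), a function compiled with -Os returns
// CM_ScalarEpilogueNotAllowedOptSize from 1) even if a predication hint or the
// TTI hook in 3) or 4) would otherwise request predication.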
9663 static ScalarEpilogueLowering getScalarEpilogueLowering(
9664     Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
9665     BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
9666     AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
9667     LoopVectorizationLegality &LVL) {
9668   // 1) OptSize takes precedence over all other options, i.e. if this is set,
9669   // don't look at hints or options, and don't request a scalar epilogue.
9670   // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
9671   // LoopAccessInfo (due to code dependency and not being able to reliably get
9672   // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
9673   // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
9674   // versioning when the vectorization is forced, unlike hasOptSize. So revert
9675   // back to the old way and vectorize with versioning when forced. See D81345.)
9676   if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
9677                                                       PGSOQueryType::IRPass) &&
9678                           Hints.getForce() != LoopVectorizeHints::FK_Enabled))
9679     return CM_ScalarEpilogueNotAllowedOptSize;
9680 
9681   // 2) If set, obey the directives
9682   if (PreferPredicateOverEpilogue.getNumOccurrences()) {
9683     switch (PreferPredicateOverEpilogue) {
9684     case PreferPredicateTy::ScalarEpilogue:
9685       return CM_ScalarEpilogueAllowed;
9686     case PreferPredicateTy::PredicateElseScalarEpilogue:
9687       return CM_ScalarEpilogueNotNeededUsePredicate;
9688     case PreferPredicateTy::PredicateOrDontVectorize:
9689       return CM_ScalarEpilogueNotAllowedUsePredicate;
9690     };
9691   }
9692 
9693   // 3) If set, obey the hints
9694   switch (Hints.getPredicate()) {
9695   case LoopVectorizeHints::FK_Enabled:
9696     return CM_ScalarEpilogueNotNeededUsePredicate;
9697   case LoopVectorizeHints::FK_Disabled:
9698     return CM_ScalarEpilogueAllowed;
9699   };
9700 
9701   // 4) if the TTI hook indicates this is profitable, request predication.
9702   if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
9703                                        LVL.getLAI()))
9704     return CM_ScalarEpilogueNotNeededUsePredicate;
9705 
9706   return CM_ScalarEpilogueAllowed;
9707 }
9708 
9709 Value *VPTransformState::get(VPValue *Def, unsigned Part) {
  // If Values have been set for this Def, return the one relevant for \p Part.
9711   if (hasVectorValue(Def, Part))
9712     return Data.PerPartOutput[Def][Part];
9713 
9714   if (!hasScalarValue(Def, {Part, 0})) {
9715     Value *IRV = Def->getLiveInIRValue();
9716     Value *B = ILV->getBroadcastInstrs(IRV);
9717     set(Def, B, Part);
9718     return B;
9719   }
9720 
9721   Value *ScalarValue = get(Def, {Part, 0});
9722   // If we aren't vectorizing, we can just copy the scalar map values over
9723   // to the vector map.
9724   if (VF.isScalar()) {
9725     set(Def, ScalarValue, Part);
9726     return ScalarValue;
9727   }
9728 
9729   auto *RepR = dyn_cast<VPReplicateRecipe>(Def);
9730   bool IsUniform = RepR && RepR->isUniform();
9731 
9732   unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
9733   // Check if there is a scalar value for the selected lane.
9734   if (!hasScalarValue(Def, {Part, LastLane})) {
9735     // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform.
9736     assert(isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) &&
9737            "unexpected recipe found to be invariant");
9738     IsUniform = true;
9739     LastLane = 0;
9740   }
9741 
9742   auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
9743   // Set the insert point after the last scalarized instruction or after the
9744   // last PHI, if LastInst is a PHI. This ensures the insertelement sequence
9745   // will directly follow the scalar definitions.
9746   auto OldIP = Builder.saveIP();
9747   auto NewIP =
9748       isa<PHINode>(LastInst)
9749           ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI())
9750           : std::next(BasicBlock::iterator(LastInst));
9751   Builder.SetInsertPoint(&*NewIP);
9752 
9753   // However, if we are vectorizing, we need to construct the vector values.
9754   // If the value is known to be uniform after vectorization, we can just
9755   // broadcast the scalar value corresponding to lane zero for each unroll
9756   // iteration. Otherwise, we construct the vector values using
9757   // insertelement instructions. Since the resulting vectors are stored in
9758   // State, we will only generate the insertelements once.
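  // For illustration (invented names), with VF = 4 a non-uniform value is
  // packed roughly as:
  //   %p0 = insertelement <4 x i32> poison, i32 %s0, i32 0
  //   %p1 = insertelement <4 x i32> %p0,    i32 %s1, i32 1
  //   ...
  // with the last insertelement recorded as the per-part vector value, while a
  // uniform value is simply splat via getBroadcastInstrs.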
9759   Value *VectorValue = nullptr;
9760   if (IsUniform) {
9761     VectorValue = ILV->getBroadcastInstrs(ScalarValue);
9762     set(Def, VectorValue, Part);
9763   } else {
    // Initialize packing with insertelements to start from poison.
    assert(!VF.isScalable() && "VF is assumed to be non scalable.");
    Value *Poison = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
    set(Def, Poison, Part);
9768     for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
9769       ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
9770     VectorValue = get(Def, Part);
9771   }
9772   Builder.restoreIP(OldIP);
9773   return VectorValue;
9774 }
9775 
// Process the loop in the VPlan-native vectorization path. This path builds
// VPlan upfront in the vectorization pipeline, which allows applying
// VPlan-to-VPlan transformations from the very beginning without modifying the
// input LLVM IR.
9780 static bool processLoopInVPlanNativePath(
9781     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
9782     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
9783     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
9784     OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
9785     ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints,
9786     LoopVectorizationRequirements &Requirements) {
9787 
9788   if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
9789     LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
9790     return false;
9791   }
9792   assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
9793   Function *F = L->getHeader()->getParent();
9794   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
9795 
9796   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
9797       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
9798 
9799   LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
9800                                 &Hints, IAI);
9801   // Use the planner for outer loop vectorization.
9802   // TODO: CM is not used at this point inside the planner. Turn CM into an
9803   // optional argument if we don't need it in the future.
9804   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints,
9805                                Requirements, ORE);
9806 
9807   // Get user vectorization factor.
9808   ElementCount UserVF = Hints.getWidth();
9809 
9810   // Plan how to best vectorize, return the best VF and its cost.
9811   const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
9812 
9813   // If we are stress testing VPlan builds, do not attempt to generate vector
9814   // code. Masked vector code generation support will follow soon.
9815   // Also, do not attempt to vectorize if no vector code will be produced.
9816   if (VPlanBuildStressTest || EnableVPlanPredication ||
9817       VectorizationFactor::Disabled() == VF)
9818     return false;
9819 
9820   LVP.setBestPlan(VF.Width, 1);
9821 
9822   {
9823     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
9824                              F->getParent()->getDataLayout());
9825     InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
9826                            &CM, BFI, PSI, Checks);
9827     LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
9828                       << L->getHeader()->getParent()->getName() << "\"\n");
9829     LVP.executePlan(LB, DT);
9830   }
9831 
9832   // Mark the loop as already vectorized to avoid vectorizing again.
9833   Hints.setAlreadyVectorized();
9834   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
9835   return true;
9836 }
9837 
// Emit a remark if there are stores to floats that required a floating point
// extension. If the vectorized loop was generated with such conversions, there
// will be a performance penalty from the conversion overhead and the change in
// the vector width.
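// A typical (illustrative) source pattern that triggers the remark:
//   float *A, *B; double D;
//   A[i] = B[i] * D; // B[i] is extended to double and the result truncated.
// The fpext feeding the chain above the float store is what the walk below
// looks for.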
9842 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
9843   SmallVector<Instruction *, 4> Worklist;
9844   for (BasicBlock *BB : L->getBlocks()) {
9845     for (Instruction &Inst : *BB) {
9846       if (auto *S = dyn_cast<StoreInst>(&Inst)) {
9847         if (S->getValueOperand()->getType()->isFloatTy())
9848           Worklist.push_back(S);
9849       }
9850     }
9851   }
9852 
  // Traverse the floating point stores upwards, searching for floating point
  // conversions.
9855   SmallPtrSet<const Instruction *, 4> Visited;
9856   SmallPtrSet<const Instruction *, 4> EmittedRemark;
9857   while (!Worklist.empty()) {
9858     auto *I = Worklist.pop_back_val();
9859     if (!L->contains(I))
9860       continue;
9861     if (!Visited.insert(I).second)
9862       continue;
9863 
9864     // Emit a remark if the floating point store required a floating
9865     // point conversion.
9866     // TODO: More work could be done to identify the root cause such as a
9867     // constant or a function return type and point the user to it.
9868     if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
9869       ORE->emit([&]() {
9870         return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
9871                                           I->getDebugLoc(), L->getHeader())
9872                << "floating point conversion changes vector width. "
9873                << "Mixed floating point precision requires an up/down "
9874                << "cast that will negatively impact performance.";
9875       });
9876 
9877     for (Use &Op : I->operands())
9878       if (auto *OpI = dyn_cast<Instruction>(Op))
9879         Worklist.push_back(OpI);
9880   }
9881 }
9882 
9883 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
9884     : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
9885                                !EnableLoopInterleaving),
9886       VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
9887                               !EnableLoopVectorization) {}
9888 
9889 bool LoopVectorizePass::processLoop(Loop *L) {
9890   assert((EnableVPlanNativePath || L->isInnermost()) &&
9891          "VPlan-native path is not enabled. Only process inner loops.");
9892 
9893 #ifndef NDEBUG
9894   const std::string DebugLocStr = getDebugLocString(L);
9895 #endif /* NDEBUG */
9896 
9897   LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
9898                     << L->getHeader()->getParent()->getName() << "\" from "
9899                     << DebugLocStr << "\n");
9900 
9901   LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE);
9902 
9903   LLVM_DEBUG(
9904       dbgs() << "LV: Loop hints:"
9905              << " force="
9906              << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
9907                      ? "disabled"
9908                      : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
9909                             ? "enabled"
9910                             : "?"))
9911              << " width=" << Hints.getWidth()
9912              << " interleave=" << Hints.getInterleave() << "\n");
9913 
9914   // Function containing loop
9915   Function *F = L->getHeader()->getParent();
9916 
  // Looking at the diagnostic output is the only way to determine if a loop
  // was vectorized (other than looking at the IR or machine code), so it
  // is important to generate an optimization remark for each loop. Most of
  // these messages are generated as OptimizationRemarkAnalysis. The less
  // verbose OptimizationRemark and OptimizationRemarkMissed remarks report
  // vectorized loops and unvectorized loops that may benefit from
  // vectorization, respectively.
9924 
9925   if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
9926     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
9927     return false;
9928   }
9929 
9930   PredicatedScalarEvolution PSE(*SE, *L);
9931 
9932   // Check if it is legal to vectorize the loop.
9933   LoopVectorizationRequirements Requirements;
9934   LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
9935                                 &Requirements, &Hints, DB, AC, BFI, PSI);
9936   if (!LVL.canVectorize(EnableVPlanNativePath)) {
9937     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
9938     Hints.emitRemarkWithHints();
9939     return false;
9940   }
9941 
9942   // Check the function attributes and profiles to find out if this function
9943   // should be optimized for size.
9944   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
9945       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);
9946 
9947   // Entrance to the VPlan-native vectorization path. Outer loops are processed
9948   // here. They may require CFG and instruction level transformations before
9949   // even evaluating whether vectorization is profitable. Since we cannot modify
9950   // the incoming IR, we need to build VPlan upfront in the vectorization
9951   // pipeline.
9952   if (!L->isInnermost())
9953     return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
9954                                         ORE, BFI, PSI, Hints, Requirements);
9955 
9956   assert(L->isInnermost() && "Inner loop expected.");
9957 
9958   // Check the loop for a trip count threshold: vectorize loops with a tiny trip
9959   // count by optimizing for size, to minimize overheads.
9960   auto ExpectedTC = getSmallBestKnownTC(*SE, L);
9961   if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
9962     LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
9963                       << "This loop is worth vectorizing only if no scalar "
9964                       << "iteration overheads are incurred.");
9965     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
9966       LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
9967     else {
9968       LLVM_DEBUG(dbgs() << "\n");
9969       SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
9970     }
9971   }
9972 
9973   // Check the function attributes to see if implicit floats are allowed.
9974   // FIXME: This check doesn't seem possibly correct -- what if the loop is
9975   // an integer loop and the vector instructions selected are purely integer
9976   // vector instructions?
9977   if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
9978     reportVectorizationFailure(
9979         "Can't vectorize when the NoImplicitFloat attribute is used",
9980         "loop not vectorized due to NoImplicitFloat attribute",
9981         "NoImplicitFloat", ORE, L);
9982     Hints.emitRemarkWithHints();
9983     return false;
9984   }
9985 
9986   // Check if the target supports potentially unsafe FP vectorization.
9987   // FIXME: Add a check for the type of safety issue (denormal, signaling)
9988   // for the target we're vectorizing for, to make sure none of the
9989   // additional fp-math flags can help.
9990   if (Hints.isPotentiallyUnsafe() &&
9991       TTI->isFPVectorizationPotentiallyUnsafe()) {
9992     reportVectorizationFailure(
9993         "Potentially unsafe FP op prevents vectorization",
9994         "loop not vectorized due to unsafe FP support.",
9995         "UnsafeFP", ORE, L);
9996     Hints.emitRemarkWithHints();
9997     return false;
9998   }
9999 
10000   if (!LVL.canVectorizeFPMath(EnableStrictReductions)) {
10001     ORE->emit([&]() {
10002       auto *ExactFPMathInst = Requirements.getExactFPInst();
10003       return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
10004                                                  ExactFPMathInst->getDebugLoc(),
10005                                                  ExactFPMathInst->getParent())
10006              << "loop not vectorized: cannot prove it is safe to reorder "
10007                 "floating-point operations";
10008     });
10009     LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
10010                          "reorder floating-point operations\n");
10011     Hints.emitRemarkWithHints();
10012     return false;
10013   }
10014 
10015   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
10016   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
10017 
10018   // If an override option has been passed in for interleaved accesses, use it.
10019   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
10020     UseInterleaved = EnableInterleavedMemAccesses;
10021 
10022   // Analyze interleaved memory accesses.
10023   if (UseInterleaved) {
10024     IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
10025   }
10026 
10027   // Use the cost model.
10028   LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
10029                                 F, &Hints, IAI);
10030   CM.collectValuesToIgnore();
10031 
10032   // Use the planner for vectorization.
10033   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints,
10034                                Requirements, ORE);
10035 
10036   // Get user vectorization factor and interleave count.
10037   ElementCount UserVF = Hints.getWidth();
10038   unsigned UserIC = Hints.getInterleave();
10039 
10040   // Plan how to best vectorize, return the best VF and its cost.
10041   Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
10042 
10043   VectorizationFactor VF = VectorizationFactor::Disabled();
10044   unsigned IC = 1;
10045 
10046   if (MaybeVF) {
10047     VF = *MaybeVF;
10048     // Select the interleave count.
10049     IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue());
10050   }
10051 
10052   // Identify the diagnostic messages that should be produced.
10053   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
10054   bool VectorizeLoop = true, InterleaveLoop = true;
10055   if (VF.Width.isScalar()) {
10056     LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
10057     VecDiagMsg = std::make_pair(
10058         "VectorizationNotBeneficial",
10059         "the cost-model indicates that vectorization is not beneficial");
10060     VectorizeLoop = false;
10061   }
10062 
10063   if (!MaybeVF && UserIC > 1) {
10064     // Tell the user interleaving was avoided up-front, despite being explicitly
10065     // requested.
10066     LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
10067                          "interleaving should be avoided up front\n");
10068     IntDiagMsg = std::make_pair(
10069         "InterleavingAvoided",
10070         "Ignoring UserIC, because interleaving was avoided up front");
10071     InterleaveLoop = false;
10072   } else if (IC == 1 && UserIC <= 1) {
10073     // Tell the user interleaving is not beneficial.
10074     LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
10075     IntDiagMsg = std::make_pair(
10076         "InterleavingNotBeneficial",
10077         "the cost-model indicates that interleaving is not beneficial");
10078     InterleaveLoop = false;
10079     if (UserIC == 1) {
10080       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
10081       IntDiagMsg.second +=
10082           " and is explicitly disabled or interleave count is set to 1";
10083     }
10084   } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
10086     LLVM_DEBUG(
10087         dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
10088     IntDiagMsg = std::make_pair(
10089         "InterleavingBeneficialButDisabled",
10090         "the cost-model indicates that interleaving is beneficial "
10091         "but is explicitly disabled or interleave count is set to 1");
10092     InterleaveLoop = false;
10093   }
10094 
10095   // Override IC if user provided an interleave count.
10096   IC = UserIC > 0 ? UserIC : IC;
10097 
10098   // Emit diagnostic messages, if any.
10099   const char *VAPassName = Hints.vectorizeAnalysisPassName();
10100   if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
10102     ORE->emit([&]() {
10103       return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
10104                                       L->getStartLoc(), L->getHeader())
10105              << VecDiagMsg.second;
10106     });
10107     ORE->emit([&]() {
10108       return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
10109                                       L->getStartLoc(), L->getHeader())
10110              << IntDiagMsg.second;
10111     });
10112     return false;
10113   } else if (!VectorizeLoop && InterleaveLoop) {
10114     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10115     ORE->emit([&]() {
10116       return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
10117                                         L->getStartLoc(), L->getHeader())
10118              << VecDiagMsg.second;
10119     });
10120   } else if (VectorizeLoop && !InterleaveLoop) {
10121     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10122                       << ") in " << DebugLocStr << '\n');
10123     ORE->emit([&]() {
10124       return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
10125                                         L->getStartLoc(), L->getHeader())
10126              << IntDiagMsg.second;
10127     });
10128   } else if (VectorizeLoop && InterleaveLoop) {
10129     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10130                       << ") in " << DebugLocStr << '\n');
10131     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10132   }
10133 
10134   bool DisableRuntimeUnroll = false;
10135   MDNode *OrigLoopID = L->getLoopID();
10136   {
    // Optimistically generate runtime checks. Drop them if they turn out not to
    // be profitable. Limit the scope of Checks, so the cleanup happens
    // immediately after vector code generation is done.
10140     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10141                              F->getParent()->getDataLayout());
10142     if (!VF.Width.isScalar() || IC > 1)
10143       Checks.Create(L, *LVL.getLAI(), PSE.getUnionPredicate());
10144     LVP.setBestPlan(VF.Width, IC);
10145 
10146     using namespace ore;
10147     if (!VectorizeLoop) {
10148       assert(IC > 1 && "interleave count should not be 1 or 0");
10149       // If we decided that it is not legal to vectorize the loop, then
10150       // interleave it.
10151       InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
10152                                  &CM, BFI, PSI, Checks);
10153       LVP.executePlan(Unroller, DT);
10154 
10155       ORE->emit([&]() {
10156         return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
10157                                   L->getHeader())
10158                << "interleaved loop (interleaved count: "
10159                << NV("InterleaveCount", IC) << ")";
10160       });
10161     } else {
10162       // If we decided that it is *legal* to vectorize the loop, then do it.
10163 
10164       // Consider vectorizing the epilogue too if it's profitable.
10165       VectorizationFactor EpilogueVF =
10166           CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
10167       if (EpilogueVF.Width.isVector()) {
10168 
10169         // The first pass vectorizes the main loop and creates a scalar epilogue
10170         // to be vectorized by executing the plan (potentially with a different
10171         // factor) again shortly afterwards.
10172         EpilogueLoopVectorizationInfo EPI(VF.Width.getKnownMinValue(), IC,
10173                                           EpilogueVF.Width.getKnownMinValue(),
10174                                           1);
10175         EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE,
10176                                            EPI, &LVL, &CM, BFI, PSI, Checks);
10177 
10178         LVP.setBestPlan(EPI.MainLoopVF, EPI.MainLoopUF);
10179         LVP.executePlan(MainILV, DT);
10180         ++LoopsVectorized;
10181 
10182         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10183         formLCSSARecursively(*L, *DT, LI, SE);
10184 
10185         // Second pass vectorizes the epilogue and adjusts the control flow
10186         // edges from the first pass.
10187         LVP.setBestPlan(EPI.EpilogueVF, EPI.EpilogueUF);
10188         EPI.MainLoopVF = EPI.EpilogueVF;
10189         EPI.MainLoopUF = EPI.EpilogueUF;
10190         EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
10191                                                  ORE, EPI, &LVL, &CM, BFI, PSI,
10192                                                  Checks);
10193         LVP.executePlan(EpilogILV, DT);
10194         ++LoopsEpilogueVectorized;
10195 
10196         if (!MainILV.areSafetyChecksAdded())
10197           DisableRuntimeUnroll = true;
10198       } else {
10199         InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
10200                                &LVL, &CM, BFI, PSI, Checks);
10201         LVP.executePlan(LB, DT);
10202         ++LoopsVectorized;
10203 
        // Add metadata to disable runtime unrolling of the scalar loop when
        // there are no runtime checks about strides and memory. A scalar loop
        // that is rarely used is not worth unrolling.
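        // (The flag is consumed further below: when no follow-up loop metadata
        // is specified, AddRuntimeUnrollDisableMetaData attaches a
        // runtime-unroll-disable hint ("llvm.loop.unroll.runtime.disable") to
        // the remaining scalar loop.)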
10207         if (!LB.areSafetyChecksAdded())
10208           DisableRuntimeUnroll = true;
10209       }
10210       // Report the vectorization decision.
10211       ORE->emit([&]() {
10212         return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
10213                                   L->getHeader())
10214                << "vectorized loop (vectorization width: "
10215                << NV("VectorizationFactor", VF.Width)
10216                << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
10217       });
10218     }
10219 
10220     if (ORE->allowExtraAnalysis(LV_NAME))
10221       checkMixedPrecision(L, ORE);
10222   }
10223 
10224   Optional<MDNode *> RemainderLoopID =
10225       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
10226                                       LLVMLoopVectorizeFollowupEpilogue});
10227   if (RemainderLoopID.hasValue()) {
10228     L->setLoopID(RemainderLoopID.getValue());
10229   } else {
10230     if (DisableRuntimeUnroll)
10231       AddRuntimeUnrollDisableMetaData(L);
10232 
10233     // Mark the loop as already vectorized to avoid vectorizing again.
10234     Hints.setAlreadyVectorized();
10235   }
10236 
10237   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10238   return true;
10239 }
10240 
10241 LoopVectorizeResult LoopVectorizePass::runImpl(
10242     Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
10243     DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
10244     DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
10245     std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
10246     OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
10247   SE = &SE_;
10248   LI = &LI_;
10249   TTI = &TTI_;
10250   DT = &DT_;
10251   BFI = &BFI_;
10252   TLI = TLI_;
10253   AA = &AA_;
10254   AC = &AC_;
10255   GetLAA = &GetLAA_;
10256   DB = &DB_;
10257   ORE = &ORE_;
10258   PSI = PSI_;
10259 
10260   // Don't attempt if
10261   // 1. the target claims to have no vector registers, and
10262   // 2. interleaving won't help ILP.
10263   //
10264   // The second condition is necessary because, even if the target has no
10265   // vector registers, loop vectorization may still enable scalar
10266   // interleaving.
10267   if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
10268       TTI->getMaxInterleaveFactor(1) < 2)
10269     return LoopVectorizeResult(false, false);
10270 
10271   bool Changed = false, CFGChanged = false;
10272 
10273   // The vectorizer requires loops to be in simplified form.
10274   // Since simplification may add new inner loops, it has to run before the
10275   // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
10277   // vectorized.
10278   for (auto &L : *LI)
10279     Changed |= CFGChanged |=
10280         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10281 
10282   // Build up a worklist of inner-loops to vectorize. This is necessary as
10283   // the act of vectorizing or partially unrolling a loop creates new loops
10284   // and can invalidate iterators across the loops.
10285   SmallVector<Loop *, 8> Worklist;
10286 
10287   for (Loop *L : *LI)
10288     collectSupportedLoops(*L, LI, ORE, Worklist);
10289 
10290   LoopsAnalyzed += Worklist.size();
10291 
10292   // Now walk the identified inner loops.
10293   while (!Worklist.empty()) {
10294     Loop *L = Worklist.pop_back_val();
10295 
10296     // For the inner loops we actually process, form LCSSA to simplify the
10297     // transform.
10298     Changed |= formLCSSARecursively(*L, *DT, LI, SE);
10299 
10300     Changed |= CFGChanged |= processLoop(L);
10301   }
10302 
10303   // Process each loop nest in the function.
10304   return LoopVectorizeResult(Changed, CFGChanged);
10305 }
10306 
10307 PreservedAnalyses LoopVectorizePass::run(Function &F,
10308                                          FunctionAnalysisManager &AM) {
10309     auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
10310     auto &LI = AM.getResult<LoopAnalysis>(F);
10311     auto &TTI = AM.getResult<TargetIRAnalysis>(F);
10312     auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
10313     auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
10314     auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
10315     auto &AA = AM.getResult<AAManager>(F);
10316     auto &AC = AM.getResult<AssumptionAnalysis>(F);
10317     auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
10318     auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
10319     MemorySSA *MSSA = EnableMSSALoopDependency
10320                           ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA()
10321                           : nullptr;
10322 
10323     auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
10324     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
10325         [&](Loop &L) -> const LoopAccessInfo & {
10326       LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,  SE,
10327                                         TLI, TTI, nullptr, MSSA};
10328       return LAM.getResult<LoopAccessAnalysis>(L, AR);
10329     };
10330     auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
10331     ProfileSummaryInfo *PSI =
10332         MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
10333     LoopVectorizeResult Result =
10334         runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
10335     if (!Result.MadeAnyChange)
10336       return PreservedAnalyses::all();
10337     PreservedAnalyses PA;
10338 
10339     // We currently do not preserve loopinfo/dominator analyses with outer loop
10340     // vectorization. Until this is addressed, mark these analyses as preserved
10341     // only for non-VPlan-native path.
10342     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
10343     if (!EnableVPlanNativePath) {
10344       PA.preserve<LoopAnalysis>();
10345       PA.preserve<DominatorTreeAnalysis>();
10346     }
10347     if (!Result.MadeCFGChange)
10348       PA.preserveSet<CFGAnalyses>();
10349     return PA;
10350 }
10351