1 //===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
10 // and generates target-independent LLVM-IR.
11 // The vectorizer uses the TargetTransformInfo analysis to estimate the costs
12 // of instructions in order to estimate the profitability of vectorization.
13 //
14 // The loop vectorizer combines consecutive loop iterations into a single
15 // 'wide' iteration. After this transformation the index is incremented
16 // by the SIMD vector width, and not by one.
17 //
18 // This pass has three parts:
19 // 1. The main loop pass that drives the different parts.
20 // 2. LoopVectorizationLegality - A unit that checks for the legality
21 //    of the vectorization.
22 // 3. InnerLoopVectorizer - A unit that performs the actual
23 //    widening of instructions.
24 // 4. LoopVectorizationCostModel - A unit that checks for the profitability
25 //    of vectorization. It decides on the optimal vector width, which
26 //    can be one, if vectorization is not profitable.
27 //
28 // There is a development effort going on to migrate loop vectorizer to the
29 // VPlan infrastructure and to introduce outer loop vectorization support (see
30 // docs/Proposal/VectorizationPlan.rst and
31 // http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
32 // purpose, we temporarily introduced the VPlan-native vectorization path: an
33 // alternative vectorization path that is natively implemented on top of the
34 // VPlan infrastructure. See EnableVPlanNativePath for enabling.
35 //
36 //===----------------------------------------------------------------------===//
37 //
38 // The reduction-variable vectorization is based on the paper:
39 //  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
40 //
41 // Variable uniformity checks are inspired by:
42 //  Karrenberg, R. and Hack, S. Whole Function Vectorization.
43 //
44 // The interleaved access vectorization is based on the paper:
45 //  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
46 //  Data for SIMD
47 //
48 // Other ideas/concepts are from:
49 //  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
50 //
51 //  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
52 //  Vectorizing Compilers.
53 //
54 //===----------------------------------------------------------------------===//
55 
56 #include "llvm/Transforms/Vectorize/LoopVectorize.h"
57 #include "LoopVectorizationPlanner.h"
58 #include "VPRecipeBuilder.h"
59 #include "VPlan.h"
60 #include "VPlanHCFGBuilder.h"
61 #include "VPlanPredicator.h"
62 #include "VPlanTransforms.h"
63 #include "llvm/ADT/APInt.h"
64 #include "llvm/ADT/ArrayRef.h"
65 #include "llvm/ADT/DenseMap.h"
66 #include "llvm/ADT/DenseMapInfo.h"
67 #include "llvm/ADT/Hashing.h"
68 #include "llvm/ADT/MapVector.h"
69 #include "llvm/ADT/None.h"
70 #include "llvm/ADT/Optional.h"
71 #include "llvm/ADT/STLExtras.h"
72 #include "llvm/ADT/SmallPtrSet.h"
73 #include "llvm/ADT/SmallSet.h"
74 #include "llvm/ADT/SmallVector.h"
75 #include "llvm/ADT/Statistic.h"
76 #include "llvm/ADT/StringRef.h"
77 #include "llvm/ADT/Twine.h"
78 #include "llvm/ADT/iterator_range.h"
79 #include "llvm/Analysis/AssumptionCache.h"
80 #include "llvm/Analysis/BasicAliasAnalysis.h"
81 #include "llvm/Analysis/BlockFrequencyInfo.h"
82 #include "llvm/Analysis/CFG.h"
83 #include "llvm/Analysis/CodeMetrics.h"
84 #include "llvm/Analysis/DemandedBits.h"
85 #include "llvm/Analysis/GlobalsModRef.h"
86 #include "llvm/Analysis/LoopAccessAnalysis.h"
87 #include "llvm/Analysis/LoopAnalysisManager.h"
88 #include "llvm/Analysis/LoopInfo.h"
89 #include "llvm/Analysis/LoopIterator.h"
90 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
91 #include "llvm/Analysis/ProfileSummaryInfo.h"
92 #include "llvm/Analysis/ScalarEvolution.h"
93 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
94 #include "llvm/Analysis/TargetLibraryInfo.h"
95 #include "llvm/Analysis/TargetTransformInfo.h"
96 #include "llvm/Analysis/VectorUtils.h"
97 #include "llvm/IR/Attributes.h"
98 #include "llvm/IR/BasicBlock.h"
99 #include "llvm/IR/CFG.h"
100 #include "llvm/IR/Constant.h"
101 #include "llvm/IR/Constants.h"
102 #include "llvm/IR/DataLayout.h"
103 #include "llvm/IR/DebugInfoMetadata.h"
104 #include "llvm/IR/DebugLoc.h"
105 #include "llvm/IR/DerivedTypes.h"
106 #include "llvm/IR/DiagnosticInfo.h"
107 #include "llvm/IR/Dominators.h"
108 #include "llvm/IR/Function.h"
109 #include "llvm/IR/IRBuilder.h"
110 #include "llvm/IR/InstrTypes.h"
111 #include "llvm/IR/Instruction.h"
112 #include "llvm/IR/Instructions.h"
113 #include "llvm/IR/IntrinsicInst.h"
114 #include "llvm/IR/Intrinsics.h"
115 #include "llvm/IR/LLVMContext.h"
116 #include "llvm/IR/Metadata.h"
117 #include "llvm/IR/Module.h"
118 #include "llvm/IR/Operator.h"
119 #include "llvm/IR/PatternMatch.h"
120 #include "llvm/IR/Type.h"
121 #include "llvm/IR/Use.h"
122 #include "llvm/IR/User.h"
123 #include "llvm/IR/Value.h"
124 #include "llvm/IR/ValueHandle.h"
125 #include "llvm/IR/Verifier.h"
126 #include "llvm/InitializePasses.h"
127 #include "llvm/Pass.h"
128 #include "llvm/Support/Casting.h"
129 #include "llvm/Support/CommandLine.h"
130 #include "llvm/Support/Compiler.h"
131 #include "llvm/Support/Debug.h"
132 #include "llvm/Support/ErrorHandling.h"
133 #include "llvm/Support/InstructionCost.h"
134 #include "llvm/Support/MathExtras.h"
135 #include "llvm/Support/raw_ostream.h"
136 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
137 #include "llvm/Transforms/Utils/InjectTLIMappings.h"
138 #include "llvm/Transforms/Utils/LoopSimplify.h"
139 #include "llvm/Transforms/Utils/LoopUtils.h"
140 #include "llvm/Transforms/Utils/LoopVersioning.h"
141 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
142 #include "llvm/Transforms/Utils/SizeOpts.h"
143 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
144 #include <algorithm>
145 #include <cassert>
146 #include <cstdint>
147 #include <cstdlib>
148 #include <functional>
149 #include <iterator>
150 #include <limits>
151 #include <memory>
152 #include <string>
153 #include <tuple>
154 #include <utility>
155 
156 using namespace llvm;
157 
158 #define LV_NAME "loop-vectorize"
159 #define DEBUG_TYPE LV_NAME
160 
161 #ifndef NDEBUG
162 const char VerboseDebug[] = DEBUG_TYPE "-verbose";
163 #endif
164 
165 /// @{
166 /// Metadata attribute names
167 const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
168 const char LLVMLoopVectorizeFollowupVectorized[] =
169     "llvm.loop.vectorize.followup_vectorized";
170 const char LLVMLoopVectorizeFollowupEpilogue[] =
171     "llvm.loop.vectorize.followup_epilogue";
172 /// @}
173 
174 STATISTIC(LoopsVectorized, "Number of loops vectorized");
175 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
176 STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");
177 
178 static cl::opt<bool> EnableEpilogueVectorization(
179     "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
180     cl::desc("Enable vectorization of epilogue loops."));
181 
182 static cl::opt<unsigned> EpilogueVectorizationForceVF(
183     "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
184     cl::desc("When epilogue vectorization is enabled, and a value greater than "
185              "1 is specified, forces the given VF for all applicable epilogue "
186              "loops."));
187 
188 static cl::opt<unsigned> EpilogueVectorizationMinVF(
189     "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
190     cl::desc("Only loops with vectorization factor equal to or larger than "
191              "the specified value are considered for epilogue vectorization."));
192 
193 /// Loops with a known constant trip count below this number are vectorized only
194 /// if no scalar iteration overheads are incurred.
195 static cl::opt<unsigned> TinyTripCountVectorThreshold(
196     "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
197     cl::desc("Loops with a constant trip count that is smaller than this "
198              "value are vectorized only if no scalar iteration overheads "
199              "are incurred."));
200 
201 static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
202     "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
203     cl::desc("The maximum allowed number of runtime memory checks with a "
204              "vectorize(enable) pragma."));
205 
206 // Option prefer-predicate-over-epilogue indicates that an epilogue is undesired,
207 // that predication is preferred, and this lists all options. I.e., the
208 // vectorizer will try to fold the tail-loop (epilogue) into the vector body
209 // and predicate the instructions accordingly. If tail-folding fails, there are
210 // different fallback strategies depending on these values:
211 namespace PreferPredicateTy {
212   enum Option {
213     ScalarEpilogue = 0,
214     PredicateElseScalarEpilogue,
215     PredicateOrDontVectorize
216   };
217 } // namespace PreferPredicateTy
218 
219 static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
220     "prefer-predicate-over-epilogue",
221     cl::init(PreferPredicateTy::ScalarEpilogue),
222     cl::Hidden,
223     cl::desc("Tail-folding and predication preferences over creating a scalar "
224              "epilogue loop."),
225     cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
226                          "scalar-epilogue",
227                          "Don't tail-predicate loops, create scalar epilogue"),
228               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
229                          "predicate-else-scalar-epilogue",
230                          "prefer tail-folding, create scalar epilogue if tail "
231                          "folding fails."),
232               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
233                          "predicate-dont-vectorize",
234                          "prefers tail-folding, don't attempt vectorization if "
235                          "tail-folding fails.")));
236 
237 static cl::opt<bool> MaximizeBandwidth(
238     "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
239     cl::desc("Maximize bandwidth when selecting vectorization factor which "
240              "will be determined by the smallest type in loop."));
241 
242 static cl::opt<bool> EnableInterleavedMemAccesses(
243     "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
244     cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
245 
246 /// An interleave-group may need masking if it resides in a block that needs
247 /// predication, or in order to mask away gaps.
248 static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
249     "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
250     cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));
251 
252 static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
253     "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
254     cl::desc("We don't interleave loops with a estimated constant trip count "
255              "below this number"));
256 
257 static cl::opt<unsigned> ForceTargetNumScalarRegs(
258     "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
259     cl::desc("A flag that overrides the target's number of scalar registers."));
260 
261 static cl::opt<unsigned> ForceTargetNumVectorRegs(
262     "force-target-num-vector-regs", cl::init(0), cl::Hidden,
263     cl::desc("A flag that overrides the target's number of vector registers."));
264 
265 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
266     "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
267     cl::desc("A flag that overrides the target's max interleave factor for "
268              "scalar loops."));
269 
270 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
271     "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
272     cl::desc("A flag that overrides the target's max interleave factor for "
273              "vectorized loops."));
274 
275 static cl::opt<unsigned> ForceTargetInstructionCost(
276     "force-target-instruction-cost", cl::init(0), cl::Hidden,
277     cl::desc("A flag that overrides the target's expected cost for "
278              "an instruction to a single constant value. Mostly "
279              "useful for getting consistent testing."));
280 
281 static cl::opt<bool> ForceTargetSupportsScalableVectors(
282     "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
283     cl::desc(
284         "Pretend that scalable vectors are supported, even if the target does "
285         "not support them. This flag should only be used for testing."));
286 
287 static cl::opt<unsigned> SmallLoopCost(
288     "small-loop-cost", cl::init(20), cl::Hidden,
289     cl::desc(
290         "The cost of a loop that is considered 'small' by the interleaver."));
291 
292 static cl::opt<bool> LoopVectorizeWithBlockFrequency(
293     "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
294     cl::desc("Enable the use of the block frequency analysis to access PGO "
295              "heuristics minimizing code growth in cold regions and being more "
296              "aggressive in hot regions."));
297 
298 // Runtime interleave loops for load/store throughput.
299 static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
300     "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
301     cl::desc(
302         "Enable runtime interleaving until load/store ports are saturated"));
303 
304 /// Interleave small loops with scalar reductions.
305 static cl::opt<bool> InterleaveSmallLoopScalarReduction(
306     "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
307     cl::desc("Enable interleaving for loops with small iteration counts that "
308              "contain scalar reductions to expose ILP."));
309 
310 /// The number of stores in a loop that are allowed to need predication.
311 static cl::opt<unsigned> NumberOfStoresToPredicate(
312     "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
313     cl::desc("Max number of stores to be predicated behind an if."));
314 
315 static cl::opt<bool> EnableIndVarRegisterHeur(
316     "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
317     cl::desc("Count the induction variable only once when interleaving"));
318 
319 static cl::opt<bool> EnableCondStoresVectorization(
320     "enable-cond-stores-vec", cl::init(true), cl::Hidden,
321     cl::desc("Enable if predication of stores during vectorization."));
322 
323 static cl::opt<unsigned> MaxNestedScalarReductionIC(
324     "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
325     cl::desc("The maximum interleave count to use when interleaving a scalar "
326              "reduction in a nested loop."));
327 
328 static cl::opt<bool>
329     PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
330                            cl::Hidden,
331                            cl::desc("Prefer in-loop vector reductions, "
332                                     "overriding the targets preference."));
333 
334 static cl::opt<bool> ForceOrderedReductions(
335     "force-ordered-reductions", cl::init(false), cl::Hidden,
336     cl::desc("Enable the vectorisation of loops with in-order (strict) "
337              "FP reductions"));
338 
339 static cl::opt<bool> PreferPredicatedReductionSelect(
340     "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
341     cl::desc(
342         "Prefer predicating a reduction operation over an after loop select."));
343 
344 cl::opt<bool> EnableVPlanNativePath(
345     "enable-vplan-native-path", cl::init(false), cl::Hidden,
346     cl::desc("Enable VPlan-native vectorization path with "
347              "support for outer loop vectorization."));
348 
349 // FIXME: Remove this switch once we have divergence analysis. Currently we
350 // assume divergent non-backedge branches when this switch is true.
351 cl::opt<bool> EnableVPlanPredication(
352     "enable-vplan-predication", cl::init(false), cl::Hidden,
353     cl::desc("Enable VPlan-native vectorization path predicator with "
354              "support for outer loop vectorization."));
355 
356 // This flag enables the stress testing of the VPlan H-CFG construction in the
357 // VPlan-native vectorization path. It must be used in conjuction with
358 // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
359 // verification of the H-CFGs built.
360 static cl::opt<bool> VPlanBuildStressTest(
361     "vplan-build-stress-test", cl::init(false), cl::Hidden,
362     cl::desc(
363         "Build VPlan for every supported loop nest in the function and bail "
364         "out right after the build (stress test the VPlan H-CFG construction "
365         "in the VPlan-native vectorization path)."));
366 
367 cl::opt<bool> llvm::EnableLoopInterleaving(
368     "interleave-loops", cl::init(true), cl::Hidden,
369     cl::desc("Enable loop interleaving in Loop vectorization passes"));
370 cl::opt<bool> llvm::EnableLoopVectorization(
371     "vectorize-loops", cl::init(true), cl::Hidden,
372     cl::desc("Run the Loop vectorization passes"));
373 
374 cl::opt<bool> PrintVPlansInDotFormat(
375     "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
376     cl::desc("Use dot format instead of plain text when dumping VPlans"));
377 
378 /// A helper function that returns true if the given type is irregular. The
379 /// type is irregular if its allocated size doesn't equal the store size of an
380 /// element of the corresponding vector type.
381 static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
382   // Determine if an array of N elements of type Ty is "bitcast compatible"
383   // with a <N x Ty> vector.
384   // This is only true if there is no padding between the array elements.
385   return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
386 }
387 
388 /// A helper function that returns the reciprocal of the block probability of
389 /// predicated blocks. If we return X, we are assuming the predicated block
390 /// will execute once for every X iterations of the loop header.
391 ///
392 /// TODO: We should use actual block probability here, if available. Currently,
393 ///       we always assume predicated blocks have a 50% chance of executing.
394 static unsigned getReciprocalPredBlockProb() { return 2; }
395 
396 /// A helper function that returns an integer or floating-point constant with
397 /// value C.
398 static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
399   return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
400                            : ConstantFP::get(Ty, C);
401 }
402 
403 /// Returns "best known" trip count for the specified loop \p L as defined by
404 /// the following procedure:
405 ///   1) Returns exact trip count if it is known.
406 ///   2) Returns expected trip count according to profile data if any.
407 ///   3) Returns upper bound estimate if it is known.
408 ///   4) Returns None if all of the above failed.
409 static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
410   // Check if exact trip count is known.
411   if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
412     return ExpectedTC;
413 
414   // Check if there is an expected trip count available from profile data.
415   if (LoopVectorizeWithBlockFrequency)
416     if (auto EstimatedTC = getLoopEstimatedTripCount(L))
417       return EstimatedTC;
418 
419   // Check if upper bound estimate is known.
420   if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
421     return ExpectedTC;
422 
423   return None;
424 }
425 
426 // Forward declare GeneratedRTChecks.
427 class GeneratedRTChecks;
428 
429 namespace llvm {
430 
431 AnalysisKey ShouldRunExtraVectorPasses::Key;
432 
433 /// InnerLoopVectorizer vectorizes loops which contain only one basic
434 /// block to a specified vectorization factor (VF).
435 /// This class performs the widening of scalars into vectors, or multiple
436 /// scalars. This class also implements the following features:
437 /// * It inserts an epilogue loop for handling loops that don't have iteration
438 ///   counts that are known to be a multiple of the vectorization factor.
439 /// * It handles the code generation for reduction variables.
440 /// * Scalarization (implementation using scalars) of un-vectorizable
441 ///   instructions.
442 /// InnerLoopVectorizer does not perform any vectorization-legality
443 /// checks, and relies on the caller to check for the different legality
444 /// aspects. The InnerLoopVectorizer relies on the
445 /// LoopVectorizationLegality class to provide information about the induction
446 /// and reduction variables that were found to a given vectorization factor.
447 class InnerLoopVectorizer {
448 public:
449   InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
450                       LoopInfo *LI, DominatorTree *DT,
451                       const TargetLibraryInfo *TLI,
452                       const TargetTransformInfo *TTI, AssumptionCache *AC,
453                       OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
454                       unsigned UnrollFactor, LoopVectorizationLegality *LVL,
455                       LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
456                       ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
457       : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
458         AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
459         Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
460         PSI(PSI), RTChecks(RTChecks) {
461     // Query this against the original loop and save it here because the profile
462     // of the original loop header may change as the transformation happens.
463     OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
464         OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
465   }
466 
467   virtual ~InnerLoopVectorizer() = default;
468 
469   /// Create a new empty loop that will contain vectorized instructions later
470   /// on, while the old loop will be used as the scalar remainder. Control flow
471   /// is generated around the vectorized (and scalar epilogue) loops consisting
472   /// of various checks and bypasses. Return the pre-header block of the new
473   /// loop and the start value for the canonical induction, if it is != 0. The
474   /// latter is the case when vectorizing the epilogue loop. In the case of
475   /// epilogue vectorization, this function is overriden to handle the more
476   /// complex control flow around the loops.
477   virtual std::pair<BasicBlock *, Value *> createVectorizedLoopSkeleton();
478 
479   /// Widen a single call instruction within the innermost loop.
480   void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
481                             VPTransformState &State);
482 
483   /// Fix the vectorized code, taking care of header phi's, live-outs, and more.
484   void fixVectorizedLoop(VPTransformState &State);
485 
486   // Return true if any runtime check is added.
487   bool areSafetyChecksAdded() { return AddedSafetyChecks; }
488 
489   /// A type for vectorized values in the new loop. Each value from the
490   /// original loop, when vectorized, is represented by UF vector values in the
491   /// new unrolled loop, where UF is the unroll factor.
492   using VectorParts = SmallVector<Value *, 2>;
493 
494   /// Vectorize a single first-order recurrence or pointer induction PHINode in
495   /// a block. This method handles the induction variable canonicalization. It
496   /// supports both VF = 1 for unrolled loops and arbitrary length vectors.
497   void widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR,
498                            VPTransformState &State);
499 
500   /// A helper function to scalarize a single Instruction in the innermost loop.
501   /// Generates a sequence of scalar instances for each lane between \p MinLane
502   /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
503   /// inclusive. Uses the VPValue operands from \p RepRecipe instead of \p
504   /// Instr's operands.
505   void scalarizeInstruction(Instruction *Instr, VPReplicateRecipe *RepRecipe,
506                             const VPIteration &Instance, bool IfPredicateInstr,
507                             VPTransformState &State);
508 
509   /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
510   /// is provided, the integer induction variable will first be truncated to
511   /// the corresponding type. \p CanonicalIV is the scalar value generated for
512   /// the canonical induction variable.
513   void widenIntOrFpInduction(PHINode *IV, const InductionDescriptor &ID,
514                              Value *Start, TruncInst *Trunc, VPValue *Def,
515                              VPTransformState &State, Value *CanonicalIV);
516 
517   /// Construct the vector value of a scalarized value \p V one lane at a time.
518   void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
519                                  VPTransformState &State);
520 
521   /// Try to vectorize interleaved access group \p Group with the base address
522   /// given in \p Addr, optionally masking the vector operations if \p
523   /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
524   /// values in the vectorized loop.
525   void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
526                                 ArrayRef<VPValue *> VPDefs,
527                                 VPTransformState &State, VPValue *Addr,
528                                 ArrayRef<VPValue *> StoredValues,
529                                 VPValue *BlockInMask = nullptr);
530 
531   /// Set the debug location in the builder \p Ptr using the debug location in
532   /// \p V. If \p Ptr is None then it uses the class member's Builder.
533   void setDebugLocFromInst(const Value *V,
534                            Optional<IRBuilder<> *> CustomBuilder = None);
535 
536   /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
537   void fixNonInductionPHIs(VPTransformState &State);
538 
539   /// Returns true if the reordering of FP operations is not allowed, but we are
540   /// able to vectorize with strict in-order reductions for the given RdxDesc.
541   bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc);
542 
543   /// Create a broadcast instruction. This method generates a broadcast
544   /// instruction (shuffle) for loop invariant values and for the induction
545   /// value. If this is the induction variable then we extend it to N, N+1, ...
546   /// this is needed because each iteration in the loop corresponds to a SIMD
547   /// element.
548   virtual Value *getBroadcastInstrs(Value *V);
549 
550   /// Add metadata from one instruction to another.
551   ///
552   /// This includes both the original MDs from \p From and additional ones (\see
553   /// addNewMetadata).  Use this for *newly created* instructions in the vector
554   /// loop.
555   void addMetadata(Instruction *To, Instruction *From);
556 
557   /// Similar to the previous function but it adds the metadata to a
558   /// vector of instructions.
559   void addMetadata(ArrayRef<Value *> To, Instruction *From);
560 
561 protected:
562   friend class LoopVectorizationPlanner;
563 
564   /// A small list of PHINodes.
565   using PhiVector = SmallVector<PHINode *, 4>;
566 
567   /// A type for scalarized values in the new loop. Each value from the
568   /// original loop, when scalarized, is represented by UF x VF scalar values
569   /// in the new unrolled loop, where UF is the unroll factor and VF is the
570   /// vectorization factor.
571   using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;
572 
573   /// Set up the values of the IVs correctly when exiting the vector loop.
574   void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
575                     Value *CountRoundDown, Value *EndValue,
576                     BasicBlock *MiddleBlock);
577 
578   /// Introduce a conditional branch (on true, condition to be set later) at the
579   /// end of the header=latch connecting it to itself (across the backedge) and
580   /// to the exit block of \p L.
581   void createHeaderBranch(Loop *L);
582 
583   /// Handle all cross-iteration phis in the header.
584   void fixCrossIterationPHIs(VPTransformState &State);
585 
586   /// Create the exit value of first order recurrences in the middle block and
587   /// update their users.
588   void fixFirstOrderRecurrence(VPFirstOrderRecurrencePHIRecipe *PhiR,
589                                VPTransformState &State);
590 
591   /// Create code for the loop exit value of the reduction.
592   void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);
593 
594   /// Clear NSW/NUW flags from reduction instructions if necessary.
595   void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
596                                VPTransformState &State);
597 
598   /// Fixup the LCSSA phi nodes in the unique exit block.  This simply
599   /// means we need to add the appropriate incoming value from the middle
600   /// block as exiting edges from the scalar epilogue loop (if present) are
601   /// already in place, and we exit the vector loop exclusively to the middle
602   /// block.
603   void fixLCSSAPHIs(VPTransformState &State);
604 
605   /// Iteratively sink the scalarized operands of a predicated instruction into
606   /// the block that was created for it.
607   void sinkScalarOperands(Instruction *PredInst);
608 
609   /// Shrinks vector element sizes to the smallest bitwidth they can be legally
610   /// represented as.
611   void truncateToMinimalBitwidths(VPTransformState &State);
612 
613   /// Compute scalar induction steps. \p ScalarIV is the scalar induction
614   /// variable on which to base the steps, \p Step is the size of the step, and
615   /// \p EntryVal is the value from the original loop that maps to the steps.
616   /// Note that \p EntryVal doesn't have to be an induction variable - it
617   /// can also be a truncate instruction.
618   void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
619                         const InductionDescriptor &ID, VPValue *Def,
620                         VPTransformState &State);
621 
622   /// Create a vector induction phi node based on an existing scalar one. \p
623   /// EntryVal is the value from the original loop that maps to the vector phi
624   /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
625   /// truncate instruction, instead of widening the original IV, we widen a
626   /// version of the IV truncated to \p EntryVal's type.
627   void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
628                                        Value *Step, Value *Start,
629                                        Instruction *EntryVal, VPValue *Def,
630                                        VPTransformState &State);
631 
632   /// Returns true if an instruction \p I should be scalarized instead of
633   /// vectorized for the chosen vectorization factor.
634   bool shouldScalarizeInstruction(Instruction *I) const;
635 
636   /// Returns true if we should generate a scalar version of \p IV.
637   bool needsScalarInduction(Instruction *IV) const;
638 
639   /// Returns (and creates if needed) the original loop trip count.
640   Value *getOrCreateTripCount(Loop *NewLoop);
641 
642   /// Returns (and creates if needed) the trip count of the widened loop.
643   Value *getOrCreateVectorTripCount(Loop *NewLoop);
644 
645   /// Returns a bitcasted value to the requested vector type.
646   /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
647   Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
648                                 const DataLayout &DL);
649 
650   /// Emit a bypass check to see if the vector trip count is zero, including if
651   /// it overflows.
652   void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
653 
654   /// Emit a bypass check to see if all of the SCEV assumptions we've
655   /// had to make are correct. Returns the block containing the checks or
656   /// nullptr if no checks have been added.
657   BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass);
658 
659   /// Emit bypass checks to check any memory assumptions we may have made.
660   /// Returns the block containing the checks or nullptr if no checks have been
661   /// added.
662   BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);
663 
664   /// Compute the transformed value of Index at offset StartValue using step
665   /// StepValue.
666   /// For integer induction, returns StartValue + Index * StepValue.
667   /// For pointer induction, returns StartValue[Index * StepValue].
668   /// FIXME: The newly created binary instructions should contain nsw/nuw
669   /// flags, which can be found from the original scalar operations.
670   Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
671                               const DataLayout &DL,
672                               const InductionDescriptor &ID,
673                               BasicBlock *VectorHeader) const;
674 
675   /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
676   /// vector loop preheader, middle block and scalar preheader. Also
677   /// allocate a loop object for the new vector loop and return it.
678   Loop *createVectorLoopSkeleton(StringRef Prefix);
679 
680   /// Create new phi nodes for the induction variables to resume iteration count
681   /// in the scalar epilogue, from where the vectorized loop left off.
682   /// In cases where the loop skeleton is more complicated (eg. epilogue
683   /// vectorization) and the resume values can come from an additional bypass
684   /// block, the \p AdditionalBypass pair provides information about the bypass
685   /// block and the end value on the edge from bypass to this loop.
686   void createInductionResumeValues(
687       Loop *L,
688       std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});
689 
690   /// Complete the loop skeleton by adding debug MDs, creating appropriate
691   /// conditional branches in the middle block, preparing the builder and
692   /// running the verifier. Take in the vector loop \p L as argument, and return
693   /// the preheader of the completed vector loop.
694   BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);
695 
696   /// Add additional metadata to \p To that was not present on \p Orig.
697   ///
698   /// Currently this is used to add the noalias annotations based on the
699   /// inserted memchecks.  Use this for instructions that are *cloned* into the
700   /// vector loop.
701   void addNewMetadata(Instruction *To, const Instruction *Orig);
702 
703   /// Collect poison-generating recipes that may generate a poison value that is
704   /// used after vectorization, even when their operands are not poison. Those
705   /// recipes meet the following conditions:
706   ///  * Contribute to the address computation of a recipe generating a widen
707   ///    memory load/store (VPWidenMemoryInstructionRecipe or
708   ///    VPInterleaveRecipe).
709   ///  * Such a widen memory load/store has at least one underlying Instruction
710   ///    that is in a basic block that needs predication and after vectorization
711   ///    the generated instruction won't be predicated.
712   void collectPoisonGeneratingRecipes(VPTransformState &State);
713 
714   /// Allow subclasses to override and print debug traces before/after vplan
715   /// execution, when trace information is requested.
716   virtual void printDebugTracesAtStart(){};
717   virtual void printDebugTracesAtEnd(){};
718 
719   /// The original loop.
720   Loop *OrigLoop;
721 
722   /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
723   /// dynamic knowledge to simplify SCEV expressions and converts them to a
724   /// more usable form.
725   PredicatedScalarEvolution &PSE;
726 
727   /// Loop Info.
728   LoopInfo *LI;
729 
730   /// Dominator Tree.
731   DominatorTree *DT;
732 
733   /// Alias Analysis.
734   AAResults *AA;
735 
736   /// Target Library Info.
737   const TargetLibraryInfo *TLI;
738 
739   /// Target Transform Info.
740   const TargetTransformInfo *TTI;
741 
742   /// Assumption Cache.
743   AssumptionCache *AC;
744 
745   /// Interface to emit optimization remarks.
746   OptimizationRemarkEmitter *ORE;
747 
748   /// LoopVersioning.  It's only set up (non-null) if memchecks were
749   /// used.
750   ///
751   /// This is currently only used to add no-alias metadata based on the
752   /// memchecks.  The actually versioning is performed manually.
753   std::unique_ptr<LoopVersioning> LVer;
754 
755   /// The vectorization SIMD factor to use. Each vector will have this many
756   /// vector elements.
757   ElementCount VF;
758 
759   /// The vectorization unroll factor to use. Each scalar is vectorized to this
760   /// many different vector instructions.
761   unsigned UF;
762 
763   /// The builder that we use
764   IRBuilder<> Builder;
765 
766   // --- Vectorization state ---
767 
768   /// The vector-loop preheader.
769   BasicBlock *LoopVectorPreHeader;
770 
771   /// The scalar-loop preheader.
772   BasicBlock *LoopScalarPreHeader;
773 
774   /// Middle Block between the vector and the scalar.
775   BasicBlock *LoopMiddleBlock;
776 
777   /// The unique ExitBlock of the scalar loop if one exists.  Note that
778   /// there can be multiple exiting edges reaching this block.
779   BasicBlock *LoopExitBlock;
780 
781   /// The vector loop body.
782   BasicBlock *LoopVectorBody;
783 
784   /// The scalar loop body.
785   BasicBlock *LoopScalarBody;
786 
787   /// A list of all bypass blocks. The first block is the entry of the loop.
788   SmallVector<BasicBlock *, 4> LoopBypassBlocks;
789 
790   /// Store instructions that were predicated.
791   SmallVector<Instruction *, 4> PredicatedInstructions;
792 
793   /// Trip count of the original loop.
794   Value *TripCount = nullptr;
795 
796   /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
797   Value *VectorTripCount = nullptr;
798 
799   /// The legality analysis.
800   LoopVectorizationLegality *Legal;
801 
802   /// The profitablity analysis.
803   LoopVectorizationCostModel *Cost;
804 
805   // Record whether runtime checks are added.
806   bool AddedSafetyChecks = false;
807 
808   // Holds the end values for each induction variable. We save the end values
809   // so we can later fix-up the external users of the induction variables.
810   DenseMap<PHINode *, Value *> IVEndValues;
811 
812   // Vector of original scalar PHIs whose corresponding widened PHIs need to be
813   // fixed up at the end of vector code generation.
814   SmallVector<PHINode *, 8> OrigPHIsToFix;
815 
816   /// BFI and PSI are used to check for profile guided size optimizations.
817   BlockFrequencyInfo *BFI;
818   ProfileSummaryInfo *PSI;
819 
820   // Whether this loop should be optimized for size based on profile guided size
821   // optimizatios.
822   bool OptForSizeBasedOnProfile;
823 
824   /// Structure to hold information about generated runtime checks, responsible
825   /// for cleaning the checks, if vectorization turns out unprofitable.
826   GeneratedRTChecks &RTChecks;
827 };
828 
829 class InnerLoopUnroller : public InnerLoopVectorizer {
830 public:
831   InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
832                     LoopInfo *LI, DominatorTree *DT,
833                     const TargetLibraryInfo *TLI,
834                     const TargetTransformInfo *TTI, AssumptionCache *AC,
835                     OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
836                     LoopVectorizationLegality *LVL,
837                     LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
838                     ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
839       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
840                             ElementCount::getFixed(1), UnrollFactor, LVL, CM,
841                             BFI, PSI, Check) {}
842 
843 private:
844   Value *getBroadcastInstrs(Value *V) override;
845 };
846 
847 /// Encapsulate information regarding vectorization of a loop and its epilogue.
848 /// This information is meant to be updated and used across two stages of
849 /// epilogue vectorization.
850 struct EpilogueLoopVectorizationInfo {
851   ElementCount MainLoopVF = ElementCount::getFixed(0);
852   unsigned MainLoopUF = 0;
853   ElementCount EpilogueVF = ElementCount::getFixed(0);
854   unsigned EpilogueUF = 0;
855   BasicBlock *MainLoopIterationCountCheck = nullptr;
856   BasicBlock *EpilogueIterationCountCheck = nullptr;
857   BasicBlock *SCEVSafetyCheck = nullptr;
858   BasicBlock *MemSafetyCheck = nullptr;
859   Value *TripCount = nullptr;
860   Value *VectorTripCount = nullptr;
861 
862   EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF,
863                                 ElementCount EVF, unsigned EUF)
864       : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF) {
865     assert(EUF == 1 &&
866            "A high UF for the epilogue loop is likely not beneficial.");
867   }
868 };
869 
870 /// An extension of the inner loop vectorizer that creates a skeleton for a
871 /// vectorized loop that has its epilogue (residual) also vectorized.
872 /// The idea is to run the vplan on a given loop twice, firstly to setup the
873 /// skeleton and vectorize the main loop, and secondly to complete the skeleton
874 /// from the first step and vectorize the epilogue.  This is achieved by
875 /// deriving two concrete strategy classes from this base class and invoking
876 /// them in succession from the loop vectorizer planner.
877 class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
878 public:
879   InnerLoopAndEpilogueVectorizer(
880       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
881       DominatorTree *DT, const TargetLibraryInfo *TLI,
882       const TargetTransformInfo *TTI, AssumptionCache *AC,
883       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
884       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
885       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
886       GeneratedRTChecks &Checks)
887       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
888                             EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
889                             Checks),
890         EPI(EPI) {}
891 
892   // Override this function to handle the more complex control flow around the
893   // three loops.
894   std::pair<BasicBlock *, Value *>
895   createVectorizedLoopSkeleton() final override {
896     return createEpilogueVectorizedLoopSkeleton();
897   }
898 
899   /// The interface for creating a vectorized skeleton using one of two
900   /// different strategies, each corresponding to one execution of the vplan
901   /// as described above.
902   virtual std::pair<BasicBlock *, Value *>
903   createEpilogueVectorizedLoopSkeleton() = 0;
904 
905   /// Holds and updates state information required to vectorize the main loop
906   /// and its epilogue in two separate passes. This setup helps us avoid
907   /// regenerating and recomputing runtime safety checks. It also helps us to
908   /// shorten the iteration-count-check path length for the cases where the
909   /// iteration count of the loop is so small that the main vector loop is
910   /// completely skipped.
911   EpilogueLoopVectorizationInfo &EPI;
912 };
913 
914 /// A specialized derived class of inner loop vectorizer that performs
915 /// vectorization of *main* loops in the process of vectorizing loops and their
916 /// epilogues.
917 class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
918 public:
919   EpilogueVectorizerMainLoop(
920       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
921       DominatorTree *DT, const TargetLibraryInfo *TLI,
922       const TargetTransformInfo *TTI, AssumptionCache *AC,
923       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
924       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
925       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
926       GeneratedRTChecks &Check)
927       : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
928                                        EPI, LVL, CM, BFI, PSI, Check) {}
929   /// Implements the interface for creating a vectorized skeleton using the
930   /// *main loop* strategy (ie the first pass of vplan execution).
931   std::pair<BasicBlock *, Value *>
932   createEpilogueVectorizedLoopSkeleton() final override;
933 
934 protected:
935   /// Emits an iteration count bypass check once for the main loop (when \p
936   /// ForEpilogue is false) and once for the epilogue loop (when \p
937   /// ForEpilogue is true).
938   BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
939                                              bool ForEpilogue);
940   void printDebugTracesAtStart() override;
941   void printDebugTracesAtEnd() override;
942 };
943 
944 // A specialized derived class of inner loop vectorizer that performs
945 // vectorization of *epilogue* loops in the process of vectorizing loops and
946 // their epilogues.
947 class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
948 public:
949   EpilogueVectorizerEpilogueLoop(
950       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
951       DominatorTree *DT, const TargetLibraryInfo *TLI,
952       const TargetTransformInfo *TTI, AssumptionCache *AC,
953       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
954       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
955       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
956       GeneratedRTChecks &Checks)
957       : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
958                                        EPI, LVL, CM, BFI, PSI, Checks) {}
959   /// Implements the interface for creating a vectorized skeleton using the
960   /// *epilogue loop* strategy (ie the second pass of vplan execution).
961   std::pair<BasicBlock *, Value *>
962   createEpilogueVectorizedLoopSkeleton() final override;
963 
964 protected:
965   /// Emits an iteration count bypass check after the main vector loop has
966   /// finished to see if there are any iterations left to execute by either
967   /// the vector epilogue or the scalar epilogue.
968   BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
969                                                       BasicBlock *Bypass,
970                                                       BasicBlock *Insert);
971   void printDebugTracesAtStart() override;
972   void printDebugTracesAtEnd() override;
973 };
974 } // end namespace llvm
975 
976 /// Look for a meaningful debug location on the instruction or it's
977 /// operands.
978 static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
979   if (!I)
980     return I;
981 
982   DebugLoc Empty;
983   if (I->getDebugLoc() != Empty)
984     return I;
985 
986   for (Use &Op : I->operands()) {
987     if (Instruction *OpInst = dyn_cast<Instruction>(Op))
988       if (OpInst->getDebugLoc() != Empty)
989         return OpInst;
990   }
991 
992   return I;
993 }
994 
995 void InnerLoopVectorizer::setDebugLocFromInst(
996     const Value *V, Optional<IRBuilder<> *> CustomBuilder) {
997   IRBuilder<> *B = (CustomBuilder == None) ? &Builder : *CustomBuilder;
998   if (const Instruction *Inst = dyn_cast_or_null<Instruction>(V)) {
999     const DILocation *DIL = Inst->getDebugLoc();
1000 
1001     // When a FSDiscriminator is enabled, we don't need to add the multiply
1002     // factors to the discriminators.
1003     if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
1004         !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
1005       // FIXME: For scalable vectors, assume vscale=1.
1006       auto NewDIL =
1007           DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
1008       if (NewDIL)
1009         B->SetCurrentDebugLocation(NewDIL.getValue());
1010       else
1011         LLVM_DEBUG(dbgs()
1012                    << "Failed to create new discriminator: "
1013                    << DIL->getFilename() << " Line: " << DIL->getLine());
1014     } else
1015       B->SetCurrentDebugLocation(DIL);
1016   } else
1017     B->SetCurrentDebugLocation(DebugLoc());
1018 }
1019 
1020 /// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
1021 /// is passed, the message relates to that particular instruction.
1022 #ifndef NDEBUG
1023 static void debugVectorizationMessage(const StringRef Prefix,
1024                                       const StringRef DebugMsg,
1025                                       Instruction *I) {
1026   dbgs() << "LV: " << Prefix << DebugMsg;
1027   if (I != nullptr)
1028     dbgs() << " " << *I;
1029   else
1030     dbgs() << '.';
1031   dbgs() << '\n';
1032 }
1033 #endif
1034 
1035 /// Create an analysis remark that explains why vectorization failed
1036 ///
1037 /// \p PassName is the name of the pass (e.g. can be AlwaysPrint).  \p
1038 /// RemarkName is the identifier for the remark.  If \p I is passed it is an
1039 /// instruction that prevents vectorization.  Otherwise \p TheLoop is used for
1040 /// the location of the remark.  \return the remark object that can be
1041 /// streamed to.
1042 static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
1043     StringRef RemarkName, Loop *TheLoop, Instruction *I) {
1044   Value *CodeRegion = TheLoop->getHeader();
1045   DebugLoc DL = TheLoop->getStartLoc();
1046 
1047   if (I) {
1048     CodeRegion = I->getParent();
1049     // If there is no debug location attached to the instruction, revert back to
1050     // using the loop's.
1051     if (I->getDebugLoc())
1052       DL = I->getDebugLoc();
1053   }
1054 
1055   return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
1056 }
1057 
1058 namespace llvm {
1059 
1060 /// Return a value for Step multiplied by VF.
1061 Value *createStepForVF(IRBuilder<> &B, Type *Ty, ElementCount VF,
1062                        int64_t Step) {
1063   assert(Ty->isIntegerTy() && "Expected an integer step");
1064   Constant *StepVal = ConstantInt::get(Ty, Step * VF.getKnownMinValue());
1065   return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
1066 }
1067 
1068 /// Return the runtime value for VF.
1069 Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF) {
1070   Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
1071   return VF.isScalable() ? B.CreateVScale(EC) : EC;
1072 }
1073 
1074 static Value *getRuntimeVFAsFloat(IRBuilder<> &B, Type *FTy, ElementCount VF) {
1075   assert(FTy->isFloatingPointTy() && "Expected floating point type!");
1076   Type *IntTy = IntegerType::get(FTy->getContext(), FTy->getScalarSizeInBits());
1077   Value *RuntimeVF = getRuntimeVF(B, IntTy, VF);
1078   return B.CreateUIToFP(RuntimeVF, FTy);
1079 }
1080 
1081 void reportVectorizationFailure(const StringRef DebugMsg,
1082                                 const StringRef OREMsg, const StringRef ORETag,
1083                                 OptimizationRemarkEmitter *ORE, Loop *TheLoop,
1084                                 Instruction *I) {
1085   LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
1086   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1087   ORE->emit(
1088       createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
1089       << "loop not vectorized: " << OREMsg);
1090 }
1091 
1092 void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
1093                              OptimizationRemarkEmitter *ORE, Loop *TheLoop,
1094                              Instruction *I) {
1095   LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
1096   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1097   ORE->emit(
1098       createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
1099       << Msg);
1100 }
1101 
1102 } // end namespace llvm
1103 
1104 #ifndef NDEBUG
1105 /// \return string containing a file name and a line # for the given loop.
1106 static std::string getDebugLocString(const Loop *L) {
1107   std::string Result;
1108   if (L) {
1109     raw_string_ostream OS(Result);
1110     if (const DebugLoc LoopDbgLoc = L->getStartLoc())
1111       LoopDbgLoc.print(OS);
1112     else
1113       // Just print the module name.
1114       OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
1115     OS.flush();
1116   }
1117   return Result;
1118 }
1119 #endif
1120 
1121 void InnerLoopVectorizer::addNewMetadata(Instruction *To,
1122                                          const Instruction *Orig) {
1123   // If the loop was versioned with memchecks, add the corresponding no-alias
1124   // metadata.
1125   if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
1126     LVer->annotateInstWithNoAlias(To, Orig);
1127 }
1128 
1129 void InnerLoopVectorizer::collectPoisonGeneratingRecipes(
1130     VPTransformState &State) {
1131 
1132   // Collect recipes in the backward slice of `Root` that may generate a poison
1133   // value that is used after vectorization.
1134   SmallPtrSet<VPRecipeBase *, 16> Visited;
1135   auto collectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
1136     SmallVector<VPRecipeBase *, 16> Worklist;
1137     Worklist.push_back(Root);
1138 
1139     // Traverse the backward slice of Root through its use-def chain.
1140     while (!Worklist.empty()) {
1141       VPRecipeBase *CurRec = Worklist.back();
1142       Worklist.pop_back();
1143 
1144       if (!Visited.insert(CurRec).second)
1145         continue;
1146 
1147       // Prune search if we find another recipe generating a widen memory
1148       // instruction. Widen memory instructions involved in address computation
1149       // will lead to gather/scatter instructions, which don't need to be
1150       // handled.
1151       if (isa<VPWidenMemoryInstructionRecipe>(CurRec) ||
1152           isa<VPInterleaveRecipe>(CurRec) ||
1153           isa<VPCanonicalIVPHIRecipe>(CurRec))
1154         continue;
1155 
1156       // This recipe contributes to the address computation of a widen
1157       // load/store. Collect recipe if its underlying instruction has
1158       // poison-generating flags.
1159       Instruction *Instr = CurRec->getUnderlyingInstr();
1160       if (Instr && Instr->hasPoisonGeneratingFlags())
1161         State.MayGeneratePoisonRecipes.insert(CurRec);
1162 
1163       // Add new definitions to the worklist.
1164       for (VPValue *operand : CurRec->operands())
1165         if (VPDef *OpDef = operand->getDef())
1166           Worklist.push_back(cast<VPRecipeBase>(OpDef));
1167     }
1168   });
1169 
1170   // Traverse all the recipes in the VPlan and collect the poison-generating
1171   // recipes in the backward slice starting at the address of a VPWidenRecipe or
1172   // VPInterleaveRecipe.
1173   auto Iter = depth_first(
1174       VPBlockRecursiveTraversalWrapper<VPBlockBase *>(State.Plan->getEntry()));
1175   for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
1176     for (VPRecipeBase &Recipe : *VPBB) {
1177       if (auto *WidenRec = dyn_cast<VPWidenMemoryInstructionRecipe>(&Recipe)) {
1178         Instruction *UnderlyingInstr = WidenRec->getUnderlyingInstr();
1179         VPDef *AddrDef = WidenRec->getAddr()->getDef();
1180         if (AddrDef && WidenRec->isConsecutive() && UnderlyingInstr &&
1181             Legal->blockNeedsPredication(UnderlyingInstr->getParent()))
1182           collectPoisonGeneratingInstrsInBackwardSlice(
1183               cast<VPRecipeBase>(AddrDef));
1184       } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
1185         VPDef *AddrDef = InterleaveRec->getAddr()->getDef();
1186         if (AddrDef) {
1187           // Check if any member of the interleave group needs predication.
1188           const InterleaveGroup<Instruction> *InterGroup =
1189               InterleaveRec->getInterleaveGroup();
1190           bool NeedPredication = false;
1191           for (int I = 0, NumMembers = InterGroup->getNumMembers();
1192                I < NumMembers; ++I) {
1193             Instruction *Member = InterGroup->getMember(I);
1194             if (Member)
1195               NeedPredication |=
1196                   Legal->blockNeedsPredication(Member->getParent());
1197           }
1198 
1199           if (NeedPredication)
1200             collectPoisonGeneratingInstrsInBackwardSlice(
1201                 cast<VPRecipeBase>(AddrDef));
1202         }
1203       }
1204     }
1205   }
1206 }
1207 
1208 void InnerLoopVectorizer::addMetadata(Instruction *To,
1209                                       Instruction *From) {
1210   propagateMetadata(To, From);
1211   addNewMetadata(To, From);
1212 }
1213 
1214 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
1215                                       Instruction *From) {
1216   for (Value *V : To) {
1217     if (Instruction *I = dyn_cast<Instruction>(V))
1218       addMetadata(I, From);
1219   }
1220 }
1221 
1222 namespace llvm {
1223 
1224 // Loop vectorization cost-model hints how the scalar epilogue loop should be
1225 // lowered.
1226 enum ScalarEpilogueLowering {
1227 
1228   // The default: allowing scalar epilogues.
1229   CM_ScalarEpilogueAllowed,
1230 
1231   // Vectorization with OptForSize: don't allow epilogues.
1232   CM_ScalarEpilogueNotAllowedOptSize,
1233 
1234   // A special case of vectorisation with OptForSize: loops with a very small
1235   // trip count are considered for vectorization under OptForSize, thereby
1236   // making sure the cost of their loop body is dominant, free of runtime
1237   // guards and scalar iteration overheads.
1238   CM_ScalarEpilogueNotAllowedLowTripLoop,
1239 
1240   // Loop hint predicate indicating an epilogue is undesired.
1241   CM_ScalarEpilogueNotNeededUsePredicate,
1242 
1243   // Directive indicating we must either tail fold or not vectorize
1244   CM_ScalarEpilogueNotAllowedUsePredicate
1245 };
1246 
1247 /// ElementCountComparator creates a total ordering for ElementCount
1248 /// for the purposes of using it in a set structure.
1249 struct ElementCountComparator {
1250   bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
1251     return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
1252            std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
1253   }
1254 };
1255 using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;
1256 
1257 /// LoopVectorizationCostModel - estimates the expected speedups due to
1258 /// vectorization.
1259 /// In many cases vectorization is not profitable. This can happen because of
1260 /// a number of reasons. In this class we mainly attempt to predict the
1261 /// expected speedup/slowdowns due to the supported instruction set. We use the
1262 /// TargetTransformInfo to query the different backends for the cost of
1263 /// different operations.
1264 class LoopVectorizationCostModel {
1265 public:
1266   LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
1267                              PredicatedScalarEvolution &PSE, LoopInfo *LI,
1268                              LoopVectorizationLegality *Legal,
1269                              const TargetTransformInfo &TTI,
1270                              const TargetLibraryInfo *TLI, DemandedBits *DB,
1271                              AssumptionCache *AC,
1272                              OptimizationRemarkEmitter *ORE, const Function *F,
1273                              const LoopVectorizeHints *Hints,
1274                              InterleavedAccessInfo &IAI)
1275       : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
1276         TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
1277         Hints(Hints), InterleaveInfo(IAI) {}
1278 
1279   /// \return An upper bound for the vectorization factors (both fixed and
1280   /// scalable). If the factors are 0, vectorization and interleaving should be
1281   /// avoided up front.
1282   FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);
1283 
1284   /// \return True if runtime checks are required for vectorization, and false
1285   /// otherwise.
1286   bool runtimeChecksRequired();
1287 
1288   /// \return The most profitable vectorization factor and the cost of that VF.
1289   /// This method checks every VF in \p CandidateVFs. If UserVF is not ZERO
1290   /// then this vectorization factor will be selected if vectorization is
1291   /// possible.
1292   VectorizationFactor
1293   selectVectorizationFactor(const ElementCountSet &CandidateVFs);
1294 
1295   VectorizationFactor
1296   selectEpilogueVectorizationFactor(const ElementCount MaxVF,
1297                                     const LoopVectorizationPlanner &LVP);
1298 
1299   /// Setup cost-based decisions for user vectorization factor.
1300   /// \return true if the UserVF is a feasible VF to be chosen.
1301   bool selectUserVectorizationFactor(ElementCount UserVF) {
1302     collectUniformsAndScalars(UserVF);
1303     collectInstsToScalarize(UserVF);
1304     return expectedCost(UserVF).first.isValid();
1305   }
1306 
1307   /// \return The size (in bits) of the smallest and widest types in the code
1308   /// that needs to be vectorized. We ignore values that remain scalar such as
1309   /// 64 bit loop indices.
1310   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1311 
1312   /// \return The desired interleave count.
1313   /// If interleave count has been specified by metadata it will be returned.
1314   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1315   /// are the selected vectorization factor and the cost of the selected VF.
1316   unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);
1317 
1318   /// Memory access instruction may be vectorized in more than one way.
1319   /// Form of instruction after vectorization depends on cost.
1320   /// This function takes cost-based decisions for Load/Store instructions
1321   /// and collects them in a map. This decisions map is used for building
1322   /// the lists of loop-uniform and loop-scalar instructions.
1323   /// The calculated cost is saved with widening decision in order to
1324   /// avoid redundant calculations.
1325   void setCostBasedWideningDecision(ElementCount VF);
1326 
1327   /// A struct that represents some properties of the register usage
1328   /// of a loop.
1329   struct RegisterUsage {
1330     /// Holds the number of loop invariant values that are used in the loop.
1331     /// The key is ClassID of target-provided register class.
1332     SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
1333     /// Holds the maximum number of concurrent live intervals in the loop.
1334     /// The key is ClassID of target-provided register class.
1335     SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
1336   };
1337 
1338   /// \return Returns information about the register usages of the loop for the
1339   /// given vectorization factors.
1340   SmallVector<RegisterUsage, 8>
1341   calculateRegisterUsage(ArrayRef<ElementCount> VFs);
1342 
1343   /// Collect values we want to ignore in the cost model.
1344   void collectValuesToIgnore();
1345 
1346   /// Collect all element types in the loop for which widening is needed.
1347   void collectElementTypesForWidening();
1348 
1349   /// Split reductions into those that happen in the loop, and those that happen
1350   /// outside. In loop reductions are collected into InLoopReductionChains.
1351   void collectInLoopReductions();
1352 
1353   /// Returns true if we should use strict in-order reductions for the given
1354   /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
1355   /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
1356   /// of FP operations.
1357   bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) {
1358     return !Hints->allowReordering() && RdxDesc.isOrdered();
1359   }
1360 
1361   /// \returns The smallest bitwidth each instruction can be represented with.
1362   /// The vector equivalents of these instructions should be truncated to this
1363   /// type.
1364   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1365     return MinBWs;
1366   }
1367 
1368   /// \returns True if it is more profitable to scalarize instruction \p I for
1369   /// vectorization factor \p VF.
1370   bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
1371     assert(VF.isVector() &&
1372            "Profitable to scalarize relevant only for VF > 1.");
1373 
1374     // Cost model is not run in the VPlan-native path - return conservative
1375     // result until this changes.
1376     if (EnableVPlanNativePath)
1377       return false;
1378 
1379     auto Scalars = InstsToScalarize.find(VF);
1380     assert(Scalars != InstsToScalarize.end() &&
1381            "VF not yet analyzed for scalarization profitability");
1382     return Scalars->second.find(I) != Scalars->second.end();
1383   }
1384 
1385   /// Returns true if \p I is known to be uniform after vectorization.
1386   bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
1387     if (VF.isScalar())
1388       return true;
1389 
1390     // Cost model is not run in the VPlan-native path - return conservative
1391     // result until this changes.
1392     if (EnableVPlanNativePath)
1393       return false;
1394 
1395     auto UniformsPerVF = Uniforms.find(VF);
1396     assert(UniformsPerVF != Uniforms.end() &&
1397            "VF not yet analyzed for uniformity");
1398     return UniformsPerVF->second.count(I);
1399   }
1400 
1401   /// Returns true if \p I is known to be scalar after vectorization.
1402   bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
1403     if (VF.isScalar())
1404       return true;
1405 
1406     // Cost model is not run in the VPlan-native path - return conservative
1407     // result until this changes.
1408     if (EnableVPlanNativePath)
1409       return false;
1410 
1411     auto ScalarsPerVF = Scalars.find(VF);
1412     assert(ScalarsPerVF != Scalars.end() &&
1413            "Scalar values are not calculated for VF");
1414     return ScalarsPerVF->second.count(I);
1415   }
1416 
1417   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1418   /// for vectorization factor \p VF.
1419   bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
1420     return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
1421            !isProfitableToScalarize(I, VF) &&
1422            !isScalarAfterVectorization(I, VF);
1423   }
1424 
1425   /// Decision that was taken during cost calculation for memory instruction.
1426   enum InstWidening {
1427     CM_Unknown,
1428     CM_Widen,         // For consecutive accesses with stride +1.
1429     CM_Widen_Reverse, // For consecutive accesses with stride -1.
1430     CM_Interleave,
1431     CM_GatherScatter,
1432     CM_Scalarize
1433   };
1434 
1435   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1436   /// instruction \p I and vector width \p VF.
1437   void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
1438                            InstructionCost Cost) {
1439     assert(VF.isVector() && "Expected VF >=2");
1440     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1441   }
1442 
1443   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1444   /// interleaving group \p Grp and vector width \p VF.
1445   void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
1446                            ElementCount VF, InstWidening W,
1447                            InstructionCost Cost) {
1448     assert(VF.isVector() && "Expected VF >=2");
1449     /// Broadcast this decicion to all instructions inside the group.
1450     /// But the cost will be assigned to one instruction only.
1451     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1452       if (auto *I = Grp->getMember(i)) {
1453         if (Grp->getInsertPos() == I)
1454           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1455         else
1456           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1457       }
1458     }
1459   }
1460 
1461   /// Return the cost model decision for the given instruction \p I and vector
1462   /// width \p VF. Return CM_Unknown if this instruction did not pass
1463   /// through the cost modeling.
1464   InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
1465     assert(VF.isVector() && "Expected VF to be a vector VF");
1466     // Cost model is not run in the VPlan-native path - return conservative
1467     // result until this changes.
1468     if (EnableVPlanNativePath)
1469       return CM_GatherScatter;
1470 
1471     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1472     auto Itr = WideningDecisions.find(InstOnVF);
1473     if (Itr == WideningDecisions.end())
1474       return CM_Unknown;
1475     return Itr->second.first;
1476   }
1477 
1478   /// Return the vectorization cost for the given instruction \p I and vector
1479   /// width \p VF.
1480   InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
1481     assert(VF.isVector() && "Expected VF >=2");
1482     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1483     assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1484            "The cost is not calculated");
1485     return WideningDecisions[InstOnVF].second;
1486   }
1487 
1488   /// Return True if instruction \p I is an optimizable truncate whose operand
1489   /// is an induction variable. Such a truncate will be removed by adding a new
1490   /// induction variable with the destination type.
1491   bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
1492     // If the instruction is not a truncate, return false.
1493     auto *Trunc = dyn_cast<TruncInst>(I);
1494     if (!Trunc)
1495       return false;
1496 
1497     // Get the source and destination types of the truncate.
1498     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1499     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1500 
1501     // If the truncate is free for the given types, return false. Replacing a
1502     // free truncate with an induction variable would add an induction variable
1503     // update instruction to each iteration of the loop. We exclude from this
1504     // check the primary induction variable since it will need an update
1505     // instruction regardless.
1506     Value *Op = Trunc->getOperand(0);
1507     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1508       return false;
1509 
1510     // If the truncated value is not an induction variable, return false.
1511     return Legal->isInductionPhi(Op);
1512   }
1513 
1514   /// Collects the instructions to scalarize for each predicated instruction in
1515   /// the loop.
1516   void collectInstsToScalarize(ElementCount VF);
1517 
1518   /// Collect Uniform and Scalar values for the given \p VF.
1519   /// The sets depend on CM decision for Load/Store instructions
1520   /// that may be vectorized as interleave, gather-scatter or scalarized.
1521   void collectUniformsAndScalars(ElementCount VF) {
1522     // Do the analysis once.
1523     if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
1524       return;
1525     setCostBasedWideningDecision(VF);
1526     collectLoopUniforms(VF);
1527     collectLoopScalars(VF);
1528   }
1529 
1530   /// Returns true if the target machine supports masked store operation
1531   /// for the given \p DataType and kind of access to \p Ptr.
1532   bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const {
1533     return Legal->isConsecutivePtr(DataType, Ptr) &&
1534            TTI.isLegalMaskedStore(DataType, Alignment);
1535   }
1536 
1537   /// Returns true if the target machine supports masked load operation
1538   /// for the given \p DataType and kind of access to \p Ptr.
1539   bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const {
1540     return Legal->isConsecutivePtr(DataType, Ptr) &&
1541            TTI.isLegalMaskedLoad(DataType, Alignment);
1542   }
1543 
1544   /// Returns true if the target machine can represent \p V as a masked gather
1545   /// or scatter operation.
1546   bool isLegalGatherOrScatter(Value *V) {
1547     bool LI = isa<LoadInst>(V);
1548     bool SI = isa<StoreInst>(V);
1549     if (!LI && !SI)
1550       return false;
1551     auto *Ty = getLoadStoreType(V);
1552     Align Align = getLoadStoreAlignment(V);
1553     return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
1554            (SI && TTI.isLegalMaskedScatter(Ty, Align));
1555   }
1556 
1557   /// Returns true if the target machine supports all of the reduction
1558   /// variables found for the given VF.
1559   bool canVectorizeReductions(ElementCount VF) const {
1560     return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
1561       const RecurrenceDescriptor &RdxDesc = Reduction.second;
1562       return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
1563     }));
1564   }
1565 
1566   /// Returns true if \p I is an instruction that will be scalarized with
1567   /// predication. Such instructions include conditional stores and
1568   /// instructions that may divide by zero.
1569   /// If a non-zero VF has been calculated, we check if I will be scalarized
1570   /// predication for that VF.
1571   bool isScalarWithPredication(Instruction *I) const;
1572 
1573   // Returns true if \p I is an instruction that will be predicated either
1574   // through scalar predication or masked load/store or masked gather/scatter.
1575   // Superset of instructions that return true for isScalarWithPredication.
1576   bool isPredicatedInst(Instruction *I, bool IsKnownUniform = false) {
1577     // When we know the load is uniform and the original scalar loop was not
1578     // predicated we don't need to mark it as a predicated instruction. Any
1579     // vectorised blocks created when tail-folding are something artificial we
1580     // have introduced and we know there is always at least one active lane.
1581     // That's why we call Legal->blockNeedsPredication here because it doesn't
1582     // query tail-folding.
1583     if (IsKnownUniform && isa<LoadInst>(I) &&
1584         !Legal->blockNeedsPredication(I->getParent()))
1585       return false;
1586     if (!blockNeedsPredicationForAnyReason(I->getParent()))
1587       return false;
1588     // Loads and stores that need some form of masked operation are predicated
1589     // instructions.
1590     if (isa<LoadInst>(I) || isa<StoreInst>(I))
1591       return Legal->isMaskRequired(I);
1592     return isScalarWithPredication(I);
1593   }
1594 
1595   /// Returns true if \p I is a memory instruction with consecutive memory
1596   /// access that can be widened.
1597   bool
1598   memoryInstructionCanBeWidened(Instruction *I,
1599                                 ElementCount VF = ElementCount::getFixed(1));
1600 
1601   /// Returns true if \p I is a memory instruction in an interleaved-group
1602   /// of memory accesses that can be vectorized with wide vector loads/stores
1603   /// and shuffles.
1604   bool
1605   interleavedAccessCanBeWidened(Instruction *I,
1606                                 ElementCount VF = ElementCount::getFixed(1));
1607 
1608   /// Check if \p Instr belongs to any interleaved access group.
1609   bool isAccessInterleaved(Instruction *Instr) {
1610     return InterleaveInfo.isInterleaved(Instr);
1611   }
1612 
1613   /// Get the interleaved access group that \p Instr belongs to.
1614   const InterleaveGroup<Instruction> *
1615   getInterleavedAccessGroup(Instruction *Instr) {
1616     return InterleaveInfo.getInterleaveGroup(Instr);
1617   }
1618 
1619   /// Returns true if we're required to use a scalar epilogue for at least
1620   /// the final iteration of the original loop.
1621   bool requiresScalarEpilogue(ElementCount VF) const {
1622     if (!isScalarEpilogueAllowed())
1623       return false;
1624     // If we might exit from anywhere but the latch, must run the exiting
1625     // iteration in scalar form.
1626     if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
1627       return true;
1628     return VF.isVector() && InterleaveInfo.requiresScalarEpilogue();
1629   }
1630 
1631   /// Returns true if a scalar epilogue is not allowed due to optsize or a
1632   /// loop hint annotation.
1633   bool isScalarEpilogueAllowed() const {
1634     return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1635   }
1636 
1637   /// Returns true if all loop blocks should be masked to fold tail loop.
1638   bool foldTailByMasking() const { return FoldTailByMasking; }
1639 
1640   /// Returns true if the instructions in this block requires predication
1641   /// for any reason, e.g. because tail folding now requires a predicate
1642   /// or because the block in the original loop was predicated.
1643   bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const {
1644     return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1645   }
1646 
1647   /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
1648   /// nodes to the chain of instructions representing the reductions. Uses a
1649   /// MapVector to ensure deterministic iteration order.
1650   using ReductionChainMap =
1651       SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;
1652 
1653   /// Return the chain of instructions representing an inloop reduction.
1654   const ReductionChainMap &getInLoopReductionChains() const {
1655     return InLoopReductionChains;
1656   }
1657 
1658   /// Returns true if the Phi is part of an inloop reduction.
1659   bool isInLoopReduction(PHINode *Phi) const {
1660     return InLoopReductionChains.count(Phi);
1661   }
1662 
1663   /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1664   /// with factor VF.  Return the cost of the instruction, including
1665   /// scalarization overhead if it's needed.
1666   InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;
1667 
1668   /// Estimate cost of a call instruction CI if it were vectorized with factor
1669   /// VF. Return the cost of the instruction, including scalarization overhead
1670   /// if it's needed. The flag NeedToScalarize shows if the call needs to be
1671   /// scalarized -
1672   /// i.e. either vector version isn't available, or is too expensive.
1673   InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
1674                                     bool &NeedToScalarize) const;
1675 
1676   /// Returns true if the per-lane cost of VectorizationFactor A is lower than
1677   /// that of B.
1678   bool isMoreProfitable(const VectorizationFactor &A,
1679                         const VectorizationFactor &B) const;
1680 
1681   /// Invalidates decisions already taken by the cost model.
1682   void invalidateCostModelingDecisions() {
1683     WideningDecisions.clear();
1684     Uniforms.clear();
1685     Scalars.clear();
1686   }
1687 
1688 private:
1689   unsigned NumPredStores = 0;
1690 
1691   /// \return An upper bound for the vectorization factors for both
1692   /// fixed and scalable vectorization, where the minimum-known number of
1693   /// elements is a power-of-2 larger than zero. If scalable vectorization is
1694   /// disabled or unsupported, then the scalable part will be equal to
1695   /// ElementCount::getScalable(0).
1696   FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
1697                                            ElementCount UserVF,
1698                                            bool FoldTailByMasking);
1699 
1700   /// \return the maximized element count based on the targets vector
1701   /// registers and the loop trip-count, but limited to a maximum safe VF.
1702   /// This is a helper function of computeFeasibleMaxVF.
1703   /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
1704   /// issue that occurred on one of the buildbots which cannot be reproduced
1705   /// without having access to the properietary compiler (see comments on
1706   /// D98509). The issue is currently under investigation and this workaround
1707   /// will be removed as soon as possible.
1708   ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
1709                                        unsigned SmallestType,
1710                                        unsigned WidestType,
1711                                        const ElementCount &MaxSafeVF,
1712                                        bool FoldTailByMasking);
1713 
1714   /// \return the maximum legal scalable VF, based on the safe max number
1715   /// of elements.
1716   ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1717 
1718   /// The vectorization cost is a combination of the cost itself and a boolean
1719   /// indicating whether any of the contributing operations will actually
1720   /// operate on vector values after type legalization in the backend. If this
1721   /// latter value is false, then all operations will be scalarized (i.e. no
1722   /// vectorization has actually taken place).
1723   using VectorizationCostTy = std::pair<InstructionCost, bool>;
1724 
1725   /// Returns the expected execution cost. The unit of the cost does
1726   /// not matter because we use the 'cost' units to compare different
1727   /// vector widths. The cost that is returned is *not* normalized by
1728   /// the factor width. If \p Invalid is not nullptr, this function
1729   /// will add a pair(Instruction*, ElementCount) to \p Invalid for
1730   /// each instruction that has an Invalid cost for the given VF.
1731   using InstructionVFPair = std::pair<Instruction *, ElementCount>;
1732   VectorizationCostTy
1733   expectedCost(ElementCount VF,
1734                SmallVectorImpl<InstructionVFPair> *Invalid = nullptr);
1735 
1736   /// Returns the execution time cost of an instruction for a given vector
1737   /// width. Vector width of one means scalar.
1738   VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1739 
1740   /// The cost-computation logic from getInstructionCost which provides
1741   /// the vector type as an output parameter.
1742   InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
1743                                      Type *&VectorTy);
1744 
1745   /// Return the cost of instructions in an inloop reduction pattern, if I is
1746   /// part of that pattern.
1747   Optional<InstructionCost>
1748   getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy,
1749                           TTI::TargetCostKind CostKind);
1750 
1751   /// Calculate vectorization cost of memory instruction \p I.
1752   InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1753 
1754   /// The cost computation for scalarized memory instruction.
1755   InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1756 
1757   /// The cost computation for interleaving group of memory instructions.
1758   InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1759 
1760   /// The cost computation for Gather/Scatter instruction.
1761   InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1762 
1763   /// The cost computation for widening instruction \p I with consecutive
1764   /// memory access.
1765   InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1766 
1767   /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1768   /// Load: scalar load + broadcast.
1769   /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1770   /// element)
1771   InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1772 
1773   /// Estimate the overhead of scalarizing an instruction. This is a
1774   /// convenience wrapper for the type-based getScalarizationOverhead API.
1775   InstructionCost getScalarizationOverhead(Instruction *I,
1776                                            ElementCount VF) const;
1777 
1778   /// Returns whether the instruction is a load or store and will be a emitted
1779   /// as a vector operation.
1780   bool isConsecutiveLoadOrStore(Instruction *I);
1781 
1782   /// Returns true if an artificially high cost for emulated masked memrefs
1783   /// should be used.
1784   bool useEmulatedMaskMemRefHack(Instruction *I);
1785 
1786   /// Map of scalar integer values to the smallest bitwidth they can be legally
1787   /// represented as. The vector equivalents of these values should be truncated
1788   /// to this type.
1789   MapVector<Instruction *, uint64_t> MinBWs;
1790 
1791   /// A type representing the costs for instructions if they were to be
1792   /// scalarized rather than vectorized. The entries are Instruction-Cost
1793   /// pairs.
1794   using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1795 
1796   /// A set containing all BasicBlocks that are known to present after
1797   /// vectorization as a predicated block.
1798   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1799 
1800   /// Records whether it is allowed to have the original scalar loop execute at
1801   /// least once. This may be needed as a fallback loop in case runtime
1802   /// aliasing/dependence checks fail, or to handle the tail/remainder
1803   /// iterations when the trip count is unknown or doesn't divide by the VF,
1804   /// or as a peel-loop to handle gaps in interleave-groups.
1805   /// Under optsize and when the trip count is very small we don't allow any
1806   /// iterations to execute in the scalar loop.
1807   ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1808 
1809   /// All blocks of loop are to be masked to fold tail of scalar iterations.
1810   bool FoldTailByMasking = false;
1811 
1812   /// A map holding scalar costs for different vectorization factors. The
1813   /// presence of a cost for an instruction in the mapping indicates that the
1814   /// instruction will be scalarized when vectorizing with the associated
1815   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1816   DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1817 
1818   /// Holds the instructions known to be uniform after vectorization.
1819   /// The data is collected per VF.
1820   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1821 
1822   /// Holds the instructions known to be scalar after vectorization.
1823   /// The data is collected per VF.
1824   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1825 
1826   /// Holds the instructions (address computations) that are forced to be
1827   /// scalarized.
1828   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1829 
1830   /// PHINodes of the reductions that should be expanded in-loop along with
1831   /// their associated chains of reduction operations, in program order from top
1832   /// (PHI) to bottom
1833   ReductionChainMap InLoopReductionChains;
1834 
1835   /// A Map of inloop reduction operations and their immediate chain operand.
1836   /// FIXME: This can be removed once reductions can be costed correctly in
1837   /// vplan. This was added to allow quick lookup to the inloop operations,
1838   /// without having to loop through InLoopReductionChains.
1839   DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1840 
1841   /// Returns the expected difference in cost from scalarizing the expression
1842   /// feeding a predicated instruction \p PredInst. The instructions to
1843   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1844   /// non-negative return value implies the expression will be scalarized.
1845   /// Currently, only single-use chains are considered for scalarization.
1846   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1847                               ElementCount VF);
1848 
1849   /// Collect the instructions that are uniform after vectorization. An
1850   /// instruction is uniform if we represent it with a single scalar value in
1851   /// the vectorized loop corresponding to each vector iteration. Examples of
1852   /// uniform instructions include pointer operands of consecutive or
1853   /// interleaved memory accesses. Note that although uniformity implies an
1854   /// instruction will be scalar, the reverse is not true. In general, a
1855   /// scalarized instruction will be represented by VF scalar values in the
1856   /// vectorized loop, each corresponding to an iteration of the original
1857   /// scalar loop.
1858   void collectLoopUniforms(ElementCount VF);
1859 
1860   /// Collect the instructions that are scalar after vectorization. An
1861   /// instruction is scalar if it is known to be uniform or will be scalarized
1862   /// during vectorization. collectLoopScalars should only add non-uniform nodes
1863   /// to the list if they are used by a load/store instruction that is marked as
1864   /// CM_Scalarize. Non-uniform scalarized instructions will be represented by
1865   /// VF values in the vectorized loop, each corresponding to an iteration of
1866   /// the original scalar loop.
1867   void collectLoopScalars(ElementCount VF);
1868 
1869   /// Keeps cost model vectorization decision and cost for instructions.
1870   /// Right now it is used for memory instructions only.
1871   using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1872                                 std::pair<InstWidening, InstructionCost>>;
1873 
1874   DecisionList WideningDecisions;
1875 
1876   /// Returns true if \p V is expected to be vectorized and it needs to be
1877   /// extracted.
1878   bool needsExtract(Value *V, ElementCount VF) const {
1879     Instruction *I = dyn_cast<Instruction>(V);
1880     if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1881         TheLoop->isLoopInvariant(I))
1882       return false;
1883 
1884     // Assume we can vectorize V (and hence we need extraction) if the
1885     // scalars are not computed yet. This can happen, because it is called
1886     // via getScalarizationOverhead from setCostBasedWideningDecision, before
1887     // the scalars are collected. That should be a safe assumption in most
1888     // cases, because we check if the operands have vectorizable types
1889     // beforehand in LoopVectorizationLegality.
1890     return Scalars.find(VF) == Scalars.end() ||
1891            !isScalarAfterVectorization(I, VF);
1892   };
1893 
1894   /// Returns a range containing only operands needing to be extracted.
1895   SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1896                                                    ElementCount VF) const {
1897     return SmallVector<Value *, 4>(make_filter_range(
1898         Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
1899   }
1900 
1901   /// Determines if we have the infrastructure to vectorize loop \p L and its
1902   /// epilogue, assuming the main loop is vectorized by \p VF.
1903   bool isCandidateForEpilogueVectorization(const Loop &L,
1904                                            const ElementCount VF) const;
1905 
1906   /// Returns true if epilogue vectorization is considered profitable, and
1907   /// false otherwise.
1908   /// \p VF is the vectorization factor chosen for the original loop.
1909   bool isEpilogueVectorizationProfitable(const ElementCount VF) const;
1910 
1911 public:
1912   /// The loop that we evaluate.
1913   Loop *TheLoop;
1914 
1915   /// Predicated scalar evolution analysis.
1916   PredicatedScalarEvolution &PSE;
1917 
1918   /// Loop Info analysis.
1919   LoopInfo *LI;
1920 
1921   /// Vectorization legality.
1922   LoopVectorizationLegality *Legal;
1923 
1924   /// Vector target information.
1925   const TargetTransformInfo &TTI;
1926 
1927   /// Target Library Info.
1928   const TargetLibraryInfo *TLI;
1929 
1930   /// Demanded bits analysis.
1931   DemandedBits *DB;
1932 
1933   /// Assumption cache.
1934   AssumptionCache *AC;
1935 
1936   /// Interface to emit optimization remarks.
1937   OptimizationRemarkEmitter *ORE;
1938 
1939   const Function *TheFunction;
1940 
1941   /// Loop Vectorize Hint.
1942   const LoopVectorizeHints *Hints;
1943 
1944   /// The interleave access information contains groups of interleaved accesses
1945   /// with the same stride and close to each other.
1946   InterleavedAccessInfo &InterleaveInfo;
1947 
1948   /// Values to ignore in the cost model.
1949   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1950 
1951   /// Values to ignore in the cost model when VF > 1.
1952   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1953 
1954   /// All element types found in the loop.
1955   SmallPtrSet<Type *, 16> ElementTypesInLoop;
1956 
1957   /// Profitable vector factors.
1958   SmallVector<VectorizationFactor, 8> ProfitableVFs;
1959 };
1960 } // end namespace llvm
1961 
1962 /// Helper struct to manage generating runtime checks for vectorization.
1963 ///
1964 /// The runtime checks are created up-front in temporary blocks to allow better
1965 /// estimating the cost and un-linked from the existing IR. After deciding to
1966 /// vectorize, the checks are moved back. If deciding not to vectorize, the
1967 /// temporary blocks are completely removed.
1968 class GeneratedRTChecks {
1969   /// Basic block which contains the generated SCEV checks, if any.
1970   BasicBlock *SCEVCheckBlock = nullptr;
1971 
1972   /// The value representing the result of the generated SCEV checks. If it is
1973   /// nullptr, either no SCEV checks have been generated or they have been used.
1974   Value *SCEVCheckCond = nullptr;
1975 
1976   /// Basic block which contains the generated memory runtime checks, if any.
1977   BasicBlock *MemCheckBlock = nullptr;
1978 
1979   /// The value representing the result of the generated memory runtime checks.
1980   /// If it is nullptr, either no memory runtime checks have been generated or
1981   /// they have been used.
1982   Value *MemRuntimeCheckCond = nullptr;
1983 
1984   DominatorTree *DT;
1985   LoopInfo *LI;
1986 
1987   SCEVExpander SCEVExp;
1988   SCEVExpander MemCheckExp;
1989 
1990 public:
1991   GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI,
1992                     const DataLayout &DL)
1993       : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"),
1994         MemCheckExp(SE, DL, "scev.check") {}
1995 
1996   /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
1997   /// accurately estimate the cost of the runtime checks. The blocks are
1998   /// un-linked from the IR and is added back during vector code generation. If
1999   /// there is no vector code generation, the check blocks are removed
2000   /// completely.
2001   void Create(Loop *L, const LoopAccessInfo &LAI,
2002               const SCEVUnionPredicate &UnionPred) {
2003 
2004     BasicBlock *LoopHeader = L->getHeader();
2005     BasicBlock *Preheader = L->getLoopPreheader();
2006 
2007     // Use SplitBlock to create blocks for SCEV & memory runtime checks to
2008     // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
2009     // may be used by SCEVExpander. The blocks will be un-linked from their
2010     // predecessors and removed from LI & DT at the end of the function.
2011     if (!UnionPred.isAlwaysTrue()) {
2012       SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
2013                                   nullptr, "vector.scevcheck");
2014 
2015       SCEVCheckCond = SCEVExp.expandCodeForPredicate(
2016           &UnionPred, SCEVCheckBlock->getTerminator());
2017     }
2018 
2019     const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
2020     if (RtPtrChecking.Need) {
2021       auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
2022       MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
2023                                  "vector.memcheck");
2024 
2025       MemRuntimeCheckCond =
2026           addRuntimeChecks(MemCheckBlock->getTerminator(), L,
2027                            RtPtrChecking.getChecks(), MemCheckExp);
2028       assert(MemRuntimeCheckCond &&
2029              "no RT checks generated although RtPtrChecking "
2030              "claimed checks are required");
2031     }
2032 
2033     if (!MemCheckBlock && !SCEVCheckBlock)
2034       return;
2035 
2036     // Unhook the temporary block with the checks, update various places
2037     // accordingly.
2038     if (SCEVCheckBlock)
2039       SCEVCheckBlock->replaceAllUsesWith(Preheader);
2040     if (MemCheckBlock)
2041       MemCheckBlock->replaceAllUsesWith(Preheader);
2042 
2043     if (SCEVCheckBlock) {
2044       SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
2045       new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
2046       Preheader->getTerminator()->eraseFromParent();
2047     }
2048     if (MemCheckBlock) {
2049       MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
2050       new UnreachableInst(Preheader->getContext(), MemCheckBlock);
2051       Preheader->getTerminator()->eraseFromParent();
2052     }
2053 
2054     DT->changeImmediateDominator(LoopHeader, Preheader);
2055     if (MemCheckBlock) {
2056       DT->eraseNode(MemCheckBlock);
2057       LI->removeBlock(MemCheckBlock);
2058     }
2059     if (SCEVCheckBlock) {
2060       DT->eraseNode(SCEVCheckBlock);
2061       LI->removeBlock(SCEVCheckBlock);
2062     }
2063   }
2064 
2065   /// Remove the created SCEV & memory runtime check blocks & instructions, if
2066   /// unused.
2067   ~GeneratedRTChecks() {
2068     SCEVExpanderCleaner SCEVCleaner(SCEVExp, *DT);
2069     SCEVExpanderCleaner MemCheckCleaner(MemCheckExp, *DT);
2070     if (!SCEVCheckCond)
2071       SCEVCleaner.markResultUsed();
2072 
2073     if (!MemRuntimeCheckCond)
2074       MemCheckCleaner.markResultUsed();
2075 
2076     if (MemRuntimeCheckCond) {
2077       auto &SE = *MemCheckExp.getSE();
2078       // Memory runtime check generation creates compares that use expanded
2079       // values. Remove them before running the SCEVExpanderCleaners.
2080       for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
2081         if (MemCheckExp.isInsertedInstruction(&I))
2082           continue;
2083         SE.forgetValue(&I);
2084         I.eraseFromParent();
2085       }
2086     }
2087     MemCheckCleaner.cleanup();
2088     SCEVCleaner.cleanup();
2089 
2090     if (SCEVCheckCond)
2091       SCEVCheckBlock->eraseFromParent();
2092     if (MemRuntimeCheckCond)
2093       MemCheckBlock->eraseFromParent();
2094   }
2095 
2096   /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and
2097   /// adjusts the branches to branch to the vector preheader or \p Bypass,
2098   /// depending on the generated condition.
2099   BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass,
2100                              BasicBlock *LoopVectorPreHeader,
2101                              BasicBlock *LoopExitBlock) {
2102     if (!SCEVCheckCond)
2103       return nullptr;
2104     if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond))
2105       if (C->isZero())
2106         return nullptr;
2107 
2108     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2109 
2110     BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock);
2111     // Create new preheader for vector loop.
2112     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2113       PL->addBasicBlockToLoop(SCEVCheckBlock, *LI);
2114 
2115     SCEVCheckBlock->getTerminator()->eraseFromParent();
2116     SCEVCheckBlock->moveBefore(LoopVectorPreHeader);
2117     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2118                                                 SCEVCheckBlock);
2119 
2120     DT->addNewBlock(SCEVCheckBlock, Pred);
2121     DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock);
2122 
2123     ReplaceInstWithInst(
2124         SCEVCheckBlock->getTerminator(),
2125         BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond));
2126     // Mark the check as used, to prevent it from being removed during cleanup.
2127     SCEVCheckCond = nullptr;
2128     return SCEVCheckBlock;
2129   }
2130 
2131   /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts
2132   /// the branches to branch to the vector preheader or \p Bypass, depending on
2133   /// the generated condition.
2134   BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass,
2135                                    BasicBlock *LoopVectorPreHeader) {
2136     // Check if we generated code that checks in runtime if arrays overlap.
2137     if (!MemRuntimeCheckCond)
2138       return nullptr;
2139 
2140     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2141     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2142                                                 MemCheckBlock);
2143 
2144     DT->addNewBlock(MemCheckBlock, Pred);
2145     DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock);
2146     MemCheckBlock->moveBefore(LoopVectorPreHeader);
2147 
2148     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2149       PL->addBasicBlockToLoop(MemCheckBlock, *LI);
2150 
2151     ReplaceInstWithInst(
2152         MemCheckBlock->getTerminator(),
2153         BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond));
2154     MemCheckBlock->getTerminator()->setDebugLoc(
2155         Pred->getTerminator()->getDebugLoc());
2156 
2157     // Mark the check as used, to prevent it from being removed during cleanup.
2158     MemRuntimeCheckCond = nullptr;
2159     return MemCheckBlock;
2160   }
2161 };
2162 
2163 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
2164 // vectorization. The loop needs to be annotated with #pragma omp simd
2165 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the
2166 // vector length information is not provided, vectorization is not considered
2167 // explicit. Interleave hints are not allowed either. These limitations will be
2168 // relaxed in the future.
2169 // Please, note that we are currently forced to abuse the pragma 'clang
2170 // vectorize' semantics. This pragma provides *auto-vectorization hints*
2171 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
2172 // provides *explicit vectorization hints* (LV can bypass legal checks and
2173 // assume that vectorization is legal). However, both hints are implemented
2174 // using the same metadata (llvm.loop.vectorize, processed by
2175 // LoopVectorizeHints). This will be fixed in the future when the native IR
2176 // representation for pragma 'omp simd' is introduced.
2177 static bool isExplicitVecOuterLoop(Loop *OuterLp,
2178                                    OptimizationRemarkEmitter *ORE) {
2179   assert(!OuterLp->isInnermost() && "This is not an outer loop");
2180   LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
2181 
2182   // Only outer loops with an explicit vectorization hint are supported.
2183   // Unannotated outer loops are ignored.
2184   if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
2185     return false;
2186 
2187   Function *Fn = OuterLp->getHeader()->getParent();
2188   if (!Hints.allowVectorization(Fn, OuterLp,
2189                                 true /*VectorizeOnlyWhenForced*/)) {
2190     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
2191     return false;
2192   }
2193 
2194   if (Hints.getInterleave() > 1) {
2195     // TODO: Interleave support is future work.
2196     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
2197                          "outer loops.\n");
2198     Hints.emitRemarkWithHints();
2199     return false;
2200   }
2201 
2202   return true;
2203 }
2204 
2205 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
2206                                   OptimizationRemarkEmitter *ORE,
2207                                   SmallVectorImpl<Loop *> &V) {
2208   // Collect inner loops and outer loops without irreducible control flow. For
2209   // now, only collect outer loops that have explicit vectorization hints. If we
2210   // are stress testing the VPlan H-CFG construction, we collect the outermost
2211   // loop of every loop nest.
2212   if (L.isInnermost() || VPlanBuildStressTest ||
2213       (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
2214     LoopBlocksRPO RPOT(&L);
2215     RPOT.perform(LI);
2216     if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
2217       V.push_back(&L);
2218       // TODO: Collect inner loops inside marked outer loops in case
2219       // vectorization fails for the outer loop. Do not invoke
2220       // 'containsIrreducibleCFG' again for inner loops when the outer loop is
2221       // already known to be reducible. We can use an inherited attribute for
2222       // that.
2223       return;
2224     }
2225   }
2226   for (Loop *InnerL : L)
2227     collectSupportedLoops(*InnerL, LI, ORE, V);
2228 }
2229 
2230 namespace {
2231 
2232 /// The LoopVectorize Pass.
2233 struct LoopVectorize : public FunctionPass {
2234   /// Pass identification, replacement for typeid
2235   static char ID;
2236 
2237   LoopVectorizePass Impl;
2238 
2239   explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
2240                          bool VectorizeOnlyWhenForced = false)
2241       : FunctionPass(ID),
2242         Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
2243     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
2244   }
2245 
2246   bool runOnFunction(Function &F) override {
2247     if (skipFunction(F))
2248       return false;
2249 
2250     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2251     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2252     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
2253     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2254     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
2255     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
2256     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
2257     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2258     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2259     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
2260     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
2261     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2262     auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
2263 
2264     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
2265         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
2266 
2267     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
2268                         GetLAA, *ORE, PSI).MadeAnyChange;
2269   }
2270 
2271   void getAnalysisUsage(AnalysisUsage &AU) const override {
2272     AU.addRequired<AssumptionCacheTracker>();
2273     AU.addRequired<BlockFrequencyInfoWrapperPass>();
2274     AU.addRequired<DominatorTreeWrapperPass>();
2275     AU.addRequired<LoopInfoWrapperPass>();
2276     AU.addRequired<ScalarEvolutionWrapperPass>();
2277     AU.addRequired<TargetTransformInfoWrapperPass>();
2278     AU.addRequired<AAResultsWrapperPass>();
2279     AU.addRequired<LoopAccessLegacyAnalysis>();
2280     AU.addRequired<DemandedBitsWrapperPass>();
2281     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2282     AU.addRequired<InjectTLIMappingsLegacy>();
2283 
2284     // We currently do not preserve loopinfo/dominator analyses with outer loop
2285     // vectorization. Until this is addressed, mark these analyses as preserved
2286     // only for non-VPlan-native path.
2287     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
2288     if (!EnableVPlanNativePath) {
2289       AU.addPreserved<LoopInfoWrapperPass>();
2290       AU.addPreserved<DominatorTreeWrapperPass>();
2291     }
2292 
2293     AU.addPreserved<BasicAAWrapperPass>();
2294     AU.addPreserved<GlobalsAAWrapperPass>();
2295     AU.addRequired<ProfileSummaryInfoWrapperPass>();
2296   }
2297 };
2298 
2299 } // end anonymous namespace
2300 
2301 //===----------------------------------------------------------------------===//
2302 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
2303 // LoopVectorizationCostModel and LoopVectorizationPlanner.
2304 //===----------------------------------------------------------------------===//
2305 
2306 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
2307   // We need to place the broadcast of invariant variables outside the loop,
2308   // but only if it's proven safe to do so. Else, broadcast will be inside
2309   // vector loop body.
2310   Instruction *Instr = dyn_cast<Instruction>(V);
2311   bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
2312                      (!Instr ||
2313                       DT->dominates(Instr->getParent(), LoopVectorPreHeader));
2314   // Place the code for broadcasting invariant variables in the new preheader.
2315   IRBuilder<>::InsertPointGuard Guard(Builder);
2316   if (SafeToHoist)
2317     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2318 
2319   // Broadcast the scalar into all locations in the vector.
2320   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
2321 
2322   return Shuf;
2323 }
2324 
2325 /// This function adds
2326 /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
2327 /// to each vector element of Val. The sequence starts at StartIndex.
2328 /// \p Opcode is relevant for FP induction variable.
2329 static Value *getStepVector(Value *Val, Value *StartIdx, Value *Step,
2330                             Instruction::BinaryOps BinOp, ElementCount VF,
2331                             IRBuilder<> &Builder) {
2332   if (VF.isScalar()) {
2333     // When unrolling and the VF is 1, we only need to add a simple scalar.
2334     Type *Ty = Val->getType();
2335     assert(!Ty->isVectorTy() && "Val must be a scalar");
2336 
2337     if (Ty->isFloatingPointTy()) {
2338       // Floating-point operations inherit FMF via the builder's flags.
2339       Value *MulOp = Builder.CreateFMul(StartIdx, Step);
2340       return Builder.CreateBinOp(BinOp, Val, MulOp);
2341     }
2342     return Builder.CreateAdd(Val, Builder.CreateMul(StartIdx, Step),
2343                              "induction");
2344   }
2345 
2346   // Create and check the types.
2347   auto *ValVTy = cast<VectorType>(Val->getType());
2348   ElementCount VLen = ValVTy->getElementCount();
2349 
2350   Type *STy = Val->getType()->getScalarType();
2351   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
2352          "Induction Step must be an integer or FP");
2353   assert(Step->getType() == STy && "Step has wrong type");
2354 
2355   SmallVector<Constant *, 8> Indices;
2356 
2357   // Create a vector of consecutive numbers from zero to VF.
2358   VectorType *InitVecValVTy = ValVTy;
2359   Type *InitVecValSTy = STy;
2360   if (STy->isFloatingPointTy()) {
2361     InitVecValSTy =
2362         IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
2363     InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
2364   }
2365   Value *InitVec = Builder.CreateStepVector(InitVecValVTy);
2366 
2367   // Splat the StartIdx
2368   Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx);
2369 
2370   if (STy->isIntegerTy()) {
2371     InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);
2372     Step = Builder.CreateVectorSplat(VLen, Step);
2373     assert(Step->getType() == Val->getType() && "Invalid step vec");
2374     // FIXME: The newly created binary instructions should contain nsw/nuw
2375     // flags, which can be found from the original scalar operations.
2376     Step = Builder.CreateMul(InitVec, Step);
2377     return Builder.CreateAdd(Val, Step, "induction");
2378   }
2379 
2380   // Floating point induction.
2381   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
2382          "Binary Opcode should be specified for FP induction");
2383   InitVec = Builder.CreateUIToFP(InitVec, ValVTy);
2384   InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat);
2385 
2386   Step = Builder.CreateVectorSplat(VLen, Step);
2387   Value *MulOp = Builder.CreateFMul(InitVec, Step);
2388   return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
2389 }
2390 
2391 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
2392     const InductionDescriptor &II, Value *Step, Value *Start,
2393     Instruction *EntryVal, VPValue *Def, VPTransformState &State) {
2394   IRBuilder<> &Builder = State.Builder;
2395   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
2396          "Expected either an induction phi-node or a truncate of it!");
2397 
2398   // Construct the initial value of the vector IV in the vector loop preheader
2399   auto CurrIP = Builder.saveIP();
2400   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2401   if (isa<TruncInst>(EntryVal)) {
2402     assert(Start->getType()->isIntegerTy() &&
2403            "Truncation requires an integer type");
2404     auto *TruncType = cast<IntegerType>(EntryVal->getType());
2405     Step = Builder.CreateTrunc(Step, TruncType);
2406     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
2407   }
2408 
2409   Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0);
2410   Value *SplatStart = Builder.CreateVectorSplat(State.VF, Start);
2411   Value *SteppedStart = getStepVector(
2412       SplatStart, Zero, Step, II.getInductionOpcode(), State.VF, State.Builder);
2413 
2414   // We create vector phi nodes for both integer and floating-point induction
2415   // variables. Here, we determine the kind of arithmetic we will perform.
2416   Instruction::BinaryOps AddOp;
2417   Instruction::BinaryOps MulOp;
2418   if (Step->getType()->isIntegerTy()) {
2419     AddOp = Instruction::Add;
2420     MulOp = Instruction::Mul;
2421   } else {
2422     AddOp = II.getInductionOpcode();
2423     MulOp = Instruction::FMul;
2424   }
2425 
2426   // Multiply the vectorization factor by the step using integer or
2427   // floating-point arithmetic as appropriate.
2428   Type *StepType = Step->getType();
2429   Value *RuntimeVF;
2430   if (Step->getType()->isFloatingPointTy())
2431     RuntimeVF = getRuntimeVFAsFloat(Builder, StepType, State.VF);
2432   else
2433     RuntimeVF = getRuntimeVF(Builder, StepType, State.VF);
2434   Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF);
2435 
2436   // Create a vector splat to use in the induction update.
2437   //
2438   // FIXME: If the step is non-constant, we create the vector splat with
2439   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
2440   //        handle a constant vector splat.
2441   Value *SplatVF = isa<Constant>(Mul)
2442                        ? ConstantVector::getSplat(State.VF, cast<Constant>(Mul))
2443                        : Builder.CreateVectorSplat(State.VF, Mul);
2444   Builder.restoreIP(CurrIP);
2445 
2446   // We may need to add the step a number of times, depending on the unroll
2447   // factor. The last of those goes into the PHI.
2448   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
2449                                     &*LoopVectorBody->getFirstInsertionPt());
2450   VecInd->setDebugLoc(EntryVal->getDebugLoc());
2451   Instruction *LastInduction = VecInd;
2452   for (unsigned Part = 0; Part < UF; ++Part) {
2453     State.set(Def, LastInduction, Part);
2454 
2455     if (isa<TruncInst>(EntryVal))
2456       addMetadata(LastInduction, EntryVal);
2457 
2458     LastInduction = cast<Instruction>(
2459         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"));
2460     LastInduction->setDebugLoc(EntryVal->getDebugLoc());
2461   }
2462 
2463   // Move the last step to the end of the latch block. This ensures consistent
2464   // placement of all induction updates.
2465   auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
2466   auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
2467   LastInduction->moveBefore(Br);
2468   LastInduction->setName("vec.ind.next");
2469 
2470   VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
2471   VecInd->addIncoming(LastInduction, LoopVectorLatch);
2472 }
2473 
2474 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
2475   return Cost->isScalarAfterVectorization(I, VF) ||
2476          Cost->isProfitableToScalarize(I, VF);
2477 }
2478 
2479 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
2480   if (shouldScalarizeInstruction(IV))
2481     return true;
2482   auto isScalarInst = [&](User *U) -> bool {
2483     auto *I = cast<Instruction>(U);
2484     return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
2485   };
2486   return llvm::any_of(IV->users(), isScalarInst);
2487 }
2488 
2489 /// Returns true if \p ID starts at 0 and has a step of 1.
2490 static bool isCanonicalID(const InductionDescriptor &ID) {
2491   if (!ID.getConstIntStepValue() || !ID.getConstIntStepValue()->isOne())
2492     return false;
2493   auto *StartC = dyn_cast<ConstantInt>(ID.getStartValue());
2494   return StartC && StartC->isZero();
2495 }
2496 
2497 void InnerLoopVectorizer::widenIntOrFpInduction(
2498     PHINode *IV, const InductionDescriptor &ID, Value *Start, TruncInst *Trunc,
2499     VPValue *Def, VPTransformState &State, Value *CanonicalIV) {
2500   IRBuilder<> &Builder = State.Builder;
2501   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
2502   assert(!State.VF.isZero() && "VF must be non-zero");
2503 
2504   // The value from the original loop to which we are mapping the new induction
2505   // variable.
2506   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
2507 
2508   auto &DL = EntryVal->getModule()->getDataLayout();
2509 
2510   // Generate code for the induction step. Note that induction steps are
2511   // required to be loop-invariant
2512   auto CreateStepValue = [&](const SCEV *Step) -> Value * {
2513     assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) &&
2514            "Induction step should be loop invariant");
2515     if (PSE.getSE()->isSCEVable(IV->getType())) {
2516       SCEVExpander Exp(*PSE.getSE(), DL, "induction");
2517       return Exp.expandCodeFor(Step, Step->getType(),
2518                                State.CFG.VectorPreHeader->getTerminator());
2519     }
2520     return cast<SCEVUnknown>(Step)->getValue();
2521   };
2522 
2523   // The scalar value to broadcast. This is derived from the canonical
2524   // induction variable. If a truncation type is given, truncate the canonical
2525   // induction variable and step. Otherwise, derive these values from the
2526   // induction descriptor.
2527   auto CreateScalarIV = [&](Value *&Step) -> Value * {
2528     Value *ScalarIV = CanonicalIV;
2529     Type *NeededType = IV->getType();
2530     if (!isCanonicalID(ID) || ScalarIV->getType() != NeededType) {
2531       ScalarIV =
2532           NeededType->isIntegerTy()
2533               ? Builder.CreateSExtOrTrunc(ScalarIV, NeededType)
2534               : Builder.CreateCast(Instruction::SIToFP, ScalarIV, NeededType);
2535       ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID,
2536                                       State.CFG.PrevBB);
2537       ScalarIV->setName("offset.idx");
2538     }
2539     if (Trunc) {
2540       auto *TruncType = cast<IntegerType>(Trunc->getType());
2541       assert(Step->getType()->isIntegerTy() &&
2542              "Truncation requires an integer step");
2543       ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
2544       Step = Builder.CreateTrunc(Step, TruncType);
2545     }
2546     return ScalarIV;
2547   };
2548 
2549   // Create the vector values from the scalar IV, in the absence of creating a
2550   // vector IV.
2551   auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) {
2552     Value *Broadcasted = getBroadcastInstrs(ScalarIV);
2553     for (unsigned Part = 0; Part < UF; ++Part) {
2554       assert(!State.VF.isScalable() && "scalable vectors not yet supported.");
2555       Value *StartIdx;
2556       if (Step->getType()->isFloatingPointTy())
2557         StartIdx =
2558             getRuntimeVFAsFloat(Builder, Step->getType(), State.VF * Part);
2559       else
2560         StartIdx = getRuntimeVF(Builder, Step->getType(), State.VF * Part);
2561 
2562       Value *EntryPart =
2563           getStepVector(Broadcasted, StartIdx, Step, ID.getInductionOpcode(),
2564                         State.VF, State.Builder);
2565       State.set(Def, EntryPart, Part);
2566       if (Trunc)
2567         addMetadata(EntryPart, Trunc);
2568     }
2569   };
2570 
2571   // Fast-math-flags propagate from the original induction instruction.
2572   IRBuilder<>::FastMathFlagGuard FMFG(Builder);
2573   if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp()))
2574     Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags());
2575 
2576   // Now do the actual transformations, and start with creating the step value.
2577   Value *Step = CreateStepValue(ID.getStep());
2578   if (State.VF.isScalar()) {
2579     Value *ScalarIV = CreateScalarIV(Step);
2580     CreateSplatIV(ScalarIV, Step);
2581     return;
2582   }
2583 
2584   // Determine if we want a scalar version of the induction variable. This is
2585   // true if the induction variable itself is not widened, or if it has at
2586   // least one user in the loop that is not widened.
2587   auto NeedsScalarIV = needsScalarInduction(EntryVal);
2588   if (!NeedsScalarIV) {
2589     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, State);
2590     return;
2591   }
2592 
2593   // Try to create a new independent vector induction variable. If we can't
2594   // create the phi node, we will splat the scalar induction variable in each
2595   // loop iteration.
2596   if (!shouldScalarizeInstruction(EntryVal)) {
2597     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, State);
2598     Value *ScalarIV = CreateScalarIV(Step);
2599     // Create scalar steps that can be used by instructions we will later
2600     // scalarize. Note that the addition of the scalar steps will not increase
2601     // the number of instructions in the loop in the common case prior to
2602     // InstCombine. We will be trading one vector extract for each scalar step.
2603     buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, State);
2604     return;
2605   }
2606 
2607   // All IV users are scalar instructions, so only emit a scalar IV, not a
2608   // vectorised IV. Except when we tail-fold, then the splat IV feeds the
2609   // predicate used by the masked loads/stores.
2610   Value *ScalarIV = CreateScalarIV(Step);
2611   if (!Cost->isScalarEpilogueAllowed())
2612     CreateSplatIV(ScalarIV, Step);
2613   buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, State);
2614 }
2615 
2616 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
2617                                            Instruction *EntryVal,
2618                                            const InductionDescriptor &ID,
2619                                            VPValue *Def,
2620                                            VPTransformState &State) {
2621   IRBuilder<> &Builder = State.Builder;
2622   // We shouldn't have to build scalar steps if we aren't vectorizing.
2623   assert(State.VF.isVector() && "VF should be greater than one");
2624   // Get the value type and ensure it and the step have the same integer type.
2625   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
2626   assert(ScalarIVTy == Step->getType() &&
2627          "Val and Step should have the same type");
2628 
2629   // We build scalar steps for both integer and floating-point induction
2630   // variables. Here, we determine the kind of arithmetic we will perform.
2631   Instruction::BinaryOps AddOp;
2632   Instruction::BinaryOps MulOp;
2633   if (ScalarIVTy->isIntegerTy()) {
2634     AddOp = Instruction::Add;
2635     MulOp = Instruction::Mul;
2636   } else {
2637     AddOp = ID.getInductionOpcode();
2638     MulOp = Instruction::FMul;
2639   }
2640 
2641   // Determine the number of scalars we need to generate for each unroll
2642   // iteration. If EntryVal is uniform, we only need to generate the first
2643   // lane. Otherwise, we generate all VF values.
2644   bool IsUniform =
2645       Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), State.VF);
2646   unsigned Lanes = IsUniform ? 1 : State.VF.getKnownMinValue();
2647   // Compute the scalar steps and save the results in State.
2648   Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
2649                                      ScalarIVTy->getScalarSizeInBits());
2650   Type *VecIVTy = nullptr;
2651   Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr;
2652   if (!IsUniform && State.VF.isScalable()) {
2653     VecIVTy = VectorType::get(ScalarIVTy, State.VF);
2654     UnitStepVec =
2655         Builder.CreateStepVector(VectorType::get(IntStepTy, State.VF));
2656     SplatStep = Builder.CreateVectorSplat(State.VF, Step);
2657     SplatIV = Builder.CreateVectorSplat(State.VF, ScalarIV);
2658   }
2659 
2660   for (unsigned Part = 0; Part < State.UF; ++Part) {
2661     Value *StartIdx0 = createStepForVF(Builder, IntStepTy, State.VF, Part);
2662 
2663     if (!IsUniform && State.VF.isScalable()) {
2664       auto *SplatStartIdx = Builder.CreateVectorSplat(State.VF, StartIdx0);
2665       auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec);
2666       if (ScalarIVTy->isFloatingPointTy())
2667         InitVec = Builder.CreateSIToFP(InitVec, VecIVTy);
2668       auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep);
2669       auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul);
2670       State.set(Def, Add, Part);
2671       // It's useful to record the lane values too for the known minimum number
2672       // of elements so we do those below. This improves the code quality when
2673       // trying to extract the first element, for example.
2674     }
2675 
2676     if (ScalarIVTy->isFloatingPointTy())
2677       StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy);
2678 
2679     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2680       Value *StartIdx = Builder.CreateBinOp(
2681           AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane));
2682       // The step returned by `createStepForVF` is a runtime-evaluated value
2683       // when VF is scalable. Otherwise, it should be folded into a Constant.
2684       assert((State.VF.isScalable() || isa<Constant>(StartIdx)) &&
2685              "Expected StartIdx to be folded to a constant when VF is not "
2686              "scalable");
2687       auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
2688       auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul);
2689       State.set(Def, Add, VPIteration(Part, Lane));
2690     }
2691   }
2692 }
2693 
2694 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def,
2695                                                     const VPIteration &Instance,
2696                                                     VPTransformState &State) {
2697   Value *ScalarInst = State.get(Def, Instance);
2698   Value *VectorValue = State.get(Def, Instance.Part);
2699   VectorValue = Builder.CreateInsertElement(
2700       VectorValue, ScalarInst,
2701       Instance.Lane.getAsRuntimeExpr(State.Builder, VF));
2702   State.set(Def, VectorValue, Instance.Part);
2703 }
2704 
2705 // Return whether we allow using masked interleave-groups (for dealing with
2706 // strided loads/stores that reside in predicated blocks, or for dealing
2707 // with gaps).
2708 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2709   // If an override option has been passed in for interleaved accesses, use it.
2710   if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2711     return EnableMaskedInterleavedMemAccesses;
2712 
2713   return TTI.enableMaskedInterleavedAccessVectorization();
2714 }
2715 
2716 // Try to vectorize the interleave group that \p Instr belongs to.
2717 //
2718 // E.g. Translate following interleaved load group (factor = 3):
2719 //   for (i = 0; i < N; i+=3) {
2720 //     R = Pic[i];             // Member of index 0
2721 //     G = Pic[i+1];           // Member of index 1
2722 //     B = Pic[i+2];           // Member of index 2
2723 //     ... // do something to R, G, B
2724 //   }
2725 // To:
2726 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2727 //   %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9>   ; R elements
2728 //   %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10>  ; G elements
2729 //   %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11>  ; B elements
2730 //
2731 // Or translate following interleaved store group (factor = 3):
2732 //   for (i = 0; i < N; i+=3) {
2733 //     ... do something to R, G, B
2734 //     Pic[i]   = R;           // Member of index 0
2735 //     Pic[i+1] = G;           // Member of index 1
2736 //     Pic[i+2] = B;           // Member of index 2
2737 //   }
2738 // To:
2739 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2740 //   %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
2741 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2742 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2743 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2744 void InnerLoopVectorizer::vectorizeInterleaveGroup(
2745     const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
2746     VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
2747     VPValue *BlockInMask) {
2748   Instruction *Instr = Group->getInsertPos();
2749   const DataLayout &DL = Instr->getModule()->getDataLayout();
2750 
2751   // Prepare for the vector type of the interleaved load/store.
2752   Type *ScalarTy = getLoadStoreType(Instr);
2753   unsigned InterleaveFactor = Group->getFactor();
2754   assert(!VF.isScalable() && "scalable vectors not yet supported.");
2755   auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);
2756 
2757   // Prepare for the new pointers.
2758   SmallVector<Value *, 2> AddrParts;
2759   unsigned Index = Group->getIndex(Instr);
2760 
2761   // TODO: extend the masked interleaved-group support to reversed access.
2762   assert((!BlockInMask || !Group->isReverse()) &&
2763          "Reversed masked interleave-group not supported.");
2764 
2765   // If the group is reverse, adjust the index to refer to the last vector lane
2766   // instead of the first. We adjust the index from the first vector lane,
2767   // rather than directly getting the pointer for lane VF - 1, because the
2768   // pointer operand of the interleaved access is supposed to be uniform. For
2769   // uniform instructions, we're only required to generate a value for the
2770   // first vector lane in each unroll iteration.
2771   if (Group->isReverse())
2772     Index += (VF.getKnownMinValue() - 1) * Group->getFactor();
2773 
2774   for (unsigned Part = 0; Part < UF; Part++) {
2775     Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
2776     setDebugLocFromInst(AddrPart);
2777 
2778     // Notice current instruction could be any index. Need to adjust the address
2779     // to the member of index 0.
2780     //
2781     // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
2782     //       b = A[i];       // Member of index 0
2783     // Current pointer is pointed to A[i+1], adjust it to A[i].
2784     //
2785     // E.g.  A[i+1] = a;     // Member of index 1
2786     //       A[i]   = b;     // Member of index 0
2787     //       A[i+2] = c;     // Member of index 2 (Current instruction)
2788     // Current pointer is pointed to A[i+2], adjust it to A[i].
2789 
2790     bool InBounds = false;
2791     if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
2792       InBounds = gep->isInBounds();
2793     AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2794     cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2795 
2796     // Cast to the vector pointer type.
2797     unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
2798     Type *PtrTy = VecTy->getPointerTo(AddressSpace);
2799     AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
2800   }
2801 
2802   setDebugLocFromInst(Instr);
2803   Value *PoisonVec = PoisonValue::get(VecTy);
2804 
2805   Value *MaskForGaps = nullptr;
2806   if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
2807     MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2808     assert(MaskForGaps && "Mask for Gaps is required but it is null");
2809   }
2810 
2811   // Vectorize the interleaved load group.
2812   if (isa<LoadInst>(Instr)) {
2813     // For each unroll part, create a wide load for the group.
2814     SmallVector<Value *, 2> NewLoads;
2815     for (unsigned Part = 0; Part < UF; Part++) {
2816       Instruction *NewLoad;
2817       if (BlockInMask || MaskForGaps) {
2818         assert(useMaskedInterleavedAccesses(*TTI) &&
2819                "masked interleaved groups are not allowed.");
2820         Value *GroupMask = MaskForGaps;
2821         if (BlockInMask) {
2822           Value *BlockInMaskPart = State.get(BlockInMask, Part);
2823           Value *ShuffledMask = Builder.CreateShuffleVector(
2824               BlockInMaskPart,
2825               createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2826               "interleaved.mask");
2827           GroupMask = MaskForGaps
2828                           ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2829                                                 MaskForGaps)
2830                           : ShuffledMask;
2831         }
2832         NewLoad =
2833             Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(),
2834                                      GroupMask, PoisonVec, "wide.masked.vec");
2835       }
2836       else
2837         NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
2838                                             Group->getAlign(), "wide.vec");
2839       Group->addMetadata(NewLoad);
2840       NewLoads.push_back(NewLoad);
2841     }
2842 
2843     // For each member in the group, shuffle out the appropriate data from the
2844     // wide loads.
2845     unsigned J = 0;
2846     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2847       Instruction *Member = Group->getMember(I);
2848 
2849       // Skip the gaps in the group.
2850       if (!Member)
2851         continue;
2852 
2853       auto StrideMask =
2854           createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
2855       for (unsigned Part = 0; Part < UF; Part++) {
2856         Value *StridedVec = Builder.CreateShuffleVector(
2857             NewLoads[Part], StrideMask, "strided.vec");
2858 
2859         // If this member has different type, cast the result type.
2860         if (Member->getType() != ScalarTy) {
2861           assert(!VF.isScalable() && "VF is assumed to be non scalable.");
2862           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2863           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2864         }
2865 
2866         if (Group->isReverse())
2867           StridedVec = Builder.CreateVectorReverse(StridedVec, "reverse");
2868 
2869         State.set(VPDefs[J], StridedVec, Part);
2870       }
2871       ++J;
2872     }
2873     return;
2874   }
2875 
2876   // The sub vector type for current instruction.
2877   auto *SubVT = VectorType::get(ScalarTy, VF);
2878 
2879   // Vectorize the interleaved store group.
2880   MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2881   assert((!MaskForGaps || useMaskedInterleavedAccesses(*TTI)) &&
2882          "masked interleaved groups are not allowed.");
2883   assert((!MaskForGaps || !VF.isScalable()) &&
2884          "masking gaps for scalable vectors is not yet supported.");
2885   for (unsigned Part = 0; Part < UF; Part++) {
2886     // Collect the stored vector from each member.
2887     SmallVector<Value *, 4> StoredVecs;
2888     for (unsigned i = 0; i < InterleaveFactor; i++) {
2889       assert((Group->getMember(i) || MaskForGaps) &&
2890              "Fail to get a member from an interleaved store group");
2891       Instruction *Member = Group->getMember(i);
2892 
2893       // Skip the gaps in the group.
2894       if (!Member) {
2895         Value *Undef = PoisonValue::get(SubVT);
2896         StoredVecs.push_back(Undef);
2897         continue;
2898       }
2899 
2900       Value *StoredVec = State.get(StoredValues[i], Part);
2901 
2902       if (Group->isReverse())
2903         StoredVec = Builder.CreateVectorReverse(StoredVec, "reverse");
2904 
2905       // If this member has different type, cast it to a unified type.
2906 
2907       if (StoredVec->getType() != SubVT)
2908         StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2909 
2910       StoredVecs.push_back(StoredVec);
2911     }
2912 
2913     // Concatenate all vectors into a wide vector.
2914     Value *WideVec = concatenateVectors(Builder, StoredVecs);
2915 
2916     // Interleave the elements in the wide vector.
2917     Value *IVec = Builder.CreateShuffleVector(
2918         WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor),
2919         "interleaved.vec");
2920 
2921     Instruction *NewStoreInstr;
2922     if (BlockInMask || MaskForGaps) {
2923       Value *GroupMask = MaskForGaps;
2924       if (BlockInMask) {
2925         Value *BlockInMaskPart = State.get(BlockInMask, Part);
2926         Value *ShuffledMask = Builder.CreateShuffleVector(
2927             BlockInMaskPart,
2928             createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2929             "interleaved.mask");
2930         GroupMask = MaskForGaps ? Builder.CreateBinOp(Instruction::And,
2931                                                       ShuffledMask, MaskForGaps)
2932                                 : ShuffledMask;
2933       }
2934       NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part],
2935                                                 Group->getAlign(), GroupMask);
2936     } else
2937       NewStoreInstr =
2938           Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());
2939 
2940     Group->addMetadata(NewStoreInstr);
2941   }
2942 }
2943 
2944 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
2945                                                VPReplicateRecipe *RepRecipe,
2946                                                const VPIteration &Instance,
2947                                                bool IfPredicateInstr,
2948                                                VPTransformState &State) {
2949   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
2950 
2951   // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for
2952   // the first lane and part.
2953   if (isa<NoAliasScopeDeclInst>(Instr))
2954     if (!Instance.isFirstIteration())
2955       return;
2956 
2957   setDebugLocFromInst(Instr);
2958 
2959   // Does this instruction return a value ?
2960   bool IsVoidRetTy = Instr->getType()->isVoidTy();
2961 
2962   Instruction *Cloned = Instr->clone();
2963   if (!IsVoidRetTy)
2964     Cloned->setName(Instr->getName() + ".cloned");
2965 
2966   // If the scalarized instruction contributes to the address computation of a
2967   // widen masked load/store which was in a basic block that needed predication
2968   // and is not predicated after vectorization, we can't propagate
2969   // poison-generating flags (nuw/nsw, exact, inbounds, etc.). The scalarized
2970   // instruction could feed a poison value to the base address of the widen
2971   // load/store.
2972   if (State.MayGeneratePoisonRecipes.contains(RepRecipe))
2973     Cloned->dropPoisonGeneratingFlags();
2974 
2975   State.Builder.SetInsertPoint(Builder.GetInsertBlock(),
2976                                Builder.GetInsertPoint());
2977   // Replace the operands of the cloned instructions with their scalar
2978   // equivalents in the new loop.
2979   for (auto &I : enumerate(RepRecipe->operands())) {
2980     auto InputInstance = Instance;
2981     VPValue *Operand = I.value();
2982     if (State.Plan->isUniformAfterVectorization(Operand))
2983       InputInstance.Lane = VPLane::getFirstLane();
2984     Cloned->setOperand(I.index(), State.get(Operand, InputInstance));
2985   }
2986   addNewMetadata(Cloned, Instr);
2987 
2988   // Place the cloned scalar in the new loop.
2989   Builder.Insert(Cloned);
2990 
2991   State.set(RepRecipe, Cloned, Instance);
2992 
2993   // If we just cloned a new assumption, add it the assumption cache.
2994   if (auto *II = dyn_cast<AssumeInst>(Cloned))
2995     AC->registerAssumption(II);
2996 
2997   // End if-block.
2998   if (IfPredicateInstr)
2999     PredicatedInstructions.push_back(Cloned);
3000 }
3001 
3002 void InnerLoopVectorizer::createHeaderBranch(Loop *L) {
3003   BasicBlock *Header = L->getHeader();
3004   assert(!L->getLoopLatch() && "loop should not have a latch at this point");
3005 
3006   IRBuilder<> B(Header->getTerminator());
3007   Instruction *OldInst =
3008       getDebugLocFromInstOrOperands(Legal->getPrimaryInduction());
3009   setDebugLocFromInst(OldInst, &B);
3010 
3011   // Connect the header to the exit and header blocks and replace the old
3012   // terminator.
3013   B.CreateCondBr(B.getTrue(), L->getUniqueExitBlock(), Header);
3014 
3015   // Now we have two terminators. Remove the old one from the block.
3016   Header->getTerminator()->eraseFromParent();
3017 }
3018 
3019 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
3020   if (TripCount)
3021     return TripCount;
3022 
3023   assert(L && "Create Trip Count for null loop.");
3024   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3025   // Find the loop boundaries.
3026   ScalarEvolution *SE = PSE.getSE();
3027   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
3028   assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
3029          "Invalid loop count");
3030 
3031   Type *IdxTy = Legal->getWidestInductionType();
3032   assert(IdxTy && "No type for induction");
3033 
3034   // The exit count might have the type of i64 while the phi is i32. This can
3035   // happen if we have an induction variable that is sign extended before the
3036   // compare. The only way that we get a backedge taken count is that the
3037   // induction variable was signed and as such will not overflow. In such a case
3038   // truncation is legal.
3039   if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
3040       IdxTy->getPrimitiveSizeInBits())
3041     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
3042   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
3043 
3044   // Get the total trip count from the count by adding 1.
3045   const SCEV *ExitCount = SE->getAddExpr(
3046       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
3047 
3048   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
3049 
3050   // Expand the trip count and place the new instructions in the preheader.
3051   // Notice that the pre-header does not change, only the loop body.
3052   SCEVExpander Exp(*SE, DL, "induction");
3053 
3054   // Count holds the overall loop count (N).
3055   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
3056                                 L->getLoopPreheader()->getTerminator());
3057 
3058   if (TripCount->getType()->isPointerTy())
3059     TripCount =
3060         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
3061                                     L->getLoopPreheader()->getTerminator());
3062 
3063   return TripCount;
3064 }
3065 
3066 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
3067   if (VectorTripCount)
3068     return VectorTripCount;
3069 
3070   Value *TC = getOrCreateTripCount(L);
3071   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3072 
3073   Type *Ty = TC->getType();
3074   // This is where we can make the step a runtime constant.
3075   Value *Step = createStepForVF(Builder, Ty, VF, UF);
3076 
3077   // If the tail is to be folded by masking, round the number of iterations N
3078   // up to a multiple of Step instead of rounding down. This is done by first
3079   // adding Step-1 and then rounding down. Note that it's ok if this addition
3080   // overflows: the vector induction variable will eventually wrap to zero given
3081   // that it starts at zero and its Step is a power of two; the loop will then
3082   // exit, with the last early-exit vector comparison also producing all-true.
3083   if (Cost->foldTailByMasking()) {
3084     assert(isPowerOf2_32(VF.getKnownMinValue() * UF) &&
3085            "VF*UF must be a power of 2 when folding tail by masking");
3086     assert(!VF.isScalable() &&
3087            "Tail folding not yet supported for scalable vectors");
3088     TC = Builder.CreateAdd(
3089         TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up");
3090   }
3091 
3092   // Now we need to generate the expression for the part of the loop that the
3093   // vectorized body will execute. This is equal to N - (N % Step) if scalar
3094   // iterations are not required for correctness, or N - Step, otherwise. Step
3095   // is equal to the vectorization factor (number of SIMD elements) times the
3096   // unroll factor (number of SIMD instructions).
3097   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
3098 
3099   // There are cases where we *must* run at least one iteration in the remainder
3100   // loop.  See the cost model for when this can happen.  If the step evenly
3101   // divides the trip count, we set the remainder to be equal to the step. If
3102   // the step does not evenly divide the trip count, no adjustment is necessary
3103   // since there will already be scalar iterations. Note that the minimum
3104   // iterations check ensures that N >= Step.
3105   if (Cost->requiresScalarEpilogue(VF)) {
3106     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
3107     R = Builder.CreateSelect(IsZero, Step, R);
3108   }
3109 
3110   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
3111 
3112   return VectorTripCount;
3113 }
3114 
3115 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
3116                                                    const DataLayout &DL) {
3117   // Verify that V is a vector type with same number of elements as DstVTy.
3118   auto *DstFVTy = cast<FixedVectorType>(DstVTy);
3119   unsigned VF = DstFVTy->getNumElements();
3120   auto *SrcVecTy = cast<FixedVectorType>(V->getType());
3121   assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match");
3122   Type *SrcElemTy = SrcVecTy->getElementType();
3123   Type *DstElemTy = DstFVTy->getElementType();
3124   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
3125          "Vector elements must have same size");
3126 
3127   // Do a direct cast if element types are castable.
3128   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
3129     return Builder.CreateBitOrPointerCast(V, DstFVTy);
3130   }
3131   // V cannot be directly casted to desired vector type.
3132   // May happen when V is a floating point vector but DstVTy is a vector of
3133   // pointers or vice-versa. Handle this using a two-step bitcast using an
3134   // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float.
3135   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
3136          "Only one type should be a pointer type");
3137   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
3138          "Only one type should be a floating point type");
3139   Type *IntTy =
3140       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
3141   auto *VecIntTy = FixedVectorType::get(IntTy, VF);
3142   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
3143   return Builder.CreateBitOrPointerCast(CastVal, DstFVTy);
3144 }
3145 
3146 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
3147                                                          BasicBlock *Bypass) {
3148   Value *Count = getOrCreateTripCount(L);
3149   // Reuse existing vector loop preheader for TC checks.
3150   // Note that new preheader block is generated for vector loop.
3151   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
3152   IRBuilder<> Builder(TCCheckBlock->getTerminator());
3153 
3154   // Generate code to check if the loop's trip count is less than VF * UF, or
3155   // equal to it in case a scalar epilogue is required; this implies that the
3156   // vector trip count is zero. This check also covers the case where adding one
3157   // to the backedge-taken count overflowed leading to an incorrect trip count
3158   // of zero. In this case we will also jump to the scalar loop.
3159   auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE
3160                                             : ICmpInst::ICMP_ULT;
3161 
3162   // If tail is to be folded, vector loop takes care of all iterations.
3163   Value *CheckMinIters = Builder.getFalse();
3164   if (!Cost->foldTailByMasking()) {
3165     Value *Step = createStepForVF(Builder, Count->getType(), VF, UF);
3166     CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
3167   }
3168   // Create new preheader for vector loop.
3169   LoopVectorPreHeader =
3170       SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
3171                  "vector.ph");
3172 
3173   assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
3174                                DT->getNode(Bypass)->getIDom()) &&
3175          "TC check is expected to dominate Bypass");
3176 
3177   // Update dominator for Bypass & LoopExit (if needed).
3178   DT->changeImmediateDominator(Bypass, TCCheckBlock);
3179   if (!Cost->requiresScalarEpilogue(VF))
3180     // If there is an epilogue which must run, there's no edge from the
3181     // middle block to exit blocks  and thus no need to update the immediate
3182     // dominator of the exit blocks.
3183     DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
3184 
3185   ReplaceInstWithInst(
3186       TCCheckBlock->getTerminator(),
3187       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
3188   LoopBypassBlocks.push_back(TCCheckBlock);
3189 }
3190 
3191 BasicBlock *InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
3192 
3193   BasicBlock *const SCEVCheckBlock =
3194       RTChecks.emitSCEVChecks(L, Bypass, LoopVectorPreHeader, LoopExitBlock);
3195   if (!SCEVCheckBlock)
3196     return nullptr;
3197 
3198   assert(!(SCEVCheckBlock->getParent()->hasOptSize() ||
3199            (OptForSizeBasedOnProfile &&
3200             Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) &&
3201          "Cannot SCEV check stride or overflow when optimizing for size");
3202 
3203 
3204   // Update dominator only if this is first RT check.
3205   if (LoopBypassBlocks.empty()) {
3206     DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
3207     if (!Cost->requiresScalarEpilogue(VF))
3208       // If there is an epilogue which must run, there's no edge from the
3209       // middle block to exit blocks  and thus no need to update the immediate
3210       // dominator of the exit blocks.
3211       DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
3212   }
3213 
3214   LoopBypassBlocks.push_back(SCEVCheckBlock);
3215   AddedSafetyChecks = true;
3216   return SCEVCheckBlock;
3217 }
3218 
3219 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L,
3220                                                       BasicBlock *Bypass) {
3221   // VPlan-native path does not do any analysis for runtime checks currently.
3222   if (EnableVPlanNativePath)
3223     return nullptr;
3224 
3225   BasicBlock *const MemCheckBlock =
3226       RTChecks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader);
3227 
3228   // Check if we generated code that checks in runtime if arrays overlap. We put
3229   // the checks into a separate block to make the more common case of few
3230   // elements faster.
3231   if (!MemCheckBlock)
3232     return nullptr;
3233 
3234   if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
3235     assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
3236            "Cannot emit memory checks when optimizing for size, unless forced "
3237            "to vectorize.");
3238     ORE->emit([&]() {
3239       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
3240                                         L->getStartLoc(), L->getHeader())
3241              << "Code-size may be reduced by not forcing "
3242                 "vectorization, or by source-code modifications "
3243                 "eliminating the need for runtime checks "
3244                 "(e.g., adding 'restrict').";
3245     });
3246   }
3247 
3248   LoopBypassBlocks.push_back(MemCheckBlock);
3249 
3250   AddedSafetyChecks = true;
3251 
3252   // We currently don't use LoopVersioning for the actual loop cloning but we
3253   // still use it to add the noalias metadata.
3254   LVer = std::make_unique<LoopVersioning>(
3255       *Legal->getLAI(),
3256       Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
3257       DT, PSE.getSE());
3258   LVer->prepareNoAliasMetadata();
3259   return MemCheckBlock;
3260 }
3261 
3262 Value *InnerLoopVectorizer::emitTransformedIndex(
3263     IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
3264     const InductionDescriptor &ID, BasicBlock *VectorHeader) const {
3265 
3266   SCEVExpander Exp(*SE, DL, "induction");
3267   auto Step = ID.getStep();
3268   auto StartValue = ID.getStartValue();
3269   assert(Index->getType()->getScalarType() == Step->getType() &&
3270          "Index scalar type does not match StepValue type");
3271 
3272   // Note: the IR at this point is broken. We cannot use SE to create any new
3273   // SCEV and then expand it, hoping that SCEV's simplification will give us
3274   // a more optimal code. Unfortunately, attempt of doing so on invalid IR may
3275   // lead to various SCEV crashes. So all we can do is to use builder and rely
3276   // on InstCombine for future simplifications. Here we handle some trivial
3277   // cases only.
3278   auto CreateAdd = [&B](Value *X, Value *Y) {
3279     assert(X->getType() == Y->getType() && "Types don't match!");
3280     if (auto *CX = dyn_cast<ConstantInt>(X))
3281       if (CX->isZero())
3282         return Y;
3283     if (auto *CY = dyn_cast<ConstantInt>(Y))
3284       if (CY->isZero())
3285         return X;
3286     return B.CreateAdd(X, Y);
3287   };
3288 
3289   // We allow X to be a vector type, in which case Y will potentially be
3290   // splatted into a vector with the same element count.
3291   auto CreateMul = [&B](Value *X, Value *Y) {
3292     assert(X->getType()->getScalarType() == Y->getType() &&
3293            "Types don't match!");
3294     if (auto *CX = dyn_cast<ConstantInt>(X))
3295       if (CX->isOne())
3296         return Y;
3297     if (auto *CY = dyn_cast<ConstantInt>(Y))
3298       if (CY->isOne())
3299         return X;
3300     VectorType *XVTy = dyn_cast<VectorType>(X->getType());
3301     if (XVTy && !isa<VectorType>(Y->getType()))
3302       Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
3303     return B.CreateMul(X, Y);
3304   };
3305 
3306   // Get a suitable insert point for SCEV expansion. For blocks in the vector
3307   // loop, choose the end of the vector loop header (=VectorHeader), because
3308   // the DomTree is not kept up-to-date for additional blocks generated in the
3309   // vector loop. By using the header as insertion point, we guarantee that the
3310   // expanded instructions dominate all their uses.
3311   auto GetInsertPoint = [this, &B, VectorHeader]() {
3312     BasicBlock *InsertBB = B.GetInsertPoint()->getParent();
3313     if (InsertBB != LoopVectorBody &&
3314         LI->getLoopFor(VectorHeader) == LI->getLoopFor(InsertBB))
3315       return VectorHeader->getTerminator();
3316     return &*B.GetInsertPoint();
3317   };
3318 
3319   switch (ID.getKind()) {
3320   case InductionDescriptor::IK_IntInduction: {
3321     assert(!isa<VectorType>(Index->getType()) &&
3322            "Vector indices not supported for integer inductions yet");
3323     assert(Index->getType() == StartValue->getType() &&
3324            "Index type does not match StartValue type");
3325     if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
3326       return B.CreateSub(StartValue, Index);
3327     auto *Offset = CreateMul(
3328         Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()));
3329     return CreateAdd(StartValue, Offset);
3330   }
3331   case InductionDescriptor::IK_PtrInduction: {
3332     assert(isa<SCEVConstant>(Step) &&
3333            "Expected constant step for pointer induction");
3334     return B.CreateGEP(
3335         ID.getElementType(), StartValue,
3336         CreateMul(Index,
3337                   Exp.expandCodeFor(Step, Index->getType()->getScalarType(),
3338                                     GetInsertPoint())));
3339   }
3340   case InductionDescriptor::IK_FpInduction: {
3341     assert(!isa<VectorType>(Index->getType()) &&
3342            "Vector indices not supported for FP inductions yet");
3343     assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
3344     auto InductionBinOp = ID.getInductionBinOp();
3345     assert(InductionBinOp &&
3346            (InductionBinOp->getOpcode() == Instruction::FAdd ||
3347             InductionBinOp->getOpcode() == Instruction::FSub) &&
3348            "Original bin op should be defined for FP induction");
3349 
3350     Value *StepValue = cast<SCEVUnknown>(Step)->getValue();
3351     Value *MulExp = B.CreateFMul(StepValue, Index);
3352     return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
3353                          "induction");
3354   }
3355   case InductionDescriptor::IK_NoInduction:
3356     return nullptr;
3357   }
3358   llvm_unreachable("invalid enum");
3359 }
3360 
3361 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
3362   LoopScalarBody = OrigLoop->getHeader();
3363   LoopVectorPreHeader = OrigLoop->getLoopPreheader();
3364   assert(LoopVectorPreHeader && "Invalid loop structure");
3365   LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr
3366   assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) &&
3367          "multiple exit loop without required epilogue?");
3368 
3369   LoopMiddleBlock =
3370       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3371                  LI, nullptr, Twine(Prefix) + "middle.block");
3372   LoopScalarPreHeader =
3373       SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
3374                  nullptr, Twine(Prefix) + "scalar.ph");
3375 
3376   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3377 
3378   // Set up the middle block terminator.  Two cases:
3379   // 1) If we know that we must execute the scalar epilogue, emit an
3380   //    unconditional branch.
3381   // 2) Otherwise, we must have a single unique exit block (due to how we
3382   //    implement the multiple exit case).  In this case, set up a conditonal
3383   //    branch from the middle block to the loop scalar preheader, and the
3384   //    exit block.  completeLoopSkeleton will update the condition to use an
3385   //    iteration check, if required to decide whether to execute the remainder.
3386   BranchInst *BrInst = Cost->requiresScalarEpilogue(VF) ?
3387     BranchInst::Create(LoopScalarPreHeader) :
3388     BranchInst::Create(LoopExitBlock, LoopScalarPreHeader,
3389                        Builder.getTrue());
3390   BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3391   ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
3392 
3393   // We intentionally don't let SplitBlock to update LoopInfo since
3394   // LoopVectorBody should belong to another loop than LoopVectorPreHeader.
3395   // LoopVectorBody is explicitly added to the correct place few lines later.
3396   LoopVectorBody =
3397       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3398                  nullptr, nullptr, Twine(Prefix) + "vector.body");
3399 
3400   // Update dominator for loop exit.
3401   if (!Cost->requiresScalarEpilogue(VF))
3402     // If there is an epilogue which must run, there's no edge from the
3403     // middle block to exit blocks  and thus no need to update the immediate
3404     // dominator of the exit blocks.
3405     DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
3406 
3407   // Create and register the new vector loop.
3408   Loop *Lp = LI->AllocateLoop();
3409   Loop *ParentLoop = OrigLoop->getParentLoop();
3410 
3411   // Insert the new loop into the loop nest and register the new basic blocks
3412   // before calling any utilities such as SCEV that require valid LoopInfo.
3413   if (ParentLoop) {
3414     ParentLoop->addChildLoop(Lp);
3415   } else {
3416     LI->addTopLevelLoop(Lp);
3417   }
3418   Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
3419   return Lp;
3420 }
3421 
3422 void InnerLoopVectorizer::createInductionResumeValues(
3423     Loop *L, std::pair<BasicBlock *, Value *> AdditionalBypass) {
3424   assert(((AdditionalBypass.first && AdditionalBypass.second) ||
3425           (!AdditionalBypass.first && !AdditionalBypass.second)) &&
3426          "Inconsistent information about additional bypass.");
3427 
3428   Value *VectorTripCount = getOrCreateVectorTripCount(L);
3429   assert(VectorTripCount && L && "Expected valid arguments");
3430   // We are going to resume the execution of the scalar loop.
3431   // Go over all of the induction variables that we found and fix the
3432   // PHIs that are left in the scalar version of the loop.
3433   // The starting values of PHI nodes depend on the counter of the last
3434   // iteration in the vectorized loop.
3435   // If we come from a bypass edge then we need to start from the original
3436   // start value.
3437   Instruction *OldInduction = Legal->getPrimaryInduction();
3438   for (auto &InductionEntry : Legal->getInductionVars()) {
3439     PHINode *OrigPhi = InductionEntry.first;
3440     InductionDescriptor II = InductionEntry.second;
3441 
3442     // Create phi nodes to merge from the  backedge-taken check block.
3443     PHINode *BCResumeVal =
3444         PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
3445                         LoopScalarPreHeader->getTerminator());
3446     // Copy original phi DL over to the new one.
3447     BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
3448     Value *&EndValue = IVEndValues[OrigPhi];
3449     Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
3450     if (OrigPhi == OldInduction) {
3451       // We know what the end value is.
3452       EndValue = VectorTripCount;
3453     } else {
3454       IRBuilder<> B(L->getLoopPreheader()->getTerminator());
3455 
3456       // Fast-math-flags propagate from the original induction instruction.
3457       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3458         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3459 
3460       Type *StepType = II.getStep()->getType();
3461       Instruction::CastOps CastOp =
3462           CastInst::getCastOpcode(VectorTripCount, true, StepType, true);
3463       Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd");
3464       const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout();
3465       EndValue =
3466           emitTransformedIndex(B, CRD, PSE.getSE(), DL, II, LoopVectorBody);
3467       EndValue->setName("ind.end");
3468 
3469       // Compute the end value for the additional bypass (if applicable).
3470       if (AdditionalBypass.first) {
3471         B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt()));
3472         CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true,
3473                                          StepType, true);
3474         CRD =
3475             B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd");
3476         EndValueFromAdditionalBypass =
3477             emitTransformedIndex(B, CRD, PSE.getSE(), DL, II, LoopVectorBody);
3478         EndValueFromAdditionalBypass->setName("ind.end");
3479       }
3480     }
3481     // The new PHI merges the original incoming value, in case of a bypass,
3482     // or the value at the end of the vectorized loop.
3483     BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);
3484 
3485     // Fix the scalar body counter (PHI node).
3486     // The old induction's phi node in the scalar body needs the truncated
3487     // value.
3488     for (BasicBlock *BB : LoopBypassBlocks)
3489       BCResumeVal->addIncoming(II.getStartValue(), BB);
3490 
3491     if (AdditionalBypass.first)
3492       BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first,
3493                                             EndValueFromAdditionalBypass);
3494 
3495     OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
3496   }
3497 }
3498 
3499 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L,
3500                                                       MDNode *OrigLoopID) {
3501   assert(L && "Expected valid loop.");
3502 
3503   // The trip counts should be cached by now.
3504   Value *Count = getOrCreateTripCount(L);
3505   Value *VectorTripCount = getOrCreateVectorTripCount(L);
3506 
3507   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3508 
3509   // Add a check in the middle block to see if we have completed
3510   // all of the iterations in the first vector loop.  Three cases:
3511   // 1) If we require a scalar epilogue, there is no conditional branch as
3512   //    we unconditionally branch to the scalar preheader.  Do nothing.
3513   // 2) If (N - N%VF) == N, then we *don't* need to run the remainder.
3514   //    Thus if tail is to be folded, we know we don't need to run the
3515   //    remainder and we can use the previous value for the condition (true).
3516   // 3) Otherwise, construct a runtime check.
3517   if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) {
3518     Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
3519                                         Count, VectorTripCount, "cmp.n",
3520                                         LoopMiddleBlock->getTerminator());
3521 
3522     // Here we use the same DebugLoc as the scalar loop latch terminator instead
3523     // of the corresponding compare because they may have ended up with
3524     // different line numbers and we want to avoid awkward line stepping while
3525     // debugging. Eg. if the compare has got a line number inside the loop.
3526     CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3527     cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN);
3528   }
3529 
3530   // Get ready to start creating new instructions into the vectorized body.
3531   assert(LoopVectorPreHeader == L->getLoopPreheader() &&
3532          "Inconsistent vector loop preheader");
3533   Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
3534 
3535 #ifdef EXPENSIVE_CHECKS
3536   assert(DT->verify(DominatorTree::VerificationLevel::Fast));
3537   LI->verify(*DT);
3538 #endif
3539 
3540   return LoopVectorPreHeader;
3541 }
3542 
3543 std::pair<BasicBlock *, Value *>
3544 InnerLoopVectorizer::createVectorizedLoopSkeleton() {
3545   /*
3546    In this function we generate a new loop. The new loop will contain
3547    the vectorized instructions while the old loop will continue to run the
3548    scalar remainder.
3549 
3550        [ ] <-- loop iteration number check.
3551     /   |
3552    /    v
3553   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
3554   |  /  |
3555   | /   v
3556   ||   [ ]     <-- vector pre header.
3557   |/    |
3558   |     v
3559   |    [  ] \
3560   |    [  ]_|   <-- vector loop.
3561   |     |
3562   |     v
3563   \   -[ ]   <--- middle-block.
3564    \/   |
3565    /\   v
3566    | ->[ ]     <--- new preheader.
3567    |    |
3568  (opt)  v      <-- edge from middle to exit iff epilogue is not required.
3569    |   [ ] \
3570    |   [ ]_|   <-- old scalar loop to handle remainder (scalar epilogue).
3571     \   |
3572      \  v
3573       >[ ]     <-- exit block(s).
3574    ...
3575    */
3576 
3577   // Get the metadata of the original loop before it gets modified.
3578   MDNode *OrigLoopID = OrigLoop->getLoopID();
3579 
3580   // Workaround!  Compute the trip count of the original loop and cache it
3581   // before we start modifying the CFG.  This code has a systemic problem
3582   // wherein it tries to run analysis over partially constructed IR; this is
3583   // wrong, and not simply for SCEV.  The trip count of the original loop
3584   // simply happens to be prone to hitting this in practice.  In theory, we
3585   // can hit the same issue for any SCEV, or ValueTracking query done during
3586   // mutation.  See PR49900.
3587   getOrCreateTripCount(OrigLoop);
3588 
3589   // Create an empty vector loop, and prepare basic blocks for the runtime
3590   // checks.
3591   Loop *Lp = createVectorLoopSkeleton("");
3592 
3593   // Now, compare the new count to zero. If it is zero skip the vector loop and
3594   // jump to the scalar loop. This check also covers the case where the
3595   // backedge-taken count is uint##_max: adding one to it will overflow leading
3596   // to an incorrect trip count of zero. In this (rare) case we will also jump
3597   // to the scalar loop.
3598   emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader);
3599 
3600   // Generate the code to check any assumptions that we've made for SCEV
3601   // expressions.
3602   emitSCEVChecks(Lp, LoopScalarPreHeader);
3603 
3604   // Generate the code that checks in runtime if arrays overlap. We put the
3605   // checks into a separate block to make the more common case of few elements
3606   // faster.
3607   emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
3608 
3609   createHeaderBranch(Lp);
3610 
3611   // Emit phis for the new starting index of the scalar loop.
3612   createInductionResumeValues(Lp);
3613 
3614   return {completeLoopSkeleton(Lp, OrigLoopID), nullptr};
3615 }
3616 
3617 // Fix up external users of the induction variable. At this point, we are
3618 // in LCSSA form, with all external PHIs that use the IV having one input value,
3619 // coming from the remainder loop. We need those PHIs to also have a correct
3620 // value for the IV when arriving directly from the middle block.
3621 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3622                                        const InductionDescriptor &II,
3623                                        Value *CountRoundDown, Value *EndValue,
3624                                        BasicBlock *MiddleBlock) {
3625   // There are two kinds of external IV usages - those that use the value
3626   // computed in the last iteration (the PHI) and those that use the penultimate
3627   // value (the value that feeds into the phi from the loop latch).
3628   // We allow both, but they, obviously, have different values.
3629 
3630   assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");
3631 
3632   DenseMap<Value *, Value *> MissingVals;
3633 
3634   // An external user of the last iteration's value should see the value that
3635   // the remainder loop uses to initialize its own IV.
3636   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3637   for (User *U : PostInc->users()) {
3638     Instruction *UI = cast<Instruction>(U);
3639     if (!OrigLoop->contains(UI)) {
3640       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3641       MissingVals[UI] = EndValue;
3642     }
3643   }
3644 
3645   // An external user of the penultimate value need to see EndValue - Step.
3646   // The simplest way to get this is to recompute it from the constituent SCEVs,
3647   // that is Start + (Step * (CRD - 1)).
3648   for (User *U : OrigPhi->users()) {
3649     auto *UI = cast<Instruction>(U);
3650     if (!OrigLoop->contains(UI)) {
3651       const DataLayout &DL =
3652           OrigLoop->getHeader()->getModule()->getDataLayout();
3653       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3654 
3655       IRBuilder<> B(MiddleBlock->getTerminator());
3656 
3657       // Fast-math-flags propagate from the original induction instruction.
3658       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3659         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3660 
3661       Value *CountMinusOne = B.CreateSub(
3662           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3663       Value *CMO =
3664           !II.getStep()->getType()->isIntegerTy()
3665               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3666                              II.getStep()->getType())
3667               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3668       CMO->setName("cast.cmo");
3669       Value *Escape =
3670           emitTransformedIndex(B, CMO, PSE.getSE(), DL, II, LoopVectorBody);
3671       Escape->setName("ind.escape");
3672       MissingVals[UI] = Escape;
3673     }
3674   }
3675 
3676   for (auto &I : MissingVals) {
3677     PHINode *PHI = cast<PHINode>(I.first);
3678     // One corner case we have to handle is two IVs "chasing" each-other,
3679     // that is %IV2 = phi [...], [ %IV1, %latch ]
3680     // In this case, if IV1 has an external use, we need to avoid adding both
3681     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3682     // don't already have an incoming value for the middle block.
3683     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3684       PHI->addIncoming(I.second, MiddleBlock);
3685   }
3686 }
3687 
3688 namespace {
3689 
3690 struct CSEDenseMapInfo {
3691   static bool canHandle(const Instruction *I) {
3692     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3693            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3694   }
3695 
3696   static inline Instruction *getEmptyKey() {
3697     return DenseMapInfo<Instruction *>::getEmptyKey();
3698   }
3699 
3700   static inline Instruction *getTombstoneKey() {
3701     return DenseMapInfo<Instruction *>::getTombstoneKey();
3702   }
3703 
3704   static unsigned getHashValue(const Instruction *I) {
3705     assert(canHandle(I) && "Unknown instruction!");
3706     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3707                                                            I->value_op_end()));
3708   }
3709 
3710   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3711     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3712         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3713       return LHS == RHS;
3714     return LHS->isIdenticalTo(RHS);
3715   }
3716 };
3717 
3718 } // end anonymous namespace
3719 
3720 ///Perform cse of induction variable instructions.
3721 static void cse(BasicBlock *BB) {
3722   // Perform simple cse.
3723   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3724   for (Instruction &In : llvm::make_early_inc_range(*BB)) {
3725     if (!CSEDenseMapInfo::canHandle(&In))
3726       continue;
3727 
3728     // Check if we can replace this instruction with any of the
3729     // visited instructions.
3730     if (Instruction *V = CSEMap.lookup(&In)) {
3731       In.replaceAllUsesWith(V);
3732       In.eraseFromParent();
3733       continue;
3734     }
3735 
3736     CSEMap[&In] = &In;
3737   }
3738 }
3739 
3740 InstructionCost
3741 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF,
3742                                               bool &NeedToScalarize) const {
3743   Function *F = CI->getCalledFunction();
3744   Type *ScalarRetTy = CI->getType();
3745   SmallVector<Type *, 4> Tys, ScalarTys;
3746   for (auto &ArgOp : CI->args())
3747     ScalarTys.push_back(ArgOp->getType());
3748 
3749   // Estimate cost of scalarized vector call. The source operands are assumed
3750   // to be vectors, so we need to extract individual elements from there,
3751   // execute VF scalar calls, and then gather the result into the vector return
3752   // value.
3753   InstructionCost ScalarCallCost =
3754       TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput);
3755   if (VF.isScalar())
3756     return ScalarCallCost;
3757 
3758   // Compute corresponding vector type for return value and arguments.
3759   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3760   for (Type *ScalarTy : ScalarTys)
3761     Tys.push_back(ToVectorTy(ScalarTy, VF));
3762 
3763   // Compute costs of unpacking argument values for the scalar calls and
3764   // packing the return values to a vector.
3765   InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
3766 
3767   InstructionCost Cost =
3768       ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
3769 
3770   // If we can't emit a vector call for this function, then the currently found
3771   // cost is the cost we need to return.
3772   NeedToScalarize = true;
3773   VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
3774   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3775 
3776   if (!TLI || CI->isNoBuiltin() || !VecFunc)
3777     return Cost;
3778 
3779   // If the corresponding vector cost is cheaper, return its cost.
3780   InstructionCost VectorCallCost =
3781       TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput);
3782   if (VectorCallCost < Cost) {
3783     NeedToScalarize = false;
3784     Cost = VectorCallCost;
3785   }
3786   return Cost;
3787 }
3788 
3789 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) {
3790   if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy()))
3791     return Elt;
3792   return VectorType::get(Elt, VF);
3793 }
3794 
3795 InstructionCost
3796 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
3797                                                    ElementCount VF) const {
3798   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3799   assert(ID && "Expected intrinsic call!");
3800   Type *RetTy = MaybeVectorizeType(CI->getType(), VF);
3801   FastMathFlags FMF;
3802   if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
3803     FMF = FPMO->getFastMathFlags();
3804 
3805   SmallVector<const Value *> Arguments(CI->args());
3806   FunctionType *FTy = CI->getCalledFunction()->getFunctionType();
3807   SmallVector<Type *> ParamTys;
3808   std::transform(FTy->param_begin(), FTy->param_end(),
3809                  std::back_inserter(ParamTys),
3810                  [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); });
3811 
3812   IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
3813                                     dyn_cast<IntrinsicInst>(CI));
3814   return TTI.getIntrinsicInstrCost(CostAttrs,
3815                                    TargetTransformInfo::TCK_RecipThroughput);
3816 }
3817 
3818 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3819   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3820   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3821   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3822 }
3823 
3824 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3825   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3826   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3827   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3828 }
3829 
3830 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) {
3831   // For every instruction `I` in MinBWs, truncate the operands, create a
3832   // truncated version of `I` and reextend its result. InstCombine runs
3833   // later and will remove any ext/trunc pairs.
3834   SmallPtrSet<Value *, 4> Erased;
3835   for (const auto &KV : Cost->getMinimalBitwidths()) {
3836     // If the value wasn't vectorized, we must maintain the original scalar
3837     // type. The absence of the value from State indicates that it
3838     // wasn't vectorized.
3839     // FIXME: Should not rely on getVPValue at this point.
3840     VPValue *Def = State.Plan->getVPValue(KV.first, true);
3841     if (!State.hasAnyVectorValue(Def))
3842       continue;
3843     for (unsigned Part = 0; Part < UF; ++Part) {
3844       Value *I = State.get(Def, Part);
3845       if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
3846         continue;
3847       Type *OriginalTy = I->getType();
3848       Type *ScalarTruncatedTy =
3849           IntegerType::get(OriginalTy->getContext(), KV.second);
3850       auto *TruncatedTy = VectorType::get(
3851           ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount());
3852       if (TruncatedTy == OriginalTy)
3853         continue;
3854 
3855       IRBuilder<> B(cast<Instruction>(I));
3856       auto ShrinkOperand = [&](Value *V) -> Value * {
3857         if (auto *ZI = dyn_cast<ZExtInst>(V))
3858           if (ZI->getSrcTy() == TruncatedTy)
3859             return ZI->getOperand(0);
3860         return B.CreateZExtOrTrunc(V, TruncatedTy);
3861       };
3862 
3863       // The actual instruction modification depends on the instruction type,
3864       // unfortunately.
3865       Value *NewI = nullptr;
3866       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3867         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3868                              ShrinkOperand(BO->getOperand(1)));
3869 
3870         // Any wrapping introduced by shrinking this operation shouldn't be
3871         // considered undefined behavior. So, we can't unconditionally copy
3872         // arithmetic wrapping flags to NewI.
3873         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3874       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3875         NewI =
3876             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3877                          ShrinkOperand(CI->getOperand(1)));
3878       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3879         NewI = B.CreateSelect(SI->getCondition(),
3880                               ShrinkOperand(SI->getTrueValue()),
3881                               ShrinkOperand(SI->getFalseValue()));
3882       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3883         switch (CI->getOpcode()) {
3884         default:
3885           llvm_unreachable("Unhandled cast!");
3886         case Instruction::Trunc:
3887           NewI = ShrinkOperand(CI->getOperand(0));
3888           break;
3889         case Instruction::SExt:
3890           NewI = B.CreateSExtOrTrunc(
3891               CI->getOperand(0),
3892               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3893           break;
3894         case Instruction::ZExt:
3895           NewI = B.CreateZExtOrTrunc(
3896               CI->getOperand(0),
3897               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3898           break;
3899         }
3900       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
3901         auto Elements0 =
3902             cast<VectorType>(SI->getOperand(0)->getType())->getElementCount();
3903         auto *O0 = B.CreateZExtOrTrunc(
3904             SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
3905         auto Elements1 =
3906             cast<VectorType>(SI->getOperand(1)->getType())->getElementCount();
3907         auto *O1 = B.CreateZExtOrTrunc(
3908             SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
3909 
3910         NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
3911       } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
3912         // Don't do anything with the operands, just extend the result.
3913         continue;
3914       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3915         auto Elements =
3916             cast<VectorType>(IE->getOperand(0)->getType())->getElementCount();
3917         auto *O0 = B.CreateZExtOrTrunc(
3918             IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3919         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3920         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3921       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3922         auto Elements =
3923             cast<VectorType>(EE->getOperand(0)->getType())->getElementCount();
3924         auto *O0 = B.CreateZExtOrTrunc(
3925             EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3926         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3927       } else {
3928         // If we don't know what to do, be conservative and don't do anything.
3929         continue;
3930       }
3931 
3932       // Lastly, extend the result.
3933       NewI->takeName(cast<Instruction>(I));
3934       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3935       I->replaceAllUsesWith(Res);
3936       cast<Instruction>(I)->eraseFromParent();
3937       Erased.insert(I);
3938       State.reset(Def, Res, Part);
3939     }
3940   }
3941 
3942   // We'll have created a bunch of ZExts that are now parentless. Clean up.
3943   for (const auto &KV : Cost->getMinimalBitwidths()) {
3944     // If the value wasn't vectorized, we must maintain the original scalar
3945     // type. The absence of the value from State indicates that it
3946     // wasn't vectorized.
3947     // FIXME: Should not rely on getVPValue at this point.
3948     VPValue *Def = State.Plan->getVPValue(KV.first, true);
3949     if (!State.hasAnyVectorValue(Def))
3950       continue;
3951     for (unsigned Part = 0; Part < UF; ++Part) {
3952       Value *I = State.get(Def, Part);
3953       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3954       if (Inst && Inst->use_empty()) {
3955         Value *NewI = Inst->getOperand(0);
3956         Inst->eraseFromParent();
3957         State.reset(Def, NewI, Part);
3958       }
3959     }
3960   }
3961 }
3962 
3963 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
3964   // Insert truncates and extends for any truncated instructions as hints to
3965   // InstCombine.
3966   if (VF.isVector())
3967     truncateToMinimalBitwidths(State);
3968 
3969   // Fix widened non-induction PHIs by setting up the PHI operands.
3970   if (OrigPHIsToFix.size()) {
3971     assert(EnableVPlanNativePath &&
3972            "Unexpected non-induction PHIs for fixup in non VPlan-native path");
3973     fixNonInductionPHIs(State);
3974   }
3975 
3976   // At this point every instruction in the original loop is widened to a
3977   // vector form. Now we need to fix the recurrences in the loop. These PHI
3978   // nodes are currently empty because we did not want to introduce cycles.
3979   // This is the second stage of vectorizing recurrences.
3980   fixCrossIterationPHIs(State);
3981 
3982   // Forget the original basic block.
3983   PSE.getSE()->forgetLoop(OrigLoop);
3984 
3985   // If we inserted an edge from the middle block to the unique exit block,
3986   // update uses outside the loop (phis) to account for the newly inserted
3987   // edge.
3988   if (!Cost->requiresScalarEpilogue(VF)) {
3989     // Fix-up external users of the induction variables.
3990     for (auto &Entry : Legal->getInductionVars())
3991       fixupIVUsers(Entry.first, Entry.second,
3992                    getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
3993                    IVEndValues[Entry.first], LoopMiddleBlock);
3994 
3995     fixLCSSAPHIs(State);
3996   }
3997 
3998   for (Instruction *PI : PredicatedInstructions)
3999     sinkScalarOperands(&*PI);
4000 
4001   // Remove redundant induction instructions.
4002   cse(LoopVectorBody);
4003 
4004   // Set/update profile weights for the vector and remainder loops as original
4005   // loop iterations are now distributed among them. Note that original loop
4006   // represented by LoopScalarBody becomes remainder loop after vectorization.
4007   //
4008   // For cases like foldTailByMasking() and requiresScalarEpiloque() we may
4009   // end up getting slightly roughened result but that should be OK since
4010   // profile is not inherently precise anyway. Note also possible bypass of
4011   // vector code caused by legality checks is ignored, assigning all the weight
4012   // to the vector loop, optimistically.
4013   //
4014   // For scalable vectorization we can't know at compile time how many iterations
4015   // of the loop are handled in one vector iteration, so instead assume a pessimistic
4016   // vscale of '1'.
4017   setProfileInfoAfterUnrolling(
4018       LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody),
4019       LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF);
4020 }
4021 
4022 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
4023   // In order to support recurrences we need to be able to vectorize Phi nodes.
4024   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4025   // stage #2: We now need to fix the recurrences by adding incoming edges to
4026   // the currently empty PHI nodes. At this point every instruction in the
4027   // original loop is widened to a vector form so we can use them to construct
4028   // the incoming edges.
4029   VPBasicBlock *Header = State.Plan->getEntry()->getEntryBasicBlock();
4030   for (VPRecipeBase &R : Header->phis()) {
4031     if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R))
4032       fixReduction(ReductionPhi, State);
4033     else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))
4034       fixFirstOrderRecurrence(FOR, State);
4035   }
4036 }
4037 
4038 void InnerLoopVectorizer::fixFirstOrderRecurrence(
4039     VPFirstOrderRecurrencePHIRecipe *PhiR, VPTransformState &State) {
4040   // This is the second phase of vectorizing first-order recurrences. An
4041   // overview of the transformation is described below. Suppose we have the
4042   // following loop.
4043   //
4044   //   for (int i = 0; i < n; ++i)
4045   //     b[i] = a[i] - a[i - 1];
4046   //
4047   // There is a first-order recurrence on "a". For this loop, the shorthand
4048   // scalar IR looks like:
4049   //
4050   //   scalar.ph:
4051   //     s_init = a[-1]
4052   //     br scalar.body
4053   //
4054   //   scalar.body:
4055   //     i = phi [0, scalar.ph], [i+1, scalar.body]
4056   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
4057   //     s2 = a[i]
4058   //     b[i] = s2 - s1
4059   //     br cond, scalar.body, ...
4060   //
4061   // In this example, s1 is a recurrence because it's value depends on the
4062   // previous iteration. In the first phase of vectorization, we created a
4063   // vector phi v1 for s1. We now complete the vectorization and produce the
4064   // shorthand vector IR shown below (for VF = 4, UF = 1).
4065   //
4066   //   vector.ph:
4067   //     v_init = vector(..., ..., ..., a[-1])
4068   //     br vector.body
4069   //
4070   //   vector.body
4071   //     i = phi [0, vector.ph], [i+4, vector.body]
4072   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
4073   //     v2 = a[i, i+1, i+2, i+3];
4074   //     v3 = vector(v1(3), v2(0, 1, 2))
4075   //     b[i, i+1, i+2, i+3] = v2 - v3
4076   //     br cond, vector.body, middle.block
4077   //
4078   //   middle.block:
4079   //     x = v2(3)
4080   //     br scalar.ph
4081   //
4082   //   scalar.ph:
4083   //     s_init = phi [x, middle.block], [a[-1], otherwise]
4084   //     br scalar.body
4085   //
4086   // After execution completes the vector loop, we extract the next value of
4087   // the recurrence (x) to use as the initial value in the scalar loop.
4088 
4089   // Extract the last vector element in the middle block. This will be the
4090   // initial value for the recurrence when jumping to the scalar loop.
4091   VPValue *PreviousDef = PhiR->getBackedgeValue();
4092   Value *Incoming = State.get(PreviousDef, UF - 1);
4093   auto *ExtractForScalar = Incoming;
4094   auto *IdxTy = Builder.getInt32Ty();
4095   if (VF.isVector()) {
4096     auto *One = ConstantInt::get(IdxTy, 1);
4097     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4098     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4099     auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
4100     ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx,
4101                                                     "vector.recur.extract");
4102   }
4103   // Extract the second last element in the middle block if the
4104   // Phi is used outside the loop. We need to extract the phi itself
4105   // and not the last element (the phi update in the current iteration). This
4106   // will be the value when jumping to the exit block from the LoopMiddleBlock,
4107   // when the scalar loop is not run at all.
4108   Value *ExtractForPhiUsedOutsideLoop = nullptr;
4109   if (VF.isVector()) {
4110     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4111     auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2));
4112     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
4113         Incoming, Idx, "vector.recur.extract.for.phi");
4114   } else if (UF > 1)
4115     // When loop is unrolled without vectorizing, initialize
4116     // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value
4117     // of `Incoming`. This is analogous to the vectorized case above: extracting
4118     // the second last element when VF > 1.
4119     ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);
4120 
4121   // Fix the initial value of the original recurrence in the scalar loop.
4122   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
4123   PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue());
4124   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
4125   auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue();
4126   for (auto *BB : predecessors(LoopScalarPreHeader)) {
4127     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
4128     Start->addIncoming(Incoming, BB);
4129   }
4130 
4131   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
4132   Phi->setName("scalar.recur");
4133 
4134   // Finally, fix users of the recurrence outside the loop. The users will need
4135   // either the last value of the scalar recurrence or the last value of the
4136   // vector recurrence we extracted in the middle block. Since the loop is in
4137   // LCSSA form, we just need to find all the phi nodes for the original scalar
4138   // recurrence in the exit block, and then add an edge for the middle block.
4139   // Note that LCSSA does not imply single entry when the original scalar loop
4140   // had multiple exiting edges (as we always run the last iteration in the
4141   // scalar epilogue); in that case, there is no edge from middle to exit and
4142   // and thus no phis which needed updated.
4143   if (!Cost->requiresScalarEpilogue(VF))
4144     for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4145       if (llvm::is_contained(LCSSAPhi.incoming_values(), Phi))
4146         LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
4147 }
4148 
4149 void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR,
4150                                        VPTransformState &State) {
4151   PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
4152   // Get it's reduction variable descriptor.
4153   assert(Legal->isReductionVariable(OrigPhi) &&
4154          "Unable to find the reduction variable");
4155   const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
4156 
4157   RecurKind RK = RdxDesc.getRecurrenceKind();
4158   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
4159   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
4160   setDebugLocFromInst(ReductionStartValue);
4161 
4162   VPValue *LoopExitInstDef = PhiR->getBackedgeValue();
4163   // This is the vector-clone of the value that leaves the loop.
4164   Type *VecTy = State.get(LoopExitInstDef, 0)->getType();
4165 
4166   // Wrap flags are in general invalid after vectorization, clear them.
4167   clearReductionWrapFlags(RdxDesc, State);
4168 
4169   // Before each round, move the insertion point right between
4170   // the PHIs and the values we are going to write.
4171   // This allows us to write both PHINodes and the extractelement
4172   // instructions.
4173   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4174 
4175   setDebugLocFromInst(LoopExitInst);
4176 
4177   Type *PhiTy = OrigPhi->getType();
4178   // If tail is folded by masking, the vector value to leave the loop should be
4179   // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
4180   // instead of the former. For an inloop reduction the reduction will already
4181   // be predicated, and does not need to be handled here.
4182   if (Cost->foldTailByMasking() && !PhiR->isInLoop()) {
4183     for (unsigned Part = 0; Part < UF; ++Part) {
4184       Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
4185       Value *Sel = nullptr;
4186       for (User *U : VecLoopExitInst->users()) {
4187         if (isa<SelectInst>(U)) {
4188           assert(!Sel && "Reduction exit feeding two selects");
4189           Sel = U;
4190         } else
4191           assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
4192       }
4193       assert(Sel && "Reduction exit feeds no select");
4194       State.reset(LoopExitInstDef, Sel, Part);
4195 
4196       // If the target can create a predicated operator for the reduction at no
4197       // extra cost in the loop (for example a predicated vadd), it can be
4198       // cheaper for the select to remain in the loop than be sunk out of it,
4199       // and so use the select value for the phi instead of the old
4200       // LoopExitValue.
4201       if (PreferPredicatedReductionSelect ||
4202           TTI->preferPredicatedReductionSelect(
4203               RdxDesc.getOpcode(), PhiTy,
4204               TargetTransformInfo::ReductionFlags())) {
4205         auto *VecRdxPhi =
4206             cast<PHINode>(State.get(PhiR, Part));
4207         VecRdxPhi->setIncomingValueForBlock(
4208             LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel);
4209       }
4210     }
4211   }
4212 
4213   // If the vector reduction can be performed in a smaller type, we truncate
4214   // then extend the loop exit value to enable InstCombine to evaluate the
4215   // entire expression in the smaller type.
4216   if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) {
4217     assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!");
4218     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
4219     Builder.SetInsertPoint(
4220         LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
4221     VectorParts RdxParts(UF);
4222     for (unsigned Part = 0; Part < UF; ++Part) {
4223       RdxParts[Part] = State.get(LoopExitInstDef, Part);
4224       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4225       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
4226                                         : Builder.CreateZExt(Trunc, VecTy);
4227       for (User *U : llvm::make_early_inc_range(RdxParts[Part]->users()))
4228         if (U != Trunc) {
4229           U->replaceUsesOfWith(RdxParts[Part], Extnd);
4230           RdxParts[Part] = Extnd;
4231         }
4232     }
4233     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4234     for (unsigned Part = 0; Part < UF; ++Part) {
4235       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4236       State.reset(LoopExitInstDef, RdxParts[Part], Part);
4237     }
4238   }
4239 
4240   // Reduce all of the unrolled parts into a single vector.
4241   Value *ReducedPartRdx = State.get(LoopExitInstDef, 0);
4242   unsigned Op = RecurrenceDescriptor::getOpcode(RK);
4243 
4244   // The middle block terminator has already been assigned a DebugLoc here (the
4245   // OrigLoop's single latch terminator). We want the whole middle block to
4246   // appear to execute on this line because: (a) it is all compiler generated,
4247   // (b) these instructions are always executed after evaluating the latch
4248   // conditional branch, and (c) other passes may add new predecessors which
4249   // terminate on this line. This is the easiest way to ensure we don't
4250   // accidentally cause an extra step back into the loop while debugging.
4251   setDebugLocFromInst(LoopMiddleBlock->getTerminator());
4252   if (PhiR->isOrdered())
4253     ReducedPartRdx = State.get(LoopExitInstDef, UF - 1);
4254   else {
4255     // Floating-point operations should have some FMF to enable the reduction.
4256     IRBuilderBase::FastMathFlagGuard FMFG(Builder);
4257     Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
4258     for (unsigned Part = 1; Part < UF; ++Part) {
4259       Value *RdxPart = State.get(LoopExitInstDef, Part);
4260       if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
4261         ReducedPartRdx = Builder.CreateBinOp(
4262             (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
4263       } else if (RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK))
4264         ReducedPartRdx = createSelectCmpOp(Builder, ReductionStartValue, RK,
4265                                            ReducedPartRdx, RdxPart);
4266       else
4267         ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
4268     }
4269   }
4270 
4271   // Create the reduction after the loop. Note that inloop reductions create the
4272   // target reduction in the loop using a Reduction recipe.
4273   if (VF.isVector() && !PhiR->isInLoop()) {
4274     ReducedPartRdx =
4275         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, OrigPhi);
4276     // If the reduction can be performed in a smaller type, we need to extend
4277     // the reduction to the wider type before we branch to the original loop.
4278     if (PhiTy != RdxDesc.getRecurrenceType())
4279       ReducedPartRdx = RdxDesc.isSigned()
4280                            ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
4281                            : Builder.CreateZExt(ReducedPartRdx, PhiTy);
4282   }
4283 
4284   // Create a phi node that merges control-flow from the backedge-taken check
4285   // block and the middle block.
4286   PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx",
4287                                         LoopScalarPreHeader->getTerminator());
4288   for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
4289     BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
4290   BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
4291 
4292   // Now, we need to fix the users of the reduction variable
4293   // inside and outside of the scalar remainder loop.
4294 
4295   // We know that the loop is in LCSSA form. We need to update the PHI nodes
4296   // in the exit blocks.  See comment on analogous loop in
4297   // fixFirstOrderRecurrence for a more complete explaination of the logic.
4298   if (!Cost->requiresScalarEpilogue(VF))
4299     for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4300       if (llvm::is_contained(LCSSAPhi.incoming_values(), LoopExitInst))
4301         LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
4302 
4303   // Fix the scalar loop reduction variable with the incoming reduction sum
4304   // from the vector body and from the backedge value.
4305   int IncomingEdgeBlockIdx =
4306       OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4307   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4308   // Pick the other block.
4309   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4310   OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4311   OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4312 }
4313 
4314 void InnerLoopVectorizer::clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
4315                                                   VPTransformState &State) {
4316   RecurKind RK = RdxDesc.getRecurrenceKind();
4317   if (RK != RecurKind::Add && RK != RecurKind::Mul)
4318     return;
4319 
4320   Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
4321   assert(LoopExitInstr && "null loop exit instruction");
4322   SmallVector<Instruction *, 8> Worklist;
4323   SmallPtrSet<Instruction *, 8> Visited;
4324   Worklist.push_back(LoopExitInstr);
4325   Visited.insert(LoopExitInstr);
4326 
4327   while (!Worklist.empty()) {
4328     Instruction *Cur = Worklist.pop_back_val();
4329     if (isa<OverflowingBinaryOperator>(Cur))
4330       for (unsigned Part = 0; Part < UF; ++Part) {
4331         // FIXME: Should not rely on getVPValue at this point.
4332         Value *V = State.get(State.Plan->getVPValue(Cur, true), Part);
4333         cast<Instruction>(V)->dropPoisonGeneratingFlags();
4334       }
4335 
4336     for (User *U : Cur->users()) {
4337       Instruction *UI = cast<Instruction>(U);
4338       if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
4339           Visited.insert(UI).second)
4340         Worklist.push_back(UI);
4341     }
4342   }
4343 }
4344 
4345 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) {
4346   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4347     if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1)
4348       // Some phis were already hand updated by the reduction and recurrence
4349       // code above, leave them alone.
4350       continue;
4351 
4352     auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
4353     // Non-instruction incoming values will have only one value.
4354 
4355     VPLane Lane = VPLane::getFirstLane();
4356     if (isa<Instruction>(IncomingValue) &&
4357         !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue),
4358                                            VF))
4359       Lane = VPLane::getLastLaneForVF(VF);
4360 
4361     // Can be a loop invariant incoming value or the last scalar value to be
4362     // extracted from the vectorized loop.
4363     // FIXME: Should not rely on getVPValue at this point.
4364     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4365     Value *lastIncomingValue =
4366         OrigLoop->isLoopInvariant(IncomingValue)
4367             ? IncomingValue
4368             : State.get(State.Plan->getVPValue(IncomingValue, true),
4369                         VPIteration(UF - 1, Lane));
4370     LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
4371   }
4372 }
4373 
4374 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4375   // The basic block and loop containing the predicated instruction.
4376   auto *PredBB = PredInst->getParent();
4377   auto *VectorLoop = LI->getLoopFor(PredBB);
4378 
4379   // Initialize a worklist with the operands of the predicated instruction.
4380   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4381 
4382   // Holds instructions that we need to analyze again. An instruction may be
4383   // reanalyzed if we don't yet know if we can sink it or not.
4384   SmallVector<Instruction *, 8> InstsToReanalyze;
4385 
4386   // Returns true if a given use occurs in the predicated block. Phi nodes use
4387   // their operands in their corresponding predecessor blocks.
4388   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4389     auto *I = cast<Instruction>(U.getUser());
4390     BasicBlock *BB = I->getParent();
4391     if (auto *Phi = dyn_cast<PHINode>(I))
4392       BB = Phi->getIncomingBlock(
4393           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4394     return BB == PredBB;
4395   };
4396 
4397   // Iteratively sink the scalarized operands of the predicated instruction
4398   // into the block we created for it. When an instruction is sunk, it's
4399   // operands are then added to the worklist. The algorithm ends after one pass
4400   // through the worklist doesn't sink a single instruction.
4401   bool Changed;
4402   do {
4403     // Add the instructions that need to be reanalyzed to the worklist, and
4404     // reset the changed indicator.
4405     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4406     InstsToReanalyze.clear();
4407     Changed = false;
4408 
4409     while (!Worklist.empty()) {
4410       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4411 
4412       // We can't sink an instruction if it is a phi node, is not in the loop,
4413       // or may have side effects.
4414       if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) ||
4415           I->mayHaveSideEffects())
4416         continue;
4417 
4418       // If the instruction is already in PredBB, check if we can sink its
4419       // operands. In that case, VPlan's sinkScalarOperands() succeeded in
4420       // sinking the scalar instruction I, hence it appears in PredBB; but it
4421       // may have failed to sink I's operands (recursively), which we try
4422       // (again) here.
4423       if (I->getParent() == PredBB) {
4424         Worklist.insert(I->op_begin(), I->op_end());
4425         continue;
4426       }
4427 
4428       // It's legal to sink the instruction if all its uses occur in the
4429       // predicated block. Otherwise, there's nothing to do yet, and we may
4430       // need to reanalyze the instruction.
4431       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4432         InstsToReanalyze.push_back(I);
4433         continue;
4434       }
4435 
4436       // Move the instruction to the beginning of the predicated block, and add
4437       // it's operands to the worklist.
4438       I->moveBefore(&*PredBB->getFirstInsertionPt());
4439       Worklist.insert(I->op_begin(), I->op_end());
4440 
4441       // The sinking may have enabled other instructions to be sunk, so we will
4442       // need to iterate.
4443       Changed = true;
4444     }
4445   } while (Changed);
4446 }
4447 
4448 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
4449   for (PHINode *OrigPhi : OrigPHIsToFix) {
4450     VPWidenPHIRecipe *VPPhi =
4451         cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi));
4452     PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
4453     // Make sure the builder has a valid insert point.
4454     Builder.SetInsertPoint(NewPhi);
4455     for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) {
4456       VPValue *Inc = VPPhi->getIncomingValue(i);
4457       VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i);
4458       NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]);
4459     }
4460   }
4461 }
4462 
4463 bool InnerLoopVectorizer::useOrderedReductions(
4464     const RecurrenceDescriptor &RdxDesc) {
4465   return Cost->useOrderedReductions(RdxDesc);
4466 }
4467 
4468 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
4469                                               VPWidenPHIRecipe *PhiR,
4470                                               VPTransformState &State) {
4471   PHINode *P = cast<PHINode>(PN);
4472   if (EnableVPlanNativePath) {
4473     // Currently we enter here in the VPlan-native path for non-induction
4474     // PHIs where all control flow is uniform. We simply widen these PHIs.
4475     // Create a vector phi with no operands - the vector phi operands will be
4476     // set at the end of vector code generation.
4477     Type *VecTy = (State.VF.isScalar())
4478                       ? PN->getType()
4479                       : VectorType::get(PN->getType(), State.VF);
4480     Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4481     State.set(PhiR, VecPhi, 0);
4482     OrigPHIsToFix.push_back(P);
4483 
4484     return;
4485   }
4486 
4487   assert(PN->getParent() == OrigLoop->getHeader() &&
4488          "Non-header phis should have been handled elsewhere");
4489 
4490   // In order to support recurrences we need to be able to vectorize Phi nodes.
4491   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4492   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4493   // this value when we vectorize all of the instructions that use the PHI.
4494 
4495   assert(!Legal->isReductionVariable(P) &&
4496          "reductions should be handled elsewhere");
4497 
4498   setDebugLocFromInst(P);
4499 
4500   // This PHINode must be an induction variable.
4501   // Make sure that we know about it.
4502   assert(Legal->getInductionVars().count(P) && "Not an induction variable");
4503 
4504   InductionDescriptor II = Legal->getInductionVars().lookup(P);
4505   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4506 
4507   auto *IVR = PhiR->getParent()->getPlan()->getCanonicalIV();
4508   PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, 0));
4509 
4510   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4511   // which can be found from the original scalar operations.
4512   switch (II.getKind()) {
4513   case InductionDescriptor::IK_NoInduction:
4514     llvm_unreachable("Unknown induction");
4515   case InductionDescriptor::IK_IntInduction:
4516   case InductionDescriptor::IK_FpInduction:
4517     llvm_unreachable("Integer/fp induction is handled elsewhere.");
4518   case InductionDescriptor::IK_PtrInduction: {
4519     // Handle the pointer induction variable case.
4520     assert(P->getType()->isPointerTy() && "Unexpected type.");
4521 
4522     if (Cost->isScalarAfterVectorization(P, State.VF)) {
4523       // This is the normalized GEP that starts counting at zero.
4524       Value *PtrInd =
4525           Builder.CreateSExtOrTrunc(CanonicalIV, II.getStep()->getType());
4526       // Determine the number of scalars we need to generate for each unroll
4527       // iteration. If the instruction is uniform, we only need to generate the
4528       // first lane. Otherwise, we generate all VF values.
4529       bool IsUniform = Cost->isUniformAfterVectorization(P, State.VF);
4530       assert((IsUniform || !State.VF.isScalable()) &&
4531              "Cannot scalarize a scalable VF");
4532       unsigned Lanes = IsUniform ? 1 : State.VF.getFixedValue();
4533 
4534       for (unsigned Part = 0; Part < UF; ++Part) {
4535         Value *PartStart =
4536             createStepForVF(Builder, PtrInd->getType(), VF, Part);
4537 
4538         for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4539           Value *Idx = Builder.CreateAdd(
4540               PartStart, ConstantInt::get(PtrInd->getType(), Lane));
4541           Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4542           Value *SclrGep = emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(),
4543                                                 DL, II, State.CFG.PrevBB);
4544           SclrGep->setName("next.gep");
4545           State.set(PhiR, SclrGep, VPIteration(Part, Lane));
4546         }
4547       }
4548       return;
4549     }
4550     assert(isa<SCEVConstant>(II.getStep()) &&
4551            "Induction step not a SCEV constant!");
4552     Type *PhiType = II.getStep()->getType();
4553 
4554     // Build a pointer phi
4555     Value *ScalarStartValue = PhiR->getStartValue()->getLiveInIRValue();
4556     Type *ScStValueType = ScalarStartValue->getType();
4557     PHINode *NewPointerPhi =
4558         PHINode::Create(ScStValueType, 2, "pointer.phi", CanonicalIV);
4559     NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader);
4560 
4561     // A pointer induction, performed by using a gep
4562     BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
4563     Instruction *InductionLoc = LoopLatch->getTerminator();
4564     const SCEV *ScalarStep = II.getStep();
4565     SCEVExpander Exp(*PSE.getSE(), DL, "induction");
4566     Value *ScalarStepValue =
4567         Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc);
4568     Value *RuntimeVF = getRuntimeVF(Builder, PhiType, VF);
4569     Value *NumUnrolledElems =
4570         Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF));
4571     Value *InductionGEP = GetElementPtrInst::Create(
4572         II.getElementType(), NewPointerPhi,
4573         Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind",
4574         InductionLoc);
4575     NewPointerPhi->addIncoming(InductionGEP, LoopLatch);
4576 
4577     // Create UF many actual address geps that use the pointer
4578     // phi as base and a vectorized version of the step value
4579     // (<step*0, ..., step*N>) as offset.
4580     for (unsigned Part = 0; Part < State.UF; ++Part) {
4581       Type *VecPhiType = VectorType::get(PhiType, State.VF);
4582       Value *StartOffsetScalar =
4583           Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part));
4584       Value *StartOffset =
4585           Builder.CreateVectorSplat(State.VF, StartOffsetScalar);
4586       // Create a vector of consecutive numbers from zero to VF.
4587       StartOffset =
4588           Builder.CreateAdd(StartOffset, Builder.CreateStepVector(VecPhiType));
4589 
4590       Value *GEP = Builder.CreateGEP(
4591           II.getElementType(), NewPointerPhi,
4592           Builder.CreateMul(
4593               StartOffset, Builder.CreateVectorSplat(State.VF, ScalarStepValue),
4594               "vector.gep"));
4595       State.set(PhiR, GEP, Part);
4596     }
4597   }
4598   }
4599 }
4600 
4601 /// A helper function for checking whether an integer division-related
4602 /// instruction may divide by zero (in which case it must be predicated if
4603 /// executed conditionally in the scalar code).
4604 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
4605 /// Non-zero divisors that are non compile-time constants will not be
4606 /// converted into multiplication, so we will still end up scalarizing
4607 /// the division, but can do so w/o predication.
4608 static bool mayDivideByZero(Instruction &I) {
4609   assert((I.getOpcode() == Instruction::UDiv ||
4610           I.getOpcode() == Instruction::SDiv ||
4611           I.getOpcode() == Instruction::URem ||
4612           I.getOpcode() == Instruction::SRem) &&
4613          "Unexpected instruction");
4614   Value *Divisor = I.getOperand(1);
4615   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4616   return !CInt || CInt->isZero();
4617 }
4618 
4619 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def,
4620                                                VPUser &ArgOperands,
4621                                                VPTransformState &State) {
4622   assert(!isa<DbgInfoIntrinsic>(I) &&
4623          "DbgInfoIntrinsic should have been dropped during VPlan construction");
4624   setDebugLocFromInst(&I);
4625 
4626   Module *M = I.getParent()->getParent()->getParent();
4627   auto *CI = cast<CallInst>(&I);
4628 
4629   SmallVector<Type *, 4> Tys;
4630   for (Value *ArgOperand : CI->args())
4631     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue()));
4632 
4633   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4634 
4635   // The flag shows whether we use Intrinsic or a usual Call for vectorized
4636   // version of the instruction.
4637   // Is it beneficial to perform intrinsic call compared to lib call?
4638   bool NeedToScalarize = false;
4639   InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
4640   InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0;
4641   bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
4642   assert((UseVectorIntrinsic || !NeedToScalarize) &&
4643          "Instruction should be scalarized elsewhere.");
4644   assert((IntrinsicCost.isValid() || CallCost.isValid()) &&
4645          "Either the intrinsic cost or vector call cost must be valid");
4646 
4647   for (unsigned Part = 0; Part < UF; ++Part) {
4648     SmallVector<Type *, 2> TysForDecl = {CI->getType()};
4649     SmallVector<Value *, 4> Args;
4650     for (auto &I : enumerate(ArgOperands.operands())) {
4651       // Some intrinsics have a scalar argument - don't replace it with a
4652       // vector.
4653       Value *Arg;
4654       if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
4655         Arg = State.get(I.value(), Part);
4656       else {
4657         Arg = State.get(I.value(), VPIteration(0, 0));
4658         if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index()))
4659           TysForDecl.push_back(Arg->getType());
4660       }
4661       Args.push_back(Arg);
4662     }
4663 
4664     Function *VectorF;
4665     if (UseVectorIntrinsic) {
4666       // Use vector version of the intrinsic.
4667       if (VF.isVector())
4668         TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
4669       VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4670       assert(VectorF && "Can't retrieve vector intrinsic.");
4671     } else {
4672       // Use vector version of the function call.
4673       const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
4674 #ifndef NDEBUG
4675       assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
4676              "Can't create vector function.");
4677 #endif
4678         VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
4679     }
4680       SmallVector<OperandBundleDef, 1> OpBundles;
4681       CI->getOperandBundlesAsDefs(OpBundles);
4682       CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);
4683 
4684       if (isa<FPMathOperator>(V))
4685         V->copyFastMathFlags(CI);
4686 
4687       State.set(Def, V, Part);
4688       addMetadata(V, &I);
4689   }
4690 }
4691 
4692 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
4693   // We should not collect Scalars more than once per VF. Right now, this
4694   // function is called from collectUniformsAndScalars(), which already does
4695   // this check. Collecting Scalars for VF=1 does not make any sense.
4696   assert(VF.isVector() && Scalars.find(VF) == Scalars.end() &&
4697          "This function should not be visited twice for the same VF");
4698 
4699   SmallSetVector<Instruction *, 8> Worklist;
4700 
4701   // These sets are used to seed the analysis with pointers used by memory
4702   // accesses that will remain scalar.
4703   SmallSetVector<Instruction *, 8> ScalarPtrs;
4704   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
4705   auto *Latch = TheLoop->getLoopLatch();
4706 
4707   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
4708   // The pointer operands of loads and stores will be scalar as long as the
4709   // memory access is not a gather or scatter operation. The value operand of a
4710   // store will remain scalar if the store is scalarized.
4711   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
4712     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
4713     assert(WideningDecision != CM_Unknown &&
4714            "Widening decision should be ready at this moment");
4715     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
4716       if (Ptr == Store->getValueOperand())
4717         return WideningDecision == CM_Scalarize;
4718     assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
4719            "Ptr is neither a value or pointer operand");
4720     return WideningDecision != CM_GatherScatter;
4721   };
4722 
4723   // A helper that returns true if the given value is a bitcast or
4724   // getelementptr instruction contained in the loop.
4725   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
4726     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
4727             isa<GetElementPtrInst>(V)) &&
4728            !TheLoop->isLoopInvariant(V);
4729   };
4730 
4731   // A helper that evaluates a memory access's use of a pointer. If the use will
4732   // be a scalar use and the pointer is only used by memory accesses, we place
4733   // the pointer in ScalarPtrs. Otherwise, the pointer is placed in
4734   // PossibleNonScalarPtrs.
4735   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
4736     // We only care about bitcast and getelementptr instructions contained in
4737     // the loop.
4738     if (!isLoopVaryingBitCastOrGEP(Ptr))
4739       return;
4740 
4741     // If the pointer has already been identified as scalar (e.g., if it was
4742     // also identified as uniform), there's nothing to do.
4743     auto *I = cast<Instruction>(Ptr);
4744     if (Worklist.count(I))
4745       return;
4746 
4747     // If the use of the pointer will be a scalar use, and all users of the
4748     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
4749     // place the pointer in PossibleNonScalarPtrs.
4750     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
4751           return isa<LoadInst>(U) || isa<StoreInst>(U);
4752         }))
4753       ScalarPtrs.insert(I);
4754     else
4755       PossibleNonScalarPtrs.insert(I);
4756   };
4757 
4758   // We seed the scalars analysis with three classes of instructions: (1)
4759   // instructions marked uniform-after-vectorization and (2) bitcast,
4760   // getelementptr and (pointer) phi instructions used by memory accesses
4761   // requiring a scalar use.
4762   //
4763   // (1) Add to the worklist all instructions that have been identified as
4764   // uniform-after-vectorization.
4765   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
4766 
4767   // (2) Add to the worklist all bitcast and getelementptr instructions used by
4768   // memory accesses requiring a scalar use. The pointer operands of loads and
4769   // stores will be scalar as long as the memory accesses is not a gather or
4770   // scatter operation. The value operand of a store will remain scalar if the
4771   // store is scalarized.
4772   for (auto *BB : TheLoop->blocks())
4773     for (auto &I : *BB) {
4774       if (auto *Load = dyn_cast<LoadInst>(&I)) {
4775         evaluatePtrUse(Load, Load->getPointerOperand());
4776       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
4777         evaluatePtrUse(Store, Store->getPointerOperand());
4778         evaluatePtrUse(Store, Store->getValueOperand());
4779       }
4780     }
4781   for (auto *I : ScalarPtrs)
4782     if (!PossibleNonScalarPtrs.count(I)) {
4783       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
4784       Worklist.insert(I);
4785     }
4786 
4787   // Insert the forced scalars.
4788   // FIXME: Currently widenPHIInstruction() often creates a dead vector
4789   // induction variable when the PHI user is scalarized.
4790   auto ForcedScalar = ForcedScalars.find(VF);
4791   if (ForcedScalar != ForcedScalars.end())
4792     for (auto *I : ForcedScalar->second)
4793       Worklist.insert(I);
4794 
4795   // Expand the worklist by looking through any bitcasts and getelementptr
4796   // instructions we've already identified as scalar. This is similar to the
4797   // expansion step in collectLoopUniforms(); however, here we're only
4798   // expanding to include additional bitcasts and getelementptr instructions.
4799   unsigned Idx = 0;
4800   while (Idx != Worklist.size()) {
4801     Instruction *Dst = Worklist[Idx++];
4802     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
4803       continue;
4804     auto *Src = cast<Instruction>(Dst->getOperand(0));
4805     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
4806           auto *J = cast<Instruction>(U);
4807           return !TheLoop->contains(J) || Worklist.count(J) ||
4808                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
4809                   isScalarUse(J, Src));
4810         })) {
4811       Worklist.insert(Src);
4812       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
4813     }
4814   }
4815 
4816   // An induction variable will remain scalar if all users of the induction
4817   // variable and induction variable update remain scalar.
4818   for (auto &Induction : Legal->getInductionVars()) {
4819     auto *Ind = Induction.first;
4820     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4821 
4822     // If tail-folding is applied, the primary induction variable will be used
4823     // to feed a vector compare.
4824     if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
4825       continue;
4826 
4827     // Returns true if \p Indvar is a pointer induction that is used directly by
4828     // load/store instruction \p I.
4829     auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar,
4830                                               Instruction *I) {
4831       return Induction.second.getKind() ==
4832                  InductionDescriptor::IK_PtrInduction &&
4833              (isa<LoadInst>(I) || isa<StoreInst>(I)) &&
4834              Indvar == getLoadStorePointerOperand(I) && isScalarUse(I, Indvar);
4835     };
4836 
4837     // Determine if all users of the induction variable are scalar after
4838     // vectorization.
4839     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4840       auto *I = cast<Instruction>(U);
4841       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
4842              IsDirectLoadStoreFromPtrIndvar(Ind, I);
4843     });
4844     if (!ScalarInd)
4845       continue;
4846 
4847     // Determine if all users of the induction variable update instruction are
4848     // scalar after vectorization.
4849     auto ScalarIndUpdate =
4850         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4851           auto *I = cast<Instruction>(U);
4852           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
4853                  IsDirectLoadStoreFromPtrIndvar(IndUpdate, I);
4854         });
4855     if (!ScalarIndUpdate)
4856       continue;
4857 
4858     // The induction variable and its update instruction will remain scalar.
4859     Worklist.insert(Ind);
4860     Worklist.insert(IndUpdate);
4861     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
4862     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
4863                       << "\n");
4864   }
4865 
4866   Scalars[VF].insert(Worklist.begin(), Worklist.end());
4867 }
4868 
4869 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I) const {
4870   if (!blockNeedsPredicationForAnyReason(I->getParent()))
4871     return false;
4872   switch(I->getOpcode()) {
4873   default:
4874     break;
4875   case Instruction::Load:
4876   case Instruction::Store: {
4877     if (!Legal->isMaskRequired(I))
4878       return false;
4879     auto *Ptr = getLoadStorePointerOperand(I);
4880     auto *Ty = getLoadStoreType(I);
4881     const Align Alignment = getLoadStoreAlignment(I);
4882     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
4883                                 TTI.isLegalMaskedGather(Ty, Alignment))
4884                             : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
4885                                 TTI.isLegalMaskedScatter(Ty, Alignment));
4886   }
4887   case Instruction::UDiv:
4888   case Instruction::SDiv:
4889   case Instruction::SRem:
4890   case Instruction::URem:
4891     return mayDivideByZero(*I);
4892   }
4893   return false;
4894 }
4895 
4896 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
4897     Instruction *I, ElementCount VF) {
4898   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
4899   assert(getWideningDecision(I, VF) == CM_Unknown &&
4900          "Decision should not be set yet.");
4901   auto *Group = getInterleavedAccessGroup(I);
4902   assert(Group && "Must have a group.");
4903 
4904   // If the instruction's allocated size doesn't equal it's type size, it
4905   // requires padding and will be scalarized.
4906   auto &DL = I->getModule()->getDataLayout();
4907   auto *ScalarTy = getLoadStoreType(I);
4908   if (hasIrregularType(ScalarTy, DL))
4909     return false;
4910 
4911   // Check if masking is required.
4912   // A Group may need masking for one of two reasons: it resides in a block that
4913   // needs predication, or it was decided to use masking to deal with gaps
4914   // (either a gap at the end of a load-access that may result in a speculative
4915   // load, or any gaps in a store-access).
4916   bool PredicatedAccessRequiresMasking =
4917       blockNeedsPredicationForAnyReason(I->getParent()) &&
4918       Legal->isMaskRequired(I);
4919   bool LoadAccessWithGapsRequiresEpilogMasking =
4920       isa<LoadInst>(I) && Group->requiresScalarEpilogue() &&
4921       !isScalarEpilogueAllowed();
4922   bool StoreAccessWithGapsRequiresMasking =
4923       isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor());
4924   if (!PredicatedAccessRequiresMasking &&
4925       !LoadAccessWithGapsRequiresEpilogMasking &&
4926       !StoreAccessWithGapsRequiresMasking)
4927     return true;
4928 
4929   // If masked interleaving is required, we expect that the user/target had
4930   // enabled it, because otherwise it either wouldn't have been created or
4931   // it should have been invalidated by the CostModel.
4932   assert(useMaskedInterleavedAccesses(TTI) &&
4933          "Masked interleave-groups for predicated accesses are not enabled.");
4934 
4935   if (Group->isReverse())
4936     return false;
4937 
4938   auto *Ty = getLoadStoreType(I);
4939   const Align Alignment = getLoadStoreAlignment(I);
4940   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
4941                           : TTI.isLegalMaskedStore(Ty, Alignment);
4942 }
4943 
4944 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
4945     Instruction *I, ElementCount VF) {
4946   // Get and ensure we have a valid memory instruction.
4947   assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction");
4948 
4949   auto *Ptr = getLoadStorePointerOperand(I);
4950   auto *ScalarTy = getLoadStoreType(I);
4951 
4952   // In order to be widened, the pointer should be consecutive, first of all.
4953   if (!Legal->isConsecutivePtr(ScalarTy, Ptr))
4954     return false;
4955 
4956   // If the instruction is a store located in a predicated block, it will be
4957   // scalarized.
4958   if (isScalarWithPredication(I))
4959     return false;
4960 
4961   // If the instruction's allocated size doesn't equal it's type size, it
4962   // requires padding and will be scalarized.
4963   auto &DL = I->getModule()->getDataLayout();
4964   if (hasIrregularType(ScalarTy, DL))
4965     return false;
4966 
4967   return true;
4968 }
4969 
4970 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
4971   // We should not collect Uniforms more than once per VF. Right now,
4972   // this function is called from collectUniformsAndScalars(), which
4973   // already does this check. Collecting Uniforms for VF=1 does not make any
4974   // sense.
4975 
4976   assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
4977          "This function should not be visited twice for the same VF");
4978 
4979   // Visit the list of Uniforms. If we'll not find any uniform value, we'll
4980   // not analyze again.  Uniforms.count(VF) will return 1.
4981   Uniforms[VF].clear();
4982 
4983   // We now know that the loop is vectorizable!
4984   // Collect instructions inside the loop that will remain uniform after
4985   // vectorization.
4986 
4987   // Global values, params and instructions outside of current loop are out of
4988   // scope.
4989   auto isOutOfScope = [&](Value *V) -> bool {
4990     Instruction *I = dyn_cast<Instruction>(V);
4991     return (!I || !TheLoop->contains(I));
4992   };
4993 
4994   // Worklist containing uniform instructions demanding lane 0.
4995   SetVector<Instruction *> Worklist;
4996   BasicBlock *Latch = TheLoop->getLoopLatch();
4997 
4998   // Add uniform instructions demanding lane 0 to the worklist. Instructions
4999   // that are scalar with predication must not be considered uniform after
5000   // vectorization, because that would create an erroneous replicating region
5001   // where only a single instance out of VF should be formed.
5002   // TODO: optimize such seldom cases if found important, see PR40816.
5003   auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
5004     if (isOutOfScope(I)) {
5005       LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
5006                         << *I << "\n");
5007       return;
5008     }
5009     if (isScalarWithPredication(I)) {
5010       LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
5011                         << *I << "\n");
5012       return;
5013     }
5014     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
5015     Worklist.insert(I);
5016   };
5017 
5018   // Start with the conditional branch. If the branch condition is an
5019   // instruction contained in the loop that is only used by the branch, it is
5020   // uniform.
5021   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
5022   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
5023     addToWorklistIfAllowed(Cmp);
5024 
5025   auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
5026     InstWidening WideningDecision = getWideningDecision(I, VF);
5027     assert(WideningDecision != CM_Unknown &&
5028            "Widening decision should be ready at this moment");
5029 
5030     // A uniform memory op is itself uniform.  We exclude uniform stores
5031     // here as they demand the last lane, not the first one.
5032     if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) {
5033       assert(WideningDecision == CM_Scalarize);
5034       return true;
5035     }
5036 
5037     return (WideningDecision == CM_Widen ||
5038             WideningDecision == CM_Widen_Reverse ||
5039             WideningDecision == CM_Interleave);
5040   };
5041 
5042 
5043   // Returns true if Ptr is the pointer operand of a memory access instruction
5044   // I, and I is known to not require scalarization.
5045   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
5046     return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
5047   };
5048 
5049   // Holds a list of values which are known to have at least one uniform use.
5050   // Note that there may be other uses which aren't uniform.  A "uniform use"
5051   // here is something which only demands lane 0 of the unrolled iterations;
5052   // it does not imply that all lanes produce the same value (e.g. this is not
5053   // the usual meaning of uniform)
5054   SetVector<Value *> HasUniformUse;
5055 
5056   // Scan the loop for instructions which are either a) known to have only
5057   // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
5058   for (auto *BB : TheLoop->blocks())
5059     for (auto &I : *BB) {
5060       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) {
5061         switch (II->getIntrinsicID()) {
5062         case Intrinsic::sideeffect:
5063         case Intrinsic::experimental_noalias_scope_decl:
5064         case Intrinsic::assume:
5065         case Intrinsic::lifetime_start:
5066         case Intrinsic::lifetime_end:
5067           if (TheLoop->hasLoopInvariantOperands(&I))
5068             addToWorklistIfAllowed(&I);
5069           break;
5070         default:
5071           break;
5072         }
5073       }
5074 
5075       // ExtractValue instructions must be uniform, because the operands are
5076       // known to be loop-invariant.
5077       if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) {
5078         assert(isOutOfScope(EVI->getAggregateOperand()) &&
5079                "Expected aggregate value to be loop invariant");
5080         addToWorklistIfAllowed(EVI);
5081         continue;
5082       }
5083 
5084       // If there's no pointer operand, there's nothing to do.
5085       auto *Ptr = getLoadStorePointerOperand(&I);
5086       if (!Ptr)
5087         continue;
5088 
5089       // A uniform memory op is itself uniform.  We exclude uniform stores
5090       // here as they demand the last lane, not the first one.
5091       if (isa<LoadInst>(I) && Legal->isUniformMemOp(I))
5092         addToWorklistIfAllowed(&I);
5093 
5094       if (isUniformDecision(&I, VF)) {
5095         assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check");
5096         HasUniformUse.insert(Ptr);
5097       }
5098     }
5099 
5100   // Add to the worklist any operands which have *only* uniform (e.g. lane 0
5101   // demanding) users.  Since loops are assumed to be in LCSSA form, this
5102   // disallows uses outside the loop as well.
5103   for (auto *V : HasUniformUse) {
5104     if (isOutOfScope(V))
5105       continue;
5106     auto *I = cast<Instruction>(V);
5107     auto UsersAreMemAccesses =
5108       llvm::all_of(I->users(), [&](User *U) -> bool {
5109         return isVectorizedMemAccessUse(cast<Instruction>(U), V);
5110       });
5111     if (UsersAreMemAccesses)
5112       addToWorklistIfAllowed(I);
5113   }
5114 
5115   // Expand Worklist in topological order: whenever a new instruction
5116   // is added , its users should be already inside Worklist.  It ensures
5117   // a uniform instruction will only be used by uniform instructions.
5118   unsigned idx = 0;
5119   while (idx != Worklist.size()) {
5120     Instruction *I = Worklist[idx++];
5121 
5122     for (auto OV : I->operand_values()) {
5123       // isOutOfScope operands cannot be uniform instructions.
5124       if (isOutOfScope(OV))
5125         continue;
5126       // First order recurrence Phi's should typically be considered
5127       // non-uniform.
5128       auto *OP = dyn_cast<PHINode>(OV);
5129       if (OP && Legal->isFirstOrderRecurrence(OP))
5130         continue;
5131       // If all the users of the operand are uniform, then add the
5132       // operand into the uniform worklist.
5133       auto *OI = cast<Instruction>(OV);
5134       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
5135             auto *J = cast<Instruction>(U);
5136             return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
5137           }))
5138         addToWorklistIfAllowed(OI);
5139     }
5140   }
5141 
5142   // For an instruction to be added into Worklist above, all its users inside
5143   // the loop should also be in Worklist. However, this condition cannot be
5144   // true for phi nodes that form a cyclic dependence. We must process phi
5145   // nodes separately. An induction variable will remain uniform if all users
5146   // of the induction variable and induction variable update remain uniform.
5147   // The code below handles both pointer and non-pointer induction variables.
5148   for (auto &Induction : Legal->getInductionVars()) {
5149     auto *Ind = Induction.first;
5150     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5151 
5152     // Determine if all users of the induction variable are uniform after
5153     // vectorization.
5154     auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5155       auto *I = cast<Instruction>(U);
5156       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
5157              isVectorizedMemAccessUse(I, Ind);
5158     });
5159     if (!UniformInd)
5160       continue;
5161 
5162     // Determine if all users of the induction variable update instruction are
5163     // uniform after vectorization.
5164     auto UniformIndUpdate =
5165         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5166           auto *I = cast<Instruction>(U);
5167           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
5168                  isVectorizedMemAccessUse(I, IndUpdate);
5169         });
5170     if (!UniformIndUpdate)
5171       continue;
5172 
5173     // The induction variable and its update instruction will remain uniform.
5174     addToWorklistIfAllowed(Ind);
5175     addToWorklistIfAllowed(IndUpdate);
5176   }
5177 
5178   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
5179 }
5180 
5181 bool LoopVectorizationCostModel::runtimeChecksRequired() {
5182   LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
5183 
5184   if (Legal->getRuntimePointerChecking()->Need) {
5185     reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
5186         "runtime pointer checks needed. Enable vectorization of this "
5187         "loop with '#pragma clang loop vectorize(enable)' when "
5188         "compiling with -Os/-Oz",
5189         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5190     return true;
5191   }
5192 
5193   if (!PSE.getUnionPredicate().getPredicates().empty()) {
5194     reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
5195         "runtime SCEV checks needed. Enable vectorization of this "
5196         "loop with '#pragma clang loop vectorize(enable)' when "
5197         "compiling with -Os/-Oz",
5198         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5199     return true;
5200   }
5201 
5202   // FIXME: Avoid specializing for stride==1 instead of bailing out.
5203   if (!Legal->getLAI()->getSymbolicStrides().empty()) {
5204     reportVectorizationFailure("Runtime stride check for small trip count",
5205         "runtime stride == 1 checks needed. Enable vectorization of "
5206         "this loop without such check by compiling with -Os/-Oz",
5207         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5208     return true;
5209   }
5210 
5211   return false;
5212 }
5213 
5214 ElementCount
5215 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
5216   if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors)
5217     return ElementCount::getScalable(0);
5218 
5219   if (Hints->isScalableVectorizationDisabled()) {
5220     reportVectorizationInfo("Scalable vectorization is explicitly disabled",
5221                             "ScalableVectorizationDisabled", ORE, TheLoop);
5222     return ElementCount::getScalable(0);
5223   }
5224 
5225   LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n");
5226 
5227   auto MaxScalableVF = ElementCount::getScalable(
5228       std::numeric_limits<ElementCount::ScalarTy>::max());
5229 
5230   // Test that the loop-vectorizer can legalize all operations for this MaxVF.
5231   // FIXME: While for scalable vectors this is currently sufficient, this should
5232   // be replaced by a more detailed mechanism that filters out specific VFs,
5233   // instead of invalidating vectorization for a whole set of VFs based on the
5234   // MaxVF.
5235 
5236   // Disable scalable vectorization if the loop contains unsupported reductions.
5237   if (!canVectorizeReductions(MaxScalableVF)) {
5238     reportVectorizationInfo(
5239         "Scalable vectorization not supported for the reduction "
5240         "operations found in this loop.",
5241         "ScalableVFUnfeasible", ORE, TheLoop);
5242     return ElementCount::getScalable(0);
5243   }
5244 
5245   // Disable scalable vectorization if the loop contains any instructions
5246   // with element types not supported for scalable vectors.
5247   if (any_of(ElementTypesInLoop, [&](Type *Ty) {
5248         return !Ty->isVoidTy() &&
5249                !this->TTI.isElementTypeLegalForScalableVector(Ty);
5250       })) {
5251     reportVectorizationInfo("Scalable vectorization is not supported "
5252                             "for all element types found in this loop.",
5253                             "ScalableVFUnfeasible", ORE, TheLoop);
5254     return ElementCount::getScalable(0);
5255   }
5256 
5257   if (Legal->isSafeForAnyVectorWidth())
5258     return MaxScalableVF;
5259 
5260   // Limit MaxScalableVF by the maximum safe dependence distance.
5261   Optional<unsigned> MaxVScale = TTI.getMaxVScale();
5262   if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange))
5263     MaxVScale =
5264         TheFunction->getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax();
5265   MaxScalableVF = ElementCount::getScalable(
5266       MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);
5267   if (!MaxScalableVF)
5268     reportVectorizationInfo(
5269         "Max legal vector width too small, scalable vectorization "
5270         "unfeasible.",
5271         "ScalableVFUnfeasible", ORE, TheLoop);
5272 
5273   return MaxScalableVF;
5274 }
5275 
5276 FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF(
5277     unsigned ConstTripCount, ElementCount UserVF, bool FoldTailByMasking) {
5278   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
5279   unsigned SmallestType, WidestType;
5280   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
5281 
5282   // Get the maximum safe dependence distance in bits computed by LAA.
5283   // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
5284   // the memory accesses that is most restrictive (involved in the smallest
5285   // dependence distance).
5286   unsigned MaxSafeElements =
5287       PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType);
5288 
5289   auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements);
5290   auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements);
5291 
5292   LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF
5293                     << ".\n");
5294   LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF
5295                     << ".\n");
5296 
5297   // First analyze the UserVF, fall back if the UserVF should be ignored.
5298   if (UserVF) {
5299     auto MaxSafeUserVF =
5300         UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;
5301 
5302     if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) {
5303       // If `VF=vscale x N` is safe, then so is `VF=N`
5304       if (UserVF.isScalable())
5305         return FixedScalableVFPair(
5306             ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF);
5307       else
5308         return UserVF;
5309     }
5310 
5311     assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));
5312 
5313     // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it
5314     // is better to ignore the hint and let the compiler choose a suitable VF.
5315     if (!UserVF.isScalable()) {
5316       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5317                         << " is unsafe, clamping to max safe VF="
5318                         << MaxSafeFixedVF << ".\n");
5319       ORE->emit([&]() {
5320         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5321                                           TheLoop->getStartLoc(),
5322                                           TheLoop->getHeader())
5323                << "User-specified vectorization factor "
5324                << ore::NV("UserVectorizationFactor", UserVF)
5325                << " is unsafe, clamping to maximum safe vectorization factor "
5326                << ore::NV("VectorizationFactor", MaxSafeFixedVF);
5327       });
5328       return MaxSafeFixedVF;
5329     }
5330 
5331     if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) {
5332       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5333                         << " is ignored because scalable vectors are not "
5334                            "available.\n");
5335       ORE->emit([&]() {
5336         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5337                                           TheLoop->getStartLoc(),
5338                                           TheLoop->getHeader())
5339                << "User-specified vectorization factor "
5340                << ore::NV("UserVectorizationFactor", UserVF)
5341                << " is ignored because the target does not support scalable "
5342                   "vectors. The compiler will pick a more suitable value.";
5343       });
5344     } else {
5345       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5346                         << " is unsafe. Ignoring scalable UserVF.\n");
5347       ORE->emit([&]() {
5348         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5349                                           TheLoop->getStartLoc(),
5350                                           TheLoop->getHeader())
5351                << "User-specified vectorization factor "
5352                << ore::NV("UserVectorizationFactor", UserVF)
5353                << " is unsafe. Ignoring the hint to let the compiler pick a "
5354                   "more suitable value.";
5355       });
5356     }
5357   }
5358 
5359   LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
5360                     << " / " << WidestType << " bits.\n");
5361 
5362   FixedScalableVFPair Result(ElementCount::getFixed(1),
5363                              ElementCount::getScalable(0));
5364   if (auto MaxVF =
5365           getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
5366                                   MaxSafeFixedVF, FoldTailByMasking))
5367     Result.FixedVF = MaxVF;
5368 
5369   if (auto MaxVF =
5370           getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
5371                                   MaxSafeScalableVF, FoldTailByMasking))
5372     if (MaxVF.isScalable()) {
5373       Result.ScalableVF = MaxVF;
5374       LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
5375                         << "\n");
5376     }
5377 
5378   return Result;
5379 }
5380 
5381 FixedScalableVFPair
5382 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
5383   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
5384     // TODO: It may by useful to do since it's still likely to be dynamically
5385     // uniform if the target can skip.
5386     reportVectorizationFailure(
5387         "Not inserting runtime ptr check for divergent target",
5388         "runtime pointer checks needed. Not enabled for divergent target",
5389         "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
5390     return FixedScalableVFPair::getNone();
5391   }
5392 
5393   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5394   LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
5395   if (TC == 1) {
5396     reportVectorizationFailure("Single iteration (non) loop",
5397         "loop trip count is one, irrelevant for vectorization",
5398         "SingleIterationLoop", ORE, TheLoop);
5399     return FixedScalableVFPair::getNone();
5400   }
5401 
5402   switch (ScalarEpilogueStatus) {
5403   case CM_ScalarEpilogueAllowed:
5404     return computeFeasibleMaxVF(TC, UserVF, false);
5405   case CM_ScalarEpilogueNotAllowedUsePredicate:
5406     LLVM_FALLTHROUGH;
5407   case CM_ScalarEpilogueNotNeededUsePredicate:
5408     LLVM_DEBUG(
5409         dbgs() << "LV: vector predicate hint/switch found.\n"
5410                << "LV: Not allowing scalar epilogue, creating predicated "
5411                << "vector loop.\n");
5412     break;
5413   case CM_ScalarEpilogueNotAllowedLowTripLoop:
5414     // fallthrough as a special case of OptForSize
5415   case CM_ScalarEpilogueNotAllowedOptSize:
5416     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
5417       LLVM_DEBUG(
5418           dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5419     else
5420       LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5421                         << "count.\n");
5422 
5423     // Bail if runtime checks are required, which are not good when optimising
5424     // for size.
5425     if (runtimeChecksRequired())
5426       return FixedScalableVFPair::getNone();
5427 
5428     break;
5429   }
5430 
5431   // The only loops we can vectorize without a scalar epilogue, are loops with
5432   // a bottom-test and a single exiting block. We'd have to handle the fact
5433   // that not every instruction executes on the last iteration.  This will
5434   // require a lane mask which varies through the vector loop body.  (TODO)
5435   if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5436     // If there was a tail-folding hint/switch, but we can't fold the tail by
5437     // masking, fallback to a vectorization with a scalar epilogue.
5438     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5439       LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5440                            "scalar epilogue instead.\n");
5441       ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5442       return computeFeasibleMaxVF(TC, UserVF, false);
5443     }
5444     return FixedScalableVFPair::getNone();
5445   }
5446 
5447   // Now try the tail folding
5448 
5449   // Invalidate interleave groups that require an epilogue if we can't mask
5450   // the interleave-group.
5451   if (!useMaskedInterleavedAccesses(TTI)) {
5452     assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5453            "No decisions should have been taken at this point");
5454     // Note: There is no need to invalidate any cost modeling decisions here, as
5455     // non where taken so far.
5456     InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5457   }
5458 
5459   FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF, true);
5460   // Avoid tail folding if the trip count is known to be a multiple of any VF
5461   // we chose.
5462   // FIXME: The condition below pessimises the case for fixed-width vectors,
5463   // when scalable VFs are also candidates for vectorization.
5464   if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) {
5465     ElementCount MaxFixedVF = MaxFactors.FixedVF;
5466     assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) &&
5467            "MaxFixedVF must be a power of 2");
5468     unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC
5469                                    : MaxFixedVF.getFixedValue();
5470     ScalarEvolution *SE = PSE.getSE();
5471     const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
5472     const SCEV *ExitCount = SE->getAddExpr(
5473         BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
5474     const SCEV *Rem = SE->getURemExpr(
5475         SE->applyLoopGuards(ExitCount, TheLoop),
5476         SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
5477     if (Rem->isZero()) {
5478       // Accept MaxFixedVF if we do not have a tail.
5479       LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
5480       return MaxFactors;
5481     }
5482   }
5483 
5484   // For scalable vectors, don't use tail folding as this is currently not yet
5485   // supported. The code is likely to have ended up here if the tripcount is
5486   // low, in which case it makes sense not to use scalable vectors.
5487   if (MaxFactors.ScalableVF.isVector())
5488     MaxFactors.ScalableVF = ElementCount::getScalable(0);
5489 
5490   // If we don't know the precise trip count, or if the trip count that we
5491   // found modulo the vectorization factor is not zero, try to fold the tail
5492   // by masking.
5493   // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
5494   if (Legal->prepareToFoldTailByMasking()) {
5495     FoldTailByMasking = true;
5496     return MaxFactors;
5497   }
5498 
5499   // If there was a tail-folding hint/switch, but we can't fold the tail by
5500   // masking, fallback to a vectorization with a scalar epilogue.
5501   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5502     LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5503                          "scalar epilogue instead.\n");
5504     ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5505     return MaxFactors;
5506   }
5507 
5508   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
5509     LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
5510     return FixedScalableVFPair::getNone();
5511   }
5512 
5513   if (TC == 0) {
5514     reportVectorizationFailure(
5515         "Unable to calculate the loop count due to complex control flow",
5516         "unable to calculate the loop count due to complex control flow",
5517         "UnknownLoopCountComplexCFG", ORE, TheLoop);
5518     return FixedScalableVFPair::getNone();
5519   }
5520 
5521   reportVectorizationFailure(
5522       "Cannot optimize for size and vectorize at the same time.",
5523       "cannot optimize for size and vectorize at the same time. "
5524       "Enable vectorization of this loop with '#pragma clang loop "
5525       "vectorize(enable)' when compiling with -Os/-Oz",
5526       "NoTailLoopWithOptForSize", ORE, TheLoop);
5527   return FixedScalableVFPair::getNone();
5528 }
5529 
5530 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
5531     unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType,
5532     const ElementCount &MaxSafeVF, bool FoldTailByMasking) {
5533   bool ComputeScalableMaxVF = MaxSafeVF.isScalable();
5534   TypeSize WidestRegister = TTI.getRegisterBitWidth(
5535       ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
5536                            : TargetTransformInfo::RGK_FixedWidthVector);
5537 
5538   // Convenience function to return the minimum of two ElementCounts.
5539   auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) {
5540     assert((LHS.isScalable() == RHS.isScalable()) &&
5541            "Scalable flags must match");
5542     return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS;
5543   };
5544 
5545   // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
5546   // Note that both WidestRegister and WidestType may not be a powers of 2.
5547   auto MaxVectorElementCount = ElementCount::get(
5548       PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType),
5549       ComputeScalableMaxVF);
5550   MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
5551   LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
5552                     << (MaxVectorElementCount * WidestType) << " bits.\n");
5553 
5554   if (!MaxVectorElementCount) {
5555     LLVM_DEBUG(dbgs() << "LV: The target has no "
5556                       << (ComputeScalableMaxVF ? "scalable" : "fixed")
5557                       << " vector registers.\n");
5558     return ElementCount::getFixed(1);
5559   }
5560 
5561   const auto TripCountEC = ElementCount::getFixed(ConstTripCount);
5562   if (ConstTripCount &&
5563       ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) &&
5564       (!FoldTailByMasking || isPowerOf2_32(ConstTripCount))) {
5565     // If loop trip count (TC) is known at compile time there is no point in
5566     // choosing VF greater than TC (as done in the loop below). Select maximum
5567     // power of two which doesn't exceed TC.
5568     // If MaxVectorElementCount is scalable, we only fall back on a fixed VF
5569     // when the TC is less than or equal to the known number of lanes.
5570     auto ClampedConstTripCount = PowerOf2Floor(ConstTripCount);
5571     LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to maximum power of two not "
5572                          "exceeding the constant trip count: "
5573                       << ClampedConstTripCount << "\n");
5574     return ElementCount::getFixed(ClampedConstTripCount);
5575   }
5576 
5577   ElementCount MaxVF = MaxVectorElementCount;
5578   if (TTI.shouldMaximizeVectorBandwidth() ||
5579       (MaximizeBandwidth && isScalarEpilogueAllowed())) {
5580     auto MaxVectorElementCountMaxBW = ElementCount::get(
5581         PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType),
5582         ComputeScalableMaxVF);
5583     MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
5584 
5585     // Collect all viable vectorization factors larger than the default MaxVF
5586     // (i.e. MaxVectorElementCount).
5587     SmallVector<ElementCount, 8> VFs;
5588     for (ElementCount VS = MaxVectorElementCount * 2;
5589          ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2)
5590       VFs.push_back(VS);
5591 
5592     // For each VF calculate its register usage.
5593     auto RUs = calculateRegisterUsage(VFs);
5594 
5595     // Select the largest VF which doesn't require more registers than existing
5596     // ones.
5597     for (int i = RUs.size() - 1; i >= 0; --i) {
5598       bool Selected = true;
5599       for (auto &pair : RUs[i].MaxLocalUsers) {
5600         unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5601         if (pair.second > TargetNumRegisters)
5602           Selected = false;
5603       }
5604       if (Selected) {
5605         MaxVF = VFs[i];
5606         break;
5607       }
5608     }
5609     if (ElementCount MinVF =
5610             TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) {
5611       if (ElementCount::isKnownLT(MaxVF, MinVF)) {
5612         LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
5613                           << ") with target's minimum: " << MinVF << '\n');
5614         MaxVF = MinVF;
5615       }
5616     }
5617   }
5618   return MaxVF;
5619 }
5620 
5621 bool LoopVectorizationCostModel::isMoreProfitable(
5622     const VectorizationFactor &A, const VectorizationFactor &B) const {
5623   InstructionCost CostA = A.Cost;
5624   InstructionCost CostB = B.Cost;
5625 
5626   unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop);
5627 
5628   if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking &&
5629       MaxTripCount) {
5630     // If we are folding the tail and the trip count is a known (possibly small)
5631     // constant, the trip count will be rounded up to an integer number of
5632     // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF),
5633     // which we compare directly. When not folding the tail, the total cost will
5634     // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is
5635     // approximated with the per-lane cost below instead of using the tripcount
5636     // as here.
5637     auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue());
5638     auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue());
5639     return RTCostA < RTCostB;
5640   }
5641 
5642   // Improve estimate for the vector width if it is scalable.
5643   unsigned EstimatedWidthA = A.Width.getKnownMinValue();
5644   unsigned EstimatedWidthB = B.Width.getKnownMinValue();
5645   if (Optional<unsigned> VScale = TTI.getVScaleForTuning()) {
5646     if (A.Width.isScalable())
5647       EstimatedWidthA *= VScale.getValue();
5648     if (B.Width.isScalable())
5649       EstimatedWidthB *= VScale.getValue();
5650   }
5651 
5652   // Assume vscale may be larger than 1 (or the value being tuned for),
5653   // so that scalable vectorization is slightly favorable over fixed-width
5654   // vectorization.
5655   if (A.Width.isScalable() && !B.Width.isScalable())
5656     return (CostA * B.Width.getFixedValue()) <= (CostB * EstimatedWidthA);
5657 
5658   // To avoid the need for FP division:
5659   //      (CostA / A.Width) < (CostB / B.Width)
5660   // <=>  (CostA * B.Width) < (CostB * A.Width)
5661   return (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA);
5662 }
5663 
5664 VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor(
5665     const ElementCountSet &VFCandidates) {
5666   InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first;
5667   LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
5668   assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
5669   assert(VFCandidates.count(ElementCount::getFixed(1)) &&
5670          "Expected Scalar VF to be a candidate");
5671 
5672   const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost);
5673   VectorizationFactor ChosenFactor = ScalarCost;
5674 
5675   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
5676   if (ForceVectorization && VFCandidates.size() > 1) {
5677     // Ignore scalar width, because the user explicitly wants vectorization.
5678     // Initialize cost to max so that VF = 2 is, at least, chosen during cost
5679     // evaluation.
5680     ChosenFactor.Cost = InstructionCost::getMax();
5681   }
5682 
5683   SmallVector<InstructionVFPair> InvalidCosts;
5684   for (const auto &i : VFCandidates) {
5685     // The cost for scalar VF=1 is already calculated, so ignore it.
5686     if (i.isScalar())
5687       continue;
5688 
5689     VectorizationCostTy C = expectedCost(i, &InvalidCosts);
5690     VectorizationFactor Candidate(i, C.first);
5691 
5692 #ifndef NDEBUG
5693     unsigned AssumedMinimumVscale = 1;
5694     if (Optional<unsigned> VScale = TTI.getVScaleForTuning())
5695       AssumedMinimumVscale = VScale.getValue();
5696     unsigned Width =
5697         Candidate.Width.isScalable()
5698             ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale
5699             : Candidate.Width.getFixedValue();
5700     LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
5701                       << " costs: " << (Candidate.Cost / Width));
5702     if (i.isScalable())
5703       LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of "
5704                         << AssumedMinimumVscale << ")");
5705     LLVM_DEBUG(dbgs() << ".\n");
5706 #endif
5707 
5708     if (!C.second && !ForceVectorization) {
5709       LLVM_DEBUG(
5710           dbgs() << "LV: Not considering vector loop of width " << i
5711                  << " because it will not generate any vector instructions.\n");
5712       continue;
5713     }
5714 
5715     // If profitable add it to ProfitableVF list.
5716     if (isMoreProfitable(Candidate, ScalarCost))
5717       ProfitableVFs.push_back(Candidate);
5718 
5719     if (isMoreProfitable(Candidate, ChosenFactor))
5720       ChosenFactor = Candidate;
5721   }
5722 
5723   // Emit a report of VFs with invalid costs in the loop.
5724   if (!InvalidCosts.empty()) {
5725     // Group the remarks per instruction, keeping the instruction order from
5726     // InvalidCosts.
5727     std::map<Instruction *, unsigned> Numbering;
5728     unsigned I = 0;
5729     for (auto &Pair : InvalidCosts)
5730       if (!Numbering.count(Pair.first))
5731         Numbering[Pair.first] = I++;
5732 
5733     // Sort the list, first on instruction(number) then on VF.
5734     llvm::sort(InvalidCosts,
5735                [&Numbering](InstructionVFPair &A, InstructionVFPair &B) {
5736                  if (Numbering[A.first] != Numbering[B.first])
5737                    return Numbering[A.first] < Numbering[B.first];
5738                  ElementCountComparator ECC;
5739                  return ECC(A.second, B.second);
5740                });
5741 
5742     // For a list of ordered instruction-vf pairs:
5743     //   [(load, vf1), (load, vf2), (store, vf1)]
5744     // Group the instructions together to emit separate remarks for:
5745     //   load  (vf1, vf2)
5746     //   store (vf1)
5747     auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts);
5748     auto Subset = ArrayRef<InstructionVFPair>();
5749     do {
5750       if (Subset.empty())
5751         Subset = Tail.take_front(1);
5752 
5753       Instruction *I = Subset.front().first;
5754 
5755       // If the next instruction is different, or if there are no other pairs,
5756       // emit a remark for the collated subset. e.g.
5757       //   [(load, vf1), (load, vf2))]
5758       // to emit:
5759       //  remark: invalid costs for 'load' at VF=(vf, vf2)
5760       if (Subset == Tail || Tail[Subset.size()].first != I) {
5761         std::string OutString;
5762         raw_string_ostream OS(OutString);
5763         assert(!Subset.empty() && "Unexpected empty range");
5764         OS << "Instruction with invalid costs prevented vectorization at VF=(";
5765         for (auto &Pair : Subset)
5766           OS << (Pair.second == Subset.front().second ? "" : ", ")
5767              << Pair.second;
5768         OS << "):";
5769         if (auto *CI = dyn_cast<CallInst>(I))
5770           OS << " call to " << CI->getCalledFunction()->getName();
5771         else
5772           OS << " " << I->getOpcodeName();
5773         OS.flush();
5774         reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I);
5775         Tail = Tail.drop_front(Subset.size());
5776         Subset = {};
5777       } else
5778         // Grow the subset by one element
5779         Subset = Tail.take_front(Subset.size() + 1);
5780     } while (!Tail.empty());
5781   }
5782 
5783   if (!EnableCondStoresVectorization && NumPredStores) {
5784     reportVectorizationFailure("There are conditional stores.",
5785         "store that is conditionally executed prevents vectorization",
5786         "ConditionalStore", ORE, TheLoop);
5787     ChosenFactor = ScalarCost;
5788   }
5789 
5790   LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() &&
5791                  ChosenFactor.Cost >= ScalarCost.Cost) dbgs()
5792              << "LV: Vectorization seems to be not beneficial, "
5793              << "but was forced by a user.\n");
5794   LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n");
5795   return ChosenFactor;
5796 }
5797 
5798 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization(
5799     const Loop &L, ElementCount VF) const {
5800   // Cross iteration phis such as reductions need special handling and are
5801   // currently unsupported.
5802   if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) {
5803         return Legal->isFirstOrderRecurrence(&Phi) ||
5804                Legal->isReductionVariable(&Phi);
5805       }))
5806     return false;
5807 
5808   // Phis with uses outside of the loop require special handling and are
5809   // currently unsupported.
5810   for (auto &Entry : Legal->getInductionVars()) {
5811     // Look for uses of the value of the induction at the last iteration.
5812     Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch());
5813     for (User *U : PostInc->users())
5814       if (!L.contains(cast<Instruction>(U)))
5815         return false;
5816     // Look for uses of penultimate value of the induction.
5817     for (User *U : Entry.first->users())
5818       if (!L.contains(cast<Instruction>(U)))
5819         return false;
5820   }
5821 
5822   // Induction variables that are widened require special handling that is
5823   // currently not supported.
5824   if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
5825         return !(this->isScalarAfterVectorization(Entry.first, VF) ||
5826                  this->isProfitableToScalarize(Entry.first, VF));
5827       }))
5828     return false;
5829 
5830   // Epilogue vectorization code has not been auditted to ensure it handles
5831   // non-latch exits properly.  It may be fine, but it needs auditted and
5832   // tested.
5833   if (L.getExitingBlock() != L.getLoopLatch())
5834     return false;
5835 
5836   return true;
5837 }
5838 
5839 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
5840     const ElementCount VF) const {
5841   // FIXME: We need a much better cost-model to take different parameters such
5842   // as register pressure, code size increase and cost of extra branches into
5843   // account. For now we apply a very crude heuristic and only consider loops
5844   // with vectorization factors larger than a certain value.
5845   // We also consider epilogue vectorization unprofitable for targets that don't
5846   // consider interleaving beneficial (eg. MVE).
5847   if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
5848     return false;
5849   if (VF.getFixedValue() >= EpilogueVectorizationMinVF)
5850     return true;
5851   return false;
5852 }
5853 
5854 VectorizationFactor
5855 LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
5856     const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
5857   VectorizationFactor Result = VectorizationFactor::Disabled();
5858   if (!EnableEpilogueVectorization) {
5859     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
5860     return Result;
5861   }
5862 
5863   if (!isScalarEpilogueAllowed()) {
5864     LLVM_DEBUG(
5865         dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
5866                   "allowed.\n";);
5867     return Result;
5868   }
5869 
5870   // Not really a cost consideration, but check for unsupported cases here to
5871   // simplify the logic.
5872   if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
5873     LLVM_DEBUG(
5874         dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
5875                   "not a supported candidate.\n";);
5876     return Result;
5877   }
5878 
5879   if (EpilogueVectorizationForceVF > 1) {
5880     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
5881     ElementCount ForcedEC = ElementCount::getFixed(EpilogueVectorizationForceVF);
5882     if (LVP.hasPlanWithVF(ForcedEC))
5883       return {ForcedEC, 0};
5884     else {
5885       LLVM_DEBUG(
5886           dbgs()
5887               << "LEV: Epilogue vectorization forced factor is not viable.\n";);
5888       return Result;
5889     }
5890   }
5891 
5892   if (TheLoop->getHeader()->getParent()->hasOptSize() ||
5893       TheLoop->getHeader()->getParent()->hasMinSize()) {
5894     LLVM_DEBUG(
5895         dbgs()
5896             << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
5897     return Result;
5898   }
5899 
5900   auto FixedMainLoopVF = ElementCount::getFixed(MainLoopVF.getKnownMinValue());
5901   if (MainLoopVF.isScalable())
5902     LLVM_DEBUG(
5903         dbgs() << "LEV: Epilogue vectorization using scalable vectors not "
5904                   "yet supported. Converting to fixed-width (VF="
5905                << FixedMainLoopVF << ") instead\n");
5906 
5907   if (!isEpilogueVectorizationProfitable(FixedMainLoopVF)) {
5908     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for "
5909                          "this loop\n");
5910     return Result;
5911   }
5912 
5913   for (auto &NextVF : ProfitableVFs)
5914     if (ElementCount::isKnownLT(NextVF.Width, FixedMainLoopVF) &&
5915         (Result.Width.getFixedValue() == 1 ||
5916          isMoreProfitable(NextVF, Result)) &&
5917         LVP.hasPlanWithVF(NextVF.Width))
5918       Result = NextVF;
5919 
5920   if (Result != VectorizationFactor::Disabled())
5921     LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
5922                       << Result.Width.getFixedValue() << "\n";);
5923   return Result;
5924 }
5925 
5926 std::pair<unsigned, unsigned>
5927 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
5928   unsigned MinWidth = -1U;
5929   unsigned MaxWidth = 8;
5930   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5931   // For in-loop reductions, no element types are added to ElementTypesInLoop
5932   // if there are no loads/stores in the loop. In this case, check through the
5933   // reduction variables to determine the maximum width.
5934   if (ElementTypesInLoop.empty() && !Legal->getReductionVars().empty()) {
5935     // Reset MaxWidth so that we can find the smallest type used by recurrences
5936     // in the loop.
5937     MaxWidth = -1U;
5938     for (auto &PhiDescriptorPair : Legal->getReductionVars()) {
5939       const RecurrenceDescriptor &RdxDesc = PhiDescriptorPair.second;
5940       // When finding the min width used by the recurrence we need to account
5941       // for casts on the input operands of the recurrence.
5942       MaxWidth = std::min<unsigned>(
5943           MaxWidth, std::min<unsigned>(
5944                         RdxDesc.getMinWidthCastToRecurrenceTypeInBits(),
5945                         RdxDesc.getRecurrenceType()->getScalarSizeInBits()));
5946     }
5947   } else {
5948     for (Type *T : ElementTypesInLoop) {
5949       MinWidth = std::min<unsigned>(
5950           MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
5951       MaxWidth = std::max<unsigned>(
5952           MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
5953     }
5954   }
5955   return {MinWidth, MaxWidth};
5956 }
5957 
5958 void LoopVectorizationCostModel::collectElementTypesForWidening() {
5959   ElementTypesInLoop.clear();
5960   // For each block.
5961   for (BasicBlock *BB : TheLoop->blocks()) {
5962     // For each instruction in the loop.
5963     for (Instruction &I : BB->instructionsWithoutDebug()) {
5964       Type *T = I.getType();
5965 
5966       // Skip ignored values.
5967       if (ValuesToIgnore.count(&I))
5968         continue;
5969 
5970       // Only examine Loads, Stores and PHINodes.
5971       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
5972         continue;
5973 
5974       // Examine PHI nodes that are reduction variables. Update the type to
5975       // account for the recurrence type.
5976       if (auto *PN = dyn_cast<PHINode>(&I)) {
5977         if (!Legal->isReductionVariable(PN))
5978           continue;
5979         const RecurrenceDescriptor &RdxDesc =
5980             Legal->getReductionVars().find(PN)->second;
5981         if (PreferInLoopReductions || useOrderedReductions(RdxDesc) ||
5982             TTI.preferInLoopReduction(RdxDesc.getOpcode(),
5983                                       RdxDesc.getRecurrenceType(),
5984                                       TargetTransformInfo::ReductionFlags()))
5985           continue;
5986         T = RdxDesc.getRecurrenceType();
5987       }
5988 
5989       // Examine the stored values.
5990       if (auto *ST = dyn_cast<StoreInst>(&I))
5991         T = ST->getValueOperand()->getType();
5992 
5993       assert(T->isSized() &&
5994              "Expected the load/store/recurrence type to be sized");
5995 
5996       ElementTypesInLoop.insert(T);
5997     }
5998   }
5999 }
6000 
6001 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
6002                                                            unsigned LoopCost) {
6003   // -- The interleave heuristics --
6004   // We interleave the loop in order to expose ILP and reduce the loop overhead.
6005   // There are many micro-architectural considerations that we can't predict
6006   // at this level. For example, frontend pressure (on decode or fetch) due to
6007   // code size, or the number and capabilities of the execution ports.
6008   //
6009   // We use the following heuristics to select the interleave count:
6010   // 1. If the code has reductions, then we interleave to break the cross
6011   // iteration dependency.
6012   // 2. If the loop is really small, then we interleave to reduce the loop
6013   // overhead.
6014   // 3. We don't interleave if we think that we will spill registers to memory
6015   // due to the increased register pressure.
6016 
6017   if (!isScalarEpilogueAllowed())
6018     return 1;
6019 
6020   // We used the distance for the interleave count.
6021   if (Legal->getMaxSafeDepDistBytes() != -1U)
6022     return 1;
6023 
6024   auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
6025   const bool HasReductions = !Legal->getReductionVars().empty();
6026   // Do not interleave loops with a relatively small known or estimated trip
6027   // count. But we will interleave when InterleaveSmallLoopScalarReduction is
6028   // enabled, and the code has scalar reductions(HasReductions && VF = 1),
6029   // because with the above conditions interleaving can expose ILP and break
6030   // cross iteration dependences for reductions.
6031   if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
6032       !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
6033     return 1;
6034 
6035   RegisterUsage R = calculateRegisterUsage({VF})[0];
6036   // We divide by these constants so assume that we have at least one
6037   // instruction that uses at least one register.
6038   for (auto& pair : R.MaxLocalUsers) {
6039     pair.second = std::max(pair.second, 1U);
6040   }
6041 
6042   // We calculate the interleave count using the following formula.
6043   // Subtract the number of loop invariants from the number of available
6044   // registers. These registers are used by all of the interleaved instances.
6045   // Next, divide the remaining registers by the number of registers that is
6046   // required by the loop, in order to estimate how many parallel instances
6047   // fit without causing spills. All of this is rounded down if necessary to be
6048   // a power of two. We want power of two interleave count to simplify any
6049   // addressing operations or alignment considerations.
6050   // We also want power of two interleave counts to ensure that the induction
6051   // variable of the vector loop wraps to zero, when tail is folded by masking;
6052   // this currently happens when OptForSize, in which case IC is set to 1 above.
6053   unsigned IC = UINT_MAX;
6054 
6055   for (auto& pair : R.MaxLocalUsers) {
6056     unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
6057     LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
6058                       << " registers of "
6059                       << TTI.getRegisterClassName(pair.first) << " register class\n");
6060     if (VF.isScalar()) {
6061       if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
6062         TargetNumRegisters = ForceTargetNumScalarRegs;
6063     } else {
6064       if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
6065         TargetNumRegisters = ForceTargetNumVectorRegs;
6066     }
6067     unsigned MaxLocalUsers = pair.second;
6068     unsigned LoopInvariantRegs = 0;
6069     if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
6070       LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
6071 
6072     unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers);
6073     // Don't count the induction variable as interleaved.
6074     if (EnableIndVarRegisterHeur) {
6075       TmpIC =
6076           PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
6077                         std::max(1U, (MaxLocalUsers - 1)));
6078     }
6079 
6080     IC = std::min(IC, TmpIC);
6081   }
6082 
6083   // Clamp the interleave ranges to reasonable counts.
6084   unsigned MaxInterleaveCount =
6085       TTI.getMaxInterleaveFactor(VF.getKnownMinValue());
6086 
6087   // Check if the user has overridden the max.
6088   if (VF.isScalar()) {
6089     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
6090       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
6091   } else {
6092     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
6093       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
6094   }
6095 
6096   // If trip count is known or estimated compile time constant, limit the
6097   // interleave count to be less than the trip count divided by VF, provided it
6098   // is at least 1.
6099   //
6100   // For scalable vectors we can't know if interleaving is beneficial. It may
6101   // not be beneficial for small loops if none of the lanes in the second vector
6102   // iterations is enabled. However, for larger loops, there is likely to be a
6103   // similar benefit as for fixed-width vectors. For now, we choose to leave
6104   // the InterleaveCount as if vscale is '1', although if some information about
6105   // the vector is known (e.g. min vector size), we can make a better decision.
6106   if (BestKnownTC) {
6107     MaxInterleaveCount =
6108         std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
6109     // Make sure MaxInterleaveCount is greater than 0.
6110     MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
6111   }
6112 
6113   assert(MaxInterleaveCount > 0 &&
6114          "Maximum interleave count must be greater than 0");
6115 
6116   // Clamp the calculated IC to be between the 1 and the max interleave count
6117   // that the target and trip count allows.
6118   if (IC > MaxInterleaveCount)
6119     IC = MaxInterleaveCount;
6120   else
6121     // Make sure IC is greater than 0.
6122     IC = std::max(1u, IC);
6123 
6124   assert(IC > 0 && "Interleave count must be greater than 0.");
6125 
6126   // If we did not calculate the cost for VF (because the user selected the VF)
6127   // then we calculate the cost of VF here.
6128   if (LoopCost == 0) {
6129     InstructionCost C = expectedCost(VF).first;
6130     assert(C.isValid() && "Expected to have chosen a VF with valid cost");
6131     LoopCost = *C.getValue();
6132   }
6133 
6134   assert(LoopCost && "Non-zero loop cost expected");
6135 
6136   // Interleave if we vectorized this loop and there is a reduction that could
6137   // benefit from interleaving.
6138   if (VF.isVector() && HasReductions) {
6139     LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
6140     return IC;
6141   }
6142 
6143   // Note that if we've already vectorized the loop we will have done the
6144   // runtime check and so interleaving won't require further checks.
6145   bool InterleavingRequiresRuntimePointerCheck =
6146       (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
6147 
6148   // We want to interleave small loops in order to reduce the loop overhead and
6149   // potentially expose ILP opportunities.
6150   LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
6151                     << "LV: IC is " << IC << '\n'
6152                     << "LV: VF is " << VF << '\n');
6153   const bool AggressivelyInterleaveReductions =
6154       TTI.enableAggressiveInterleaving(HasReductions);
6155   if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
6156     // We assume that the cost overhead is 1 and we use the cost model
6157     // to estimate the cost of the loop and interleave until the cost of the
6158     // loop overhead is about 5% of the cost of the loop.
6159     unsigned SmallIC =
6160         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
6161 
6162     // Interleave until store/load ports (estimated by max interleave count) are
6163     // saturated.
6164     unsigned NumStores = Legal->getNumStores();
6165     unsigned NumLoads = Legal->getNumLoads();
6166     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
6167     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
6168 
6169     // There is little point in interleaving for reductions containing selects
6170     // and compares when VF=1 since it may just create more overhead than it's
6171     // worth for loops with small trip counts. This is because we still have to
6172     // do the final reduction after the loop.
6173     bool HasSelectCmpReductions =
6174         HasReductions &&
6175         any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
6176           const RecurrenceDescriptor &RdxDesc = Reduction.second;
6177           return RecurrenceDescriptor::isSelectCmpRecurrenceKind(
6178               RdxDesc.getRecurrenceKind());
6179         });
6180     if (HasSelectCmpReductions) {
6181       LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n");
6182       return 1;
6183     }
6184 
6185     // If we have a scalar reduction (vector reductions are already dealt with
6186     // by this point), we can increase the critical path length if the loop
6187     // we're interleaving is inside another loop. For tree-wise reductions
6188     // set the limit to 2, and for ordered reductions it's best to disable
6189     // interleaving entirely.
6190     if (HasReductions && TheLoop->getLoopDepth() > 1) {
6191       bool HasOrderedReductions =
6192           any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
6193             const RecurrenceDescriptor &RdxDesc = Reduction.second;
6194             return RdxDesc.isOrdered();
6195           });
6196       if (HasOrderedReductions) {
6197         LLVM_DEBUG(
6198             dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
6199         return 1;
6200       }
6201 
6202       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
6203       SmallIC = std::min(SmallIC, F);
6204       StoresIC = std::min(StoresIC, F);
6205       LoadsIC = std::min(LoadsIC, F);
6206     }
6207 
6208     if (EnableLoadStoreRuntimeInterleave &&
6209         std::max(StoresIC, LoadsIC) > SmallIC) {
6210       LLVM_DEBUG(
6211           dbgs() << "LV: Interleaving to saturate store or load ports.\n");
6212       return std::max(StoresIC, LoadsIC);
6213     }
6214 
6215     // If there are scalar reductions and TTI has enabled aggressive
6216     // interleaving for reductions, we will interleave to expose ILP.
6217     if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
6218         AggressivelyInterleaveReductions) {
6219       LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6220       // Interleave no less than SmallIC but not as aggressive as the normal IC
6221       // to satisfy the rare situation when resources are too limited.
6222       return std::max(IC / 2, SmallIC);
6223     } else {
6224       LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
6225       return SmallIC;
6226     }
6227   }
6228 
6229   // Interleave if this is a large loop (small loops are already dealt with by
6230   // this point) that could benefit from interleaving.
6231   if (AggressivelyInterleaveReductions) {
6232     LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6233     return IC;
6234   }
6235 
6236   LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
6237   return 1;
6238 }
6239 
6240 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
6241 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
6242   // This function calculates the register usage by measuring the highest number
6243   // of values that are alive at a single location. Obviously, this is a very
6244   // rough estimation. We scan the loop in a topological order in order and
6245   // assign a number to each instruction. We use RPO to ensure that defs are
6246   // met before their users. We assume that each instruction that has in-loop
6247   // users starts an interval. We record every time that an in-loop value is
6248   // used, so we have a list of the first and last occurrences of each
6249   // instruction. Next, we transpose this data structure into a multi map that
6250   // holds the list of intervals that *end* at a specific location. This multi
6251   // map allows us to perform a linear search. We scan the instructions linearly
6252   // and record each time that a new interval starts, by placing it in a set.
6253   // If we find this value in the multi-map then we remove it from the set.
6254   // The max register usage is the maximum size of the set.
6255   // We also search for instructions that are defined outside the loop, but are
6256   // used inside the loop. We need this number separately from the max-interval
6257   // usage number because when we unroll, loop-invariant values do not take
6258   // more register.
6259   LoopBlocksDFS DFS(TheLoop);
6260   DFS.perform(LI);
6261 
6262   RegisterUsage RU;
6263 
6264   // Each 'key' in the map opens a new interval. The values
6265   // of the map are the index of the 'last seen' usage of the
6266   // instruction that is the key.
6267   using IntervalMap = DenseMap<Instruction *, unsigned>;
6268 
6269   // Maps instruction to its index.
6270   SmallVector<Instruction *, 64> IdxToInstr;
6271   // Marks the end of each interval.
6272   IntervalMap EndPoint;
6273   // Saves the list of instruction indices that are used in the loop.
6274   SmallPtrSet<Instruction *, 8> Ends;
6275   // Saves the list of values that are used in the loop but are
6276   // defined outside the loop, such as arguments and constants.
6277   SmallPtrSet<Value *, 8> LoopInvariants;
6278 
6279   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
6280     for (Instruction &I : BB->instructionsWithoutDebug()) {
6281       IdxToInstr.push_back(&I);
6282 
6283       // Save the end location of each USE.
6284       for (Value *U : I.operands()) {
6285         auto *Instr = dyn_cast<Instruction>(U);
6286 
6287         // Ignore non-instruction values such as arguments, constants, etc.
6288         if (!Instr)
6289           continue;
6290 
6291         // If this instruction is outside the loop then record it and continue.
6292         if (!TheLoop->contains(Instr)) {
6293           LoopInvariants.insert(Instr);
6294           continue;
6295         }
6296 
6297         // Overwrite previous end points.
6298         EndPoint[Instr] = IdxToInstr.size();
6299         Ends.insert(Instr);
6300       }
6301     }
6302   }
6303 
6304   // Saves the list of intervals that end with the index in 'key'.
6305   using InstrList = SmallVector<Instruction *, 2>;
6306   DenseMap<unsigned, InstrList> TransposeEnds;
6307 
6308   // Transpose the EndPoints to a list of values that end at each index.
6309   for (auto &Interval : EndPoint)
6310     TransposeEnds[Interval.second].push_back(Interval.first);
6311 
6312   SmallPtrSet<Instruction *, 8> OpenIntervals;
6313   SmallVector<RegisterUsage, 8> RUs(VFs.size());
6314   SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());
6315 
6316   LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
6317 
6318   // A lambda that gets the register usage for the given type and VF.
6319   const auto &TTICapture = TTI;
6320   auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned {
6321     if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty))
6322       return 0;
6323     InstructionCost::CostType RegUsage =
6324         *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue();
6325     assert(RegUsage >= 0 && RegUsage <= std::numeric_limits<unsigned>::max() &&
6326            "Nonsensical values for register usage.");
6327     return RegUsage;
6328   };
6329 
6330   for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
6331     Instruction *I = IdxToInstr[i];
6332 
6333     // Remove all of the instructions that end at this location.
6334     InstrList &List = TransposeEnds[i];
6335     for (Instruction *ToRemove : List)
6336       OpenIntervals.erase(ToRemove);
6337 
6338     // Ignore instructions that are never used within the loop.
6339     if (!Ends.count(I))
6340       continue;
6341 
6342     // Skip ignored values.
6343     if (ValuesToIgnore.count(I))
6344       continue;
6345 
6346     // For each VF find the maximum usage of registers.
6347     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
6348       // Count the number of live intervals.
6349       SmallMapVector<unsigned, unsigned, 4> RegUsage;
6350 
6351       if (VFs[j].isScalar()) {
6352         for (auto Inst : OpenIntervals) {
6353           unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
6354           if (RegUsage.find(ClassID) == RegUsage.end())
6355             RegUsage[ClassID] = 1;
6356           else
6357             RegUsage[ClassID] += 1;
6358         }
6359       } else {
6360         collectUniformsAndScalars(VFs[j]);
6361         for (auto Inst : OpenIntervals) {
6362           // Skip ignored values for VF > 1.
6363           if (VecValuesToIgnore.count(Inst))
6364             continue;
6365           if (isScalarAfterVectorization(Inst, VFs[j])) {
6366             unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
6367             if (RegUsage.find(ClassID) == RegUsage.end())
6368               RegUsage[ClassID] = 1;
6369             else
6370               RegUsage[ClassID] += 1;
6371           } else {
6372             unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType());
6373             if (RegUsage.find(ClassID) == RegUsage.end())
6374               RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]);
6375             else
6376               RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
6377           }
6378         }
6379       }
6380 
6381       for (auto& pair : RegUsage) {
6382         if (MaxUsages[j].find(pair.first) != MaxUsages[j].end())
6383           MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second);
6384         else
6385           MaxUsages[j][pair.first] = pair.second;
6386       }
6387     }
6388 
6389     LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
6390                       << OpenIntervals.size() << '\n');
6391 
6392     // Add the current instruction to the list of open intervals.
6393     OpenIntervals.insert(I);
6394   }
6395 
6396   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
6397     SmallMapVector<unsigned, unsigned, 4> Invariant;
6398 
6399     for (auto Inst : LoopInvariants) {
6400       unsigned Usage =
6401           VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
6402       unsigned ClassID =
6403           TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
6404       if (Invariant.find(ClassID) == Invariant.end())
6405         Invariant[ClassID] = Usage;
6406       else
6407         Invariant[ClassID] += Usage;
6408     }
6409 
6410     LLVM_DEBUG({
6411       dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
6412       dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
6413              << " item\n";
6414       for (const auto &pair : MaxUsages[i]) {
6415         dbgs() << "LV(REG): RegisterClass: "
6416                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6417                << " registers\n";
6418       }
6419       dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
6420              << " item\n";
6421       for (const auto &pair : Invariant) {
6422         dbgs() << "LV(REG): RegisterClass: "
6423                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6424                << " registers\n";
6425       }
6426     });
6427 
6428     RU.LoopInvariantRegs = Invariant;
6429     RU.MaxLocalUsers = MaxUsages[i];
6430     RUs[i] = RU;
6431   }
6432 
6433   return RUs;
6434 }
6435 
6436 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I){
6437   // TODO: Cost model for emulated masked load/store is completely
6438   // broken. This hack guides the cost model to use an artificially
6439   // high enough value to practically disable vectorization with such
6440   // operations, except where previously deployed legality hack allowed
6441   // using very low cost values. This is to avoid regressions coming simply
6442   // from moving "masked load/store" check from legality to cost model.
6443   // Masked Load/Gather emulation was previously never allowed.
6444   // Limited number of Masked Store/Scatter emulation was allowed.
6445   assert(isPredicatedInst(I) &&
6446          "Expecting a scalar emulated instruction");
6447   return isa<LoadInst>(I) ||
6448          (isa<StoreInst>(I) &&
6449           NumPredStores > NumberOfStoresToPredicate);
6450 }
6451 
6452 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
6453   // If we aren't vectorizing the loop, or if we've already collected the
6454   // instructions to scalarize, there's nothing to do. Collection may already
6455   // have occurred if we have a user-selected VF and are now computing the
6456   // expected cost for interleaving.
6457   if (VF.isScalar() || VF.isZero() ||
6458       InstsToScalarize.find(VF) != InstsToScalarize.end())
6459     return;
6460 
6461   // Initialize a mapping for VF in InstsToScalalarize. If we find that it's
6462   // not profitable to scalarize any instructions, the presence of VF in the
6463   // map will indicate that we've analyzed it already.
6464   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
6465 
6466   // Find all the instructions that are scalar with predication in the loop and
6467   // determine if it would be better to not if-convert the blocks they are in.
6468   // If so, we also record the instructions to scalarize.
6469   for (BasicBlock *BB : TheLoop->blocks()) {
6470     if (!blockNeedsPredicationForAnyReason(BB))
6471       continue;
6472     for (Instruction &I : *BB)
6473       if (isScalarWithPredication(&I)) {
6474         ScalarCostsTy ScalarCosts;
6475         // Do not apply discount if scalable, because that would lead to
6476         // invalid scalarization costs.
6477         // Do not apply discount logic if hacked cost is needed
6478         // for emulated masked memrefs.
6479         if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I) &&
6480             computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
6481           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
6482         // Remember that BB will remain after vectorization.
6483         PredicatedBBsAfterVectorization.insert(BB);
6484       }
6485   }
6486 }
6487 
6488 int LoopVectorizationCostModel::computePredInstDiscount(
6489     Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
6490   assert(!isUniformAfterVectorization(PredInst, VF) &&
6491          "Instruction marked uniform-after-vectorization will be predicated");
6492 
6493   // Initialize the discount to zero, meaning that the scalar version and the
6494   // vector version cost the same.
6495   InstructionCost Discount = 0;
6496 
6497   // Holds instructions to analyze. The instructions we visit are mapped in
6498   // ScalarCosts. Those instructions are the ones that would be scalarized if
6499   // we find that the scalar version costs less.
6500   SmallVector<Instruction *, 8> Worklist;
6501 
6502   // Returns true if the given instruction can be scalarized.
6503   auto canBeScalarized = [&](Instruction *I) -> bool {
6504     // We only attempt to scalarize instructions forming a single-use chain
6505     // from the original predicated block that would otherwise be vectorized.
6506     // Although not strictly necessary, we give up on instructions we know will
6507     // already be scalar to avoid traversing chains that are unlikely to be
6508     // beneficial.
6509     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
6510         isScalarAfterVectorization(I, VF))
6511       return false;
6512 
6513     // If the instruction is scalar with predication, it will be analyzed
6514     // separately. We ignore it within the context of PredInst.
6515     if (isScalarWithPredication(I))
6516       return false;
6517 
6518     // If any of the instruction's operands are uniform after vectorization,
6519     // the instruction cannot be scalarized. This prevents, for example, a
6520     // masked load from being scalarized.
6521     //
6522     // We assume we will only emit a value for lane zero of an instruction
6523     // marked uniform after vectorization, rather than VF identical values.
6524     // Thus, if we scalarize an instruction that uses a uniform, we would
6525     // create uses of values corresponding to the lanes we aren't emitting code
6526     // for. This behavior can be changed by allowing getScalarValue to clone
6527     // the lane zero values for uniforms rather than asserting.
6528     for (Use &U : I->operands())
6529       if (auto *J = dyn_cast<Instruction>(U.get()))
6530         if (isUniformAfterVectorization(J, VF))
6531           return false;
6532 
6533     // Otherwise, we can scalarize the instruction.
6534     return true;
6535   };
6536 
6537   // Compute the expected cost discount from scalarizing the entire expression
6538   // feeding the predicated instruction. We currently only consider expressions
6539   // that are single-use instruction chains.
6540   Worklist.push_back(PredInst);
6541   while (!Worklist.empty()) {
6542     Instruction *I = Worklist.pop_back_val();
6543 
6544     // If we've already analyzed the instruction, there's nothing to do.
6545     if (ScalarCosts.find(I) != ScalarCosts.end())
6546       continue;
6547 
6548     // Compute the cost of the vector instruction. Note that this cost already
6549     // includes the scalarization overhead of the predicated instruction.
6550     InstructionCost VectorCost = getInstructionCost(I, VF).first;
6551 
6552     // Compute the cost of the scalarized instruction. This cost is the cost of
6553     // the instruction as if it wasn't if-converted and instead remained in the
6554     // predicated block. We will scale this cost by block probability after
6555     // computing the scalarization overhead.
6556     InstructionCost ScalarCost =
6557         VF.getFixedValue() *
6558         getInstructionCost(I, ElementCount::getFixed(1)).first;
6559 
6560     // Compute the scalarization overhead of needed insertelement instructions
6561     // and phi nodes.
6562     if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
6563       ScalarCost += TTI.getScalarizationOverhead(
6564           cast<VectorType>(ToVectorTy(I->getType(), VF)),
6565           APInt::getAllOnes(VF.getFixedValue()), true, false);
6566       ScalarCost +=
6567           VF.getFixedValue() *
6568           TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput);
6569     }
6570 
6571     // Compute the scalarization overhead of needed extractelement
6572     // instructions. For each of the instruction's operands, if the operand can
6573     // be scalarized, add it to the worklist; otherwise, account for the
6574     // overhead.
6575     for (Use &U : I->operands())
6576       if (auto *J = dyn_cast<Instruction>(U.get())) {
6577         assert(VectorType::isValidElementType(J->getType()) &&
6578                "Instruction has non-scalar type");
6579         if (canBeScalarized(J))
6580           Worklist.push_back(J);
6581         else if (needsExtract(J, VF)) {
6582           ScalarCost += TTI.getScalarizationOverhead(
6583               cast<VectorType>(ToVectorTy(J->getType(), VF)),
6584               APInt::getAllOnes(VF.getFixedValue()), false, true);
6585         }
6586       }
6587 
6588     // Scale the total scalar cost by block probability.
6589     ScalarCost /= getReciprocalPredBlockProb();
6590 
6591     // Compute the discount. A non-negative discount means the vector version
6592     // of the instruction costs more, and scalarizing would be beneficial.
6593     Discount += VectorCost - ScalarCost;
6594     ScalarCosts[I] = ScalarCost;
6595   }
6596 
6597   return *Discount.getValue();
6598 }
6599 
6600 LoopVectorizationCostModel::VectorizationCostTy
6601 LoopVectorizationCostModel::expectedCost(
6602     ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) {
6603   VectorizationCostTy Cost;
6604 
6605   // For each block.
6606   for (BasicBlock *BB : TheLoop->blocks()) {
6607     VectorizationCostTy BlockCost;
6608 
6609     // For each instruction in the old loop.
6610     for (Instruction &I : BB->instructionsWithoutDebug()) {
6611       // Skip ignored values.
6612       if (ValuesToIgnore.count(&I) ||
6613           (VF.isVector() && VecValuesToIgnore.count(&I)))
6614         continue;
6615 
6616       VectorizationCostTy C = getInstructionCost(&I, VF);
6617 
6618       // Check if we should override the cost.
6619       if (C.first.isValid() &&
6620           ForceTargetInstructionCost.getNumOccurrences() > 0)
6621         C.first = InstructionCost(ForceTargetInstructionCost);
6622 
6623       // Keep a list of instructions with invalid costs.
6624       if (Invalid && !C.first.isValid())
6625         Invalid->emplace_back(&I, VF);
6626 
6627       BlockCost.first += C.first;
6628       BlockCost.second |= C.second;
6629       LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
6630                         << " for VF " << VF << " For instruction: " << I
6631                         << '\n');
6632     }
6633 
6634     // If we are vectorizing a predicated block, it will have been
6635     // if-converted. This means that the block's instructions (aside from
6636     // stores and instructions that may divide by zero) will now be
6637     // unconditionally executed. For the scalar case, we may not always execute
6638     // the predicated block, if it is an if-else block. Thus, scale the block's
6639     // cost by the probability of executing it. blockNeedsPredication from
6640     // Legal is used so as to not include all blocks in tail folded loops.
6641     if (VF.isScalar() && Legal->blockNeedsPredication(BB))
6642       BlockCost.first /= getReciprocalPredBlockProb();
6643 
6644     Cost.first += BlockCost.first;
6645     Cost.second |= BlockCost.second;
6646   }
6647 
6648   return Cost;
6649 }
6650 
6651 /// Gets Address Access SCEV after verifying that the access pattern
6652 /// is loop invariant except the induction variable dependence.
6653 ///
6654 /// This SCEV can be sent to the Target in order to estimate the address
6655 /// calculation cost.
6656 static const SCEV *getAddressAccessSCEV(
6657               Value *Ptr,
6658               LoopVectorizationLegality *Legal,
6659               PredicatedScalarEvolution &PSE,
6660               const Loop *TheLoop) {
6661 
6662   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
6663   if (!Gep)
6664     return nullptr;
6665 
6666   // We are looking for a gep with all loop invariant indices except for one
6667   // which should be an induction variable.
6668   auto SE = PSE.getSE();
6669   unsigned NumOperands = Gep->getNumOperands();
6670   for (unsigned i = 1; i < NumOperands; ++i) {
6671     Value *Opd = Gep->getOperand(i);
6672     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
6673         !Legal->isInductionVariable(Opd))
6674       return nullptr;
6675   }
6676 
6677   // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV.
6678   return PSE.getSCEV(Ptr);
6679 }
6680 
6681 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
6682   return Legal->hasStride(I->getOperand(0)) ||
6683          Legal->hasStride(I->getOperand(1));
6684 }
6685 
6686 InstructionCost
6687 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
6688                                                         ElementCount VF) {
6689   assert(VF.isVector() &&
6690          "Scalarization cost of instruction implies vectorization.");
6691   if (VF.isScalable())
6692     return InstructionCost::getInvalid();
6693 
6694   Type *ValTy = getLoadStoreType(I);
6695   auto SE = PSE.getSE();
6696 
6697   unsigned AS = getLoadStoreAddressSpace(I);
6698   Value *Ptr = getLoadStorePointerOperand(I);
6699   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
6700   // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost`
6701   //       that it is being called from this specific place.
6702 
6703   // Figure out whether the access is strided and get the stride value
6704   // if it's known in compile time
6705   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
6706 
6707   // Get the cost of the scalar memory instruction and address computation.
6708   InstructionCost Cost =
6709       VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
6710 
6711   // Don't pass *I here, since it is scalar but will actually be part of a
6712   // vectorized loop where the user of it is a vectorized instruction.
6713   const Align Alignment = getLoadStoreAlignment(I);
6714   Cost += VF.getKnownMinValue() *
6715           TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
6716                               AS, TTI::TCK_RecipThroughput);
6717 
6718   // Get the overhead of the extractelement and insertelement instructions
6719   // we might create due to scalarization.
6720   Cost += getScalarizationOverhead(I, VF);
6721 
6722   // If we have a predicated load/store, it will need extra i1 extracts and
6723   // conditional branches, but may not be executed for each vector lane. Scale
6724   // the cost by the probability of executing the predicated block.
6725   if (isPredicatedInst(I)) {
6726     Cost /= getReciprocalPredBlockProb();
6727 
6728     // Add the cost of an i1 extract and a branch
6729     auto *Vec_i1Ty =
6730         VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF);
6731     Cost += TTI.getScalarizationOverhead(
6732         Vec_i1Ty, APInt::getAllOnes(VF.getKnownMinValue()),
6733         /*Insert=*/false, /*Extract=*/true);
6734     Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput);
6735 
6736     if (useEmulatedMaskMemRefHack(I))
6737       // Artificially setting to a high enough value to practically disable
6738       // vectorization with such operations.
6739       Cost = 3000000;
6740   }
6741 
6742   return Cost;
6743 }
6744 
6745 InstructionCost
6746 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
6747                                                     ElementCount VF) {
6748   Type *ValTy = getLoadStoreType(I);
6749   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6750   Value *Ptr = getLoadStorePointerOperand(I);
6751   unsigned AS = getLoadStoreAddressSpace(I);
6752   int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr);
6753   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6754 
6755   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6756          "Stride should be 1 or -1 for consecutive memory access");
6757   const Align Alignment = getLoadStoreAlignment(I);
6758   InstructionCost Cost = 0;
6759   if (Legal->isMaskRequired(I))
6760     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6761                                       CostKind);
6762   else
6763     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6764                                 CostKind, I);
6765 
6766   bool Reverse = ConsecutiveStride < 0;
6767   if (Reverse)
6768     Cost +=
6769         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
6770   return Cost;
6771 }
6772 
6773 InstructionCost
6774 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
6775                                                 ElementCount VF) {
6776   assert(Legal->isUniformMemOp(*I));
6777 
6778   Type *ValTy = getLoadStoreType(I);
6779   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6780   const Align Alignment = getLoadStoreAlignment(I);
6781   unsigned AS = getLoadStoreAddressSpace(I);
6782   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6783   if (isa<LoadInst>(I)) {
6784     return TTI.getAddressComputationCost(ValTy) +
6785            TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
6786                                CostKind) +
6787            TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
6788   }
6789   StoreInst *SI = cast<StoreInst>(I);
6790 
6791   bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
6792   return TTI.getAddressComputationCost(ValTy) +
6793          TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
6794                              CostKind) +
6795          (isLoopInvariantStoreValue
6796               ? 0
6797               : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
6798                                        VF.getKnownMinValue() - 1));
6799 }
6800 
6801 InstructionCost
6802 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
6803                                                  ElementCount VF) {
6804   Type *ValTy = getLoadStoreType(I);
6805   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6806   const Align Alignment = getLoadStoreAlignment(I);
6807   const Value *Ptr = getLoadStorePointerOperand(I);
6808 
6809   return TTI.getAddressComputationCost(VectorTy) +
6810          TTI.getGatherScatterOpCost(
6811              I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
6812              TargetTransformInfo::TCK_RecipThroughput, I);
6813 }
6814 
6815 InstructionCost
6816 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
6817                                                    ElementCount VF) {
6818   // TODO: Once we have support for interleaving with scalable vectors
6819   // we can calculate the cost properly here.
6820   if (VF.isScalable())
6821     return InstructionCost::getInvalid();
6822 
6823   Type *ValTy = getLoadStoreType(I);
6824   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6825   unsigned AS = getLoadStoreAddressSpace(I);
6826 
6827   auto Group = getInterleavedAccessGroup(I);
6828   assert(Group && "Fail to get an interleaved access group.");
6829 
6830   unsigned InterleaveFactor = Group->getFactor();
6831   auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
6832 
6833   // Holds the indices of existing members in the interleaved group.
6834   SmallVector<unsigned, 4> Indices;
6835   for (unsigned IF = 0; IF < InterleaveFactor; IF++)
6836     if (Group->getMember(IF))
6837       Indices.push_back(IF);
6838 
6839   // Calculate the cost of the whole interleaved group.
6840   bool UseMaskForGaps =
6841       (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) ||
6842       (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()));
6843   InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
6844       I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
6845       AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);
6846 
6847   if (Group->isReverse()) {
6848     // TODO: Add support for reversed masked interleaved access.
6849     assert(!Legal->isMaskRequired(I) &&
6850            "Reverse masked interleaved access not supported.");
6851     Cost +=
6852         Group->getNumMembers() *
6853         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
6854   }
6855   return Cost;
6856 }
6857 
6858 Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost(
6859     Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
6860   using namespace llvm::PatternMatch;
6861   // Early exit for no inloop reductions
6862   if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
6863     return None;
6864   auto *VectorTy = cast<VectorType>(Ty);
6865 
6866   // We are looking for a pattern of, and finding the minimal acceptable cost:
6867   //  reduce(mul(ext(A), ext(B))) or
6868   //  reduce(mul(A, B)) or
6869   //  reduce(ext(A)) or
6870   //  reduce(A).
6871   // The basic idea is that we walk down the tree to do that, finding the root
6872   // reduction instruction in InLoopReductionImmediateChains. From there we find
6873   // the pattern of mul/ext and test the cost of the entire pattern vs the cost
6874   // of the components. If the reduction cost is lower then we return it for the
6875   // reduction instruction and 0 for the other instructions in the pattern. If
6876   // it is not we return an invalid cost specifying the orignal cost method
6877   // should be used.
6878   Instruction *RetI = I;
6879   if (match(RetI, m_ZExtOrSExt(m_Value()))) {
6880     if (!RetI->hasOneUser())
6881       return None;
6882     RetI = RetI->user_back();
6883   }
6884   if (match(RetI, m_Mul(m_Value(), m_Value())) &&
6885       RetI->user_back()->getOpcode() == Instruction::Add) {
6886     if (!RetI->hasOneUser())
6887       return None;
6888     RetI = RetI->user_back();
6889   }
6890 
6891   // Test if the found instruction is a reduction, and if not return an invalid
6892   // cost specifying the parent to use the original cost modelling.
6893   if (!InLoopReductionImmediateChains.count(RetI))
6894     return None;
6895 
6896   // Find the reduction this chain is a part of and calculate the basic cost of
6897   // the reduction on its own.
6898   Instruction *LastChain = InLoopReductionImmediateChains[RetI];
6899   Instruction *ReductionPhi = LastChain;
6900   while (!isa<PHINode>(ReductionPhi))
6901     ReductionPhi = InLoopReductionImmediateChains[ReductionPhi];
6902 
6903   const RecurrenceDescriptor &RdxDesc =
6904       Legal->getReductionVars().find(cast<PHINode>(ReductionPhi))->second;
6905 
6906   InstructionCost BaseCost = TTI.getArithmeticReductionCost(
6907       RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind);
6908 
6909   // For a call to the llvm.fmuladd intrinsic we need to add the cost of a
6910   // normal fmul instruction to the cost of the fadd reduction.
6911   if (RdxDesc.getRecurrenceKind() == RecurKind::FMulAdd)
6912     BaseCost +=
6913         TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind);
6914 
6915   // If we're using ordered reductions then we can just return the base cost
6916   // here, since getArithmeticReductionCost calculates the full ordered
6917   // reduction cost when FP reassociation is not allowed.
6918   if (useOrderedReductions(RdxDesc))
6919     return BaseCost;
6920 
6921   // Get the operand that was not the reduction chain and match it to one of the
6922   // patterns, returning the better cost if it is found.
6923   Instruction *RedOp = RetI->getOperand(1) == LastChain
6924                            ? dyn_cast<Instruction>(RetI->getOperand(0))
6925                            : dyn_cast<Instruction>(RetI->getOperand(1));
6926 
6927   VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
6928 
6929   Instruction *Op0, *Op1;
6930   if (RedOp &&
6931       match(RedOp,
6932             m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) &&
6933       match(Op0, m_ZExtOrSExt(m_Value())) &&
6934       Op0->getOpcode() == Op1->getOpcode() &&
6935       Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
6936       !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) &&
6937       (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) {
6938 
6939     // Matched reduce(ext(mul(ext(A), ext(B)))
6940     // Note that the extend opcodes need to all match, or if A==B they will have
6941     // been converted to zext(mul(sext(A), sext(A))) as it is known positive,
6942     // which is equally fine.
6943     bool IsUnsigned = isa<ZExtInst>(Op0);
6944     auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
6945     auto *MulType = VectorType::get(Op0->getType(), VectorTy);
6946 
6947     InstructionCost ExtCost =
6948         TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType,
6949                              TTI::CastContextHint::None, CostKind, Op0);
6950     InstructionCost MulCost =
6951         TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind);
6952     InstructionCost Ext2Cost =
6953         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType,
6954                              TTI::CastContextHint::None, CostKind, RedOp);
6955 
6956     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6957         /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6958         CostKind);
6959 
6960     if (RedCost.isValid() &&
6961         RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost)
6962       return I == RetI ? RedCost : 0;
6963   } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) &&
6964              !TheLoop->isLoopInvariant(RedOp)) {
6965     // Matched reduce(ext(A))
6966     bool IsUnsigned = isa<ZExtInst>(RedOp);
6967     auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
6968     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6969         /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6970         CostKind);
6971 
6972     InstructionCost ExtCost =
6973         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
6974                              TTI::CastContextHint::None, CostKind, RedOp);
6975     if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
6976       return I == RetI ? RedCost : 0;
6977   } else if (RedOp &&
6978              match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) {
6979     if (match(Op0, m_ZExtOrSExt(m_Value())) &&
6980         Op0->getOpcode() == Op1->getOpcode() &&
6981         !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
6982       bool IsUnsigned = isa<ZExtInst>(Op0);
6983       Type *Op0Ty = Op0->getOperand(0)->getType();
6984       Type *Op1Ty = Op1->getOperand(0)->getType();
6985       Type *LargestOpTy =
6986           Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty
6987                                                                     : Op0Ty;
6988       auto *ExtType = VectorType::get(LargestOpTy, VectorTy);
6989 
6990       // Matched reduce(mul(ext(A), ext(B))), where the two ext may be of
6991       // different sizes. We take the largest type as the ext to reduce, and add
6992       // the remaining cost as, for example reduce(mul(ext(ext(A)), ext(B))).
6993       InstructionCost ExtCost0 = TTI.getCastInstrCost(
6994           Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy),
6995           TTI::CastContextHint::None, CostKind, Op0);
6996       InstructionCost ExtCost1 = TTI.getCastInstrCost(
6997           Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy),
6998           TTI::CastContextHint::None, CostKind, Op1);
6999       InstructionCost MulCost =
7000           TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
7001 
7002       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
7003           /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
7004           CostKind);
7005       InstructionCost ExtraExtCost = 0;
7006       if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) {
7007         Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1;
7008         ExtraExtCost = TTI.getCastInstrCost(
7009             ExtraExtOp->getOpcode(), ExtType,
7010             VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy),
7011             TTI::CastContextHint::None, CostKind, ExtraExtOp);
7012       }
7013 
7014       if (RedCost.isValid() &&
7015           (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost))
7016         return I == RetI ? RedCost : 0;
7017     } else if (!match(I, m_ZExtOrSExt(m_Value()))) {
7018       // Matched reduce(mul())
7019       InstructionCost MulCost =
7020           TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
7021 
7022       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
7023           /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy,
7024           CostKind);
7025 
7026       if (RedCost.isValid() && RedCost < MulCost + BaseCost)
7027         return I == RetI ? RedCost : 0;
7028     }
7029   }
7030 
7031   return I == RetI ? Optional<InstructionCost>(BaseCost) : None;
7032 }
7033 
7034 InstructionCost
7035 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
7036                                                      ElementCount VF) {
7037   // Calculate scalar cost only. Vectorization cost should be ready at this
7038   // moment.
7039   if (VF.isScalar()) {
7040     Type *ValTy = getLoadStoreType(I);
7041     const Align Alignment = getLoadStoreAlignment(I);
7042     unsigned AS = getLoadStoreAddressSpace(I);
7043 
7044     return TTI.getAddressComputationCost(ValTy) +
7045            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
7046                                TTI::TCK_RecipThroughput, I);
7047   }
7048   return getWideningCost(I, VF);
7049 }
7050 
7051 LoopVectorizationCostModel::VectorizationCostTy
7052 LoopVectorizationCostModel::getInstructionCost(Instruction *I,
7053                                                ElementCount VF) {
7054   // If we know that this instruction will remain uniform, check the cost of
7055   // the scalar version.
7056   if (isUniformAfterVectorization(I, VF))
7057     VF = ElementCount::getFixed(1);
7058 
7059   if (VF.isVector() && isProfitableToScalarize(I, VF))
7060     return VectorizationCostTy(InstsToScalarize[VF][I], false);
7061 
7062   // Forced scalars do not have any scalarization overhead.
7063   auto ForcedScalar = ForcedScalars.find(VF);
7064   if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
7065     auto InstSet = ForcedScalar->second;
7066     if (InstSet.count(I))
7067       return VectorizationCostTy(
7068           (getInstructionCost(I, ElementCount::getFixed(1)).first *
7069            VF.getKnownMinValue()),
7070           false);
7071   }
7072 
7073   Type *VectorTy;
7074   InstructionCost C = getInstructionCost(I, VF, VectorTy);
7075 
7076   bool TypeNotScalarized = false;
7077   if (VF.isVector() && VectorTy->isVectorTy()) {
7078     unsigned NumParts = TTI.getNumberOfParts(VectorTy);
7079     if (NumParts)
7080       TypeNotScalarized = NumParts < VF.getKnownMinValue();
7081     else
7082       C = InstructionCost::getInvalid();
7083   }
7084   return VectorizationCostTy(C, TypeNotScalarized);
7085 }
7086 
7087 InstructionCost
7088 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
7089                                                      ElementCount VF) const {
7090 
7091   // There is no mechanism yet to create a scalable scalarization loop,
7092   // so this is currently Invalid.
7093   if (VF.isScalable())
7094     return InstructionCost::getInvalid();
7095 
7096   if (VF.isScalar())
7097     return 0;
7098 
7099   InstructionCost Cost = 0;
7100   Type *RetTy = ToVectorTy(I->getType(), VF);
7101   if (!RetTy->isVoidTy() &&
7102       (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
7103     Cost += TTI.getScalarizationOverhead(
7104         cast<VectorType>(RetTy), APInt::getAllOnes(VF.getKnownMinValue()), true,
7105         false);
7106 
7107   // Some targets keep addresses scalar.
7108   if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
7109     return Cost;
7110 
7111   // Some targets support efficient element stores.
7112   if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
7113     return Cost;
7114 
7115   // Collect operands to consider.
7116   CallInst *CI = dyn_cast<CallInst>(I);
7117   Instruction::op_range Ops = CI ? CI->args() : I->operands();
7118 
7119   // Skip operands that do not require extraction/scalarization and do not incur
7120   // any overhead.
7121   SmallVector<Type *> Tys;
7122   for (auto *V : filterExtractingOperands(Ops, VF))
7123     Tys.push_back(MaybeVectorizeType(V->getType(), VF));
7124   return Cost + TTI.getOperandsScalarizationOverhead(
7125                     filterExtractingOperands(Ops, VF), Tys);
7126 }
7127 
7128 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
7129   if (VF.isScalar())
7130     return;
7131   NumPredStores = 0;
7132   for (BasicBlock *BB : TheLoop->blocks()) {
7133     // For each instruction in the old loop.
7134     for (Instruction &I : *BB) {
7135       Value *Ptr =  getLoadStorePointerOperand(&I);
7136       if (!Ptr)
7137         continue;
7138 
7139       // TODO: We should generate better code and update the cost model for
7140       // predicated uniform stores. Today they are treated as any other
7141       // predicated store (see added test cases in
7142       // invariant-store-vectorization.ll).
7143       if (isa<StoreInst>(&I) && isScalarWithPredication(&I))
7144         NumPredStores++;
7145 
7146       if (Legal->isUniformMemOp(I)) {
7147         // TODO: Avoid replicating loads and stores instead of
7148         // relying on instcombine to remove them.
7149         // Load: Scalar load + broadcast
7150         // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
7151         InstructionCost Cost;
7152         if (isa<StoreInst>(&I) && VF.isScalable() &&
7153             isLegalGatherOrScatter(&I)) {
7154           Cost = getGatherScatterCost(&I, VF);
7155           setWideningDecision(&I, VF, CM_GatherScatter, Cost);
7156         } else {
7157           assert((isa<LoadInst>(&I) || !VF.isScalable()) &&
7158                  "Cannot yet scalarize uniform stores");
7159           Cost = getUniformMemOpCost(&I, VF);
7160           setWideningDecision(&I, VF, CM_Scalarize, Cost);
7161         }
7162         continue;
7163       }
7164 
7165       // We assume that widening is the best solution when possible.
7166       if (memoryInstructionCanBeWidened(&I, VF)) {
7167         InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
7168         int ConsecutiveStride = Legal->isConsecutivePtr(
7169             getLoadStoreType(&I), getLoadStorePointerOperand(&I));
7170         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
7171                "Expected consecutive stride.");
7172         InstWidening Decision =
7173             ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
7174         setWideningDecision(&I, VF, Decision, Cost);
7175         continue;
7176       }
7177 
7178       // Choose between Interleaving, Gather/Scatter or Scalarization.
7179       InstructionCost InterleaveCost = InstructionCost::getInvalid();
7180       unsigned NumAccesses = 1;
7181       if (isAccessInterleaved(&I)) {
7182         auto Group = getInterleavedAccessGroup(&I);
7183         assert(Group && "Fail to get an interleaved access group.");
7184 
7185         // Make one decision for the whole group.
7186         if (getWideningDecision(&I, VF) != CM_Unknown)
7187           continue;
7188 
7189         NumAccesses = Group->getNumMembers();
7190         if (interleavedAccessCanBeWidened(&I, VF))
7191           InterleaveCost = getInterleaveGroupCost(&I, VF);
7192       }
7193 
7194       InstructionCost GatherScatterCost =
7195           isLegalGatherOrScatter(&I)
7196               ? getGatherScatterCost(&I, VF) * NumAccesses
7197               : InstructionCost::getInvalid();
7198 
7199       InstructionCost ScalarizationCost =
7200           getMemInstScalarizationCost(&I, VF) * NumAccesses;
7201 
7202       // Choose better solution for the current VF,
7203       // write down this decision and use it during vectorization.
7204       InstructionCost Cost;
7205       InstWidening Decision;
7206       if (InterleaveCost <= GatherScatterCost &&
7207           InterleaveCost < ScalarizationCost) {
7208         Decision = CM_Interleave;
7209         Cost = InterleaveCost;
7210       } else if (GatherScatterCost < ScalarizationCost) {
7211         Decision = CM_GatherScatter;
7212         Cost = GatherScatterCost;
7213       } else {
7214         Decision = CM_Scalarize;
7215         Cost = ScalarizationCost;
7216       }
7217       // If the instructions belongs to an interleave group, the whole group
7218       // receives the same decision. The whole group receives the cost, but
7219       // the cost will actually be assigned to one instruction.
7220       if (auto Group = getInterleavedAccessGroup(&I))
7221         setWideningDecision(Group, VF, Decision, Cost);
7222       else
7223         setWideningDecision(&I, VF, Decision, Cost);
7224     }
7225   }
7226 
7227   // Make sure that any load of address and any other address computation
7228   // remains scalar unless there is gather/scatter support. This avoids
7229   // inevitable extracts into address registers, and also has the benefit of
7230   // activating LSR more, since that pass can't optimize vectorized
7231   // addresses.
7232   if (TTI.prefersVectorizedAddressing())
7233     return;
7234 
7235   // Start with all scalar pointer uses.
7236   SmallPtrSet<Instruction *, 8> AddrDefs;
7237   for (BasicBlock *BB : TheLoop->blocks())
7238     for (Instruction &I : *BB) {
7239       Instruction *PtrDef =
7240         dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
7241       if (PtrDef && TheLoop->contains(PtrDef) &&
7242           getWideningDecision(&I, VF) != CM_GatherScatter)
7243         AddrDefs.insert(PtrDef);
7244     }
7245 
7246   // Add all instructions used to generate the addresses.
7247   SmallVector<Instruction *, 4> Worklist;
7248   append_range(Worklist, AddrDefs);
7249   while (!Worklist.empty()) {
7250     Instruction *I = Worklist.pop_back_val();
7251     for (auto &Op : I->operands())
7252       if (auto *InstOp = dyn_cast<Instruction>(Op))
7253         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
7254             AddrDefs.insert(InstOp).second)
7255           Worklist.push_back(InstOp);
7256   }
7257 
7258   for (auto *I : AddrDefs) {
7259     if (isa<LoadInst>(I)) {
7260       // Setting the desired widening decision should ideally be handled in
7261       // by cost functions, but since this involves the task of finding out
7262       // if the loaded register is involved in an address computation, it is
7263       // instead changed here when we know this is the case.
7264       InstWidening Decision = getWideningDecision(I, VF);
7265       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
7266         // Scalarize a widened load of address.
7267         setWideningDecision(
7268             I, VF, CM_Scalarize,
7269             (VF.getKnownMinValue() *
7270              getMemoryInstructionCost(I, ElementCount::getFixed(1))));
7271       else if (auto Group = getInterleavedAccessGroup(I)) {
7272         // Scalarize an interleave group of address loads.
7273         for (unsigned I = 0; I < Group->getFactor(); ++I) {
7274           if (Instruction *Member = Group->getMember(I))
7275             setWideningDecision(
7276                 Member, VF, CM_Scalarize,
7277                 (VF.getKnownMinValue() *
7278                  getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
7279         }
7280       }
7281     } else
7282       // Make sure I gets scalarized and a cost estimate without
7283       // scalarization overhead.
7284       ForcedScalars[VF].insert(I);
7285   }
7286 }
7287 
7288 InstructionCost
7289 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
7290                                                Type *&VectorTy) {
7291   Type *RetTy = I->getType();
7292   if (canTruncateToMinimalBitwidth(I, VF))
7293     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
7294   auto SE = PSE.getSE();
7295   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
7296 
7297   auto hasSingleCopyAfterVectorization = [this](Instruction *I,
7298                                                 ElementCount VF) -> bool {
7299     if (VF.isScalar())
7300       return true;
7301 
7302     auto Scalarized = InstsToScalarize.find(VF);
7303     assert(Scalarized != InstsToScalarize.end() &&
7304            "VF not yet analyzed for scalarization profitability");
7305     return !Scalarized->second.count(I) &&
7306            llvm::all_of(I->users(), [&](User *U) {
7307              auto *UI = cast<Instruction>(U);
7308              return !Scalarized->second.count(UI);
7309            });
7310   };
7311   (void) hasSingleCopyAfterVectorization;
7312 
7313   if (isScalarAfterVectorization(I, VF)) {
7314     // With the exception of GEPs and PHIs, after scalarization there should
7315     // only be one copy of the instruction generated in the loop. This is
7316     // because the VF is either 1, or any instructions that need scalarizing
7317     // have already been dealt with by the the time we get here. As a result,
7318     // it means we don't have to multiply the instruction cost by VF.
7319     assert(I->getOpcode() == Instruction::GetElementPtr ||
7320            I->getOpcode() == Instruction::PHI ||
7321            (I->getOpcode() == Instruction::BitCast &&
7322             I->getType()->isPointerTy()) ||
7323            hasSingleCopyAfterVectorization(I, VF));
7324     VectorTy = RetTy;
7325   } else
7326     VectorTy = ToVectorTy(RetTy, VF);
7327 
7328   // TODO: We need to estimate the cost of intrinsic calls.
7329   switch (I->getOpcode()) {
7330   case Instruction::GetElementPtr:
7331     // We mark this instruction as zero-cost because the cost of GEPs in
7332     // vectorized code depends on whether the corresponding memory instruction
7333     // is scalarized or not. Therefore, we handle GEPs with the memory
7334     // instruction cost.
7335     return 0;
7336   case Instruction::Br: {
7337     // In cases of scalarized and predicated instructions, there will be VF
7338     // predicated blocks in the vectorized loop. Each branch around these
7339     // blocks requires also an extract of its vector compare i1 element.
7340     bool ScalarPredicatedBB = false;
7341     BranchInst *BI = cast<BranchInst>(I);
7342     if (VF.isVector() && BI->isConditional() &&
7343         (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
7344          PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
7345       ScalarPredicatedBB = true;
7346 
7347     if (ScalarPredicatedBB) {
7348       // Not possible to scalarize scalable vector with predicated instructions.
7349       if (VF.isScalable())
7350         return InstructionCost::getInvalid();
7351       // Return cost for branches around scalarized and predicated blocks.
7352       auto *Vec_i1Ty =
7353           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
7354       return (
7355           TTI.getScalarizationOverhead(
7356               Vec_i1Ty, APInt::getAllOnes(VF.getFixedValue()), false, true) +
7357           (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue()));
7358     } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
7359       // The back-edge branch will remain, as will all scalar branches.
7360       return TTI.getCFInstrCost(Instruction::Br, CostKind);
7361     else
7362       // This branch will be eliminated by if-conversion.
7363       return 0;
7364     // Note: We currently assume zero cost for an unconditional branch inside
7365     // a predicated block since it will become a fall-through, although we
7366     // may decide in the future to call TTI for all branches.
7367   }
7368   case Instruction::PHI: {
7369     auto *Phi = cast<PHINode>(I);
7370 
7371     // First-order recurrences are replaced by vector shuffles inside the loop.
7372     // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
7373     if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi))
7374       return TTI.getShuffleCost(
7375           TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy),
7376           None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1));
7377 
7378     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
7379     // converted into select instructions. We require N - 1 selects per phi
7380     // node, where N is the number of incoming values.
7381     if (VF.isVector() && Phi->getParent() != TheLoop->getHeader())
7382       return (Phi->getNumIncomingValues() - 1) *
7383              TTI.getCmpSelInstrCost(
7384                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
7385                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
7386                  CmpInst::BAD_ICMP_PREDICATE, CostKind);
7387 
7388     return TTI.getCFInstrCost(Instruction::PHI, CostKind);
7389   }
7390   case Instruction::UDiv:
7391   case Instruction::SDiv:
7392   case Instruction::URem:
7393   case Instruction::SRem:
7394     // If we have a predicated instruction, it may not be executed for each
7395     // vector lane. Get the scalarization cost and scale this amount by the
7396     // probability of executing the predicated block. If the instruction is not
7397     // predicated, we fall through to the next case.
7398     if (VF.isVector() && isScalarWithPredication(I)) {
7399       InstructionCost Cost = 0;
7400 
7401       // These instructions have a non-void type, so account for the phi nodes
7402       // that we will create. This cost is likely to be zero. The phi node
7403       // cost, if any, should be scaled by the block probability because it
7404       // models a copy at the end of each predicated block.
7405       Cost += VF.getKnownMinValue() *
7406               TTI.getCFInstrCost(Instruction::PHI, CostKind);
7407 
7408       // The cost of the non-predicated instruction.
7409       Cost += VF.getKnownMinValue() *
7410               TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);
7411 
7412       // The cost of insertelement and extractelement instructions needed for
7413       // scalarization.
7414       Cost += getScalarizationOverhead(I, VF);
7415 
7416       // Scale the cost by the probability of executing the predicated blocks.
7417       // This assumes the predicated block for each vector lane is equally
7418       // likely.
7419       return Cost / getReciprocalPredBlockProb();
7420     }
7421     LLVM_FALLTHROUGH;
7422   case Instruction::Add:
7423   case Instruction::FAdd:
7424   case Instruction::Sub:
7425   case Instruction::FSub:
7426   case Instruction::Mul:
7427   case Instruction::FMul:
7428   case Instruction::FDiv:
7429   case Instruction::FRem:
7430   case Instruction::Shl:
7431   case Instruction::LShr:
7432   case Instruction::AShr:
7433   case Instruction::And:
7434   case Instruction::Or:
7435   case Instruction::Xor: {
7436     // Since we will replace the stride by 1 the multiplication should go away.
7437     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
7438       return 0;
7439 
7440     // Detect reduction patterns
7441     if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7442       return *RedCost;
7443 
7444     // Certain instructions can be cheaper to vectorize if they have a constant
7445     // second vector operand. One example of this are shifts on x86.
7446     Value *Op2 = I->getOperand(1);
7447     TargetTransformInfo::OperandValueProperties Op2VP;
7448     TargetTransformInfo::OperandValueKind Op2VK =
7449         TTI.getOperandInfo(Op2, Op2VP);
7450     if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
7451       Op2VK = TargetTransformInfo::OK_UniformValue;
7452 
7453     SmallVector<const Value *, 4> Operands(I->operand_values());
7454     return TTI.getArithmeticInstrCost(
7455         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7456         Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
7457   }
7458   case Instruction::FNeg: {
7459     return TTI.getArithmeticInstrCost(
7460         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7461         TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None,
7462         TargetTransformInfo::OP_None, I->getOperand(0), I);
7463   }
7464   case Instruction::Select: {
7465     SelectInst *SI = cast<SelectInst>(I);
7466     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
7467     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
7468 
7469     const Value *Op0, *Op1;
7470     using namespace llvm::PatternMatch;
7471     if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
7472                         match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
7473       // select x, y, false --> x & y
7474       // select x, true, y --> x | y
7475       TTI::OperandValueProperties Op1VP = TTI::OP_None;
7476       TTI::OperandValueProperties Op2VP = TTI::OP_None;
7477       TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP);
7478       TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP);
7479       assert(Op0->getType()->getScalarSizeInBits() == 1 &&
7480               Op1->getType()->getScalarSizeInBits() == 1);
7481 
7482       SmallVector<const Value *, 2> Operands{Op0, Op1};
7483       return TTI.getArithmeticInstrCost(
7484           match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And, VectorTy,
7485           CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I);
7486     }
7487 
7488     Type *CondTy = SI->getCondition()->getType();
7489     if (!ScalarCond)
7490       CondTy = VectorType::get(CondTy, VF);
7491 
7492     CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
7493     if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition()))
7494       Pred = Cmp->getPredicate();
7495     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, Pred,
7496                                   CostKind, I);
7497   }
7498   case Instruction::ICmp:
7499   case Instruction::FCmp: {
7500     Type *ValTy = I->getOperand(0)->getType();
7501     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
7502     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
7503       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
7504     VectorTy = ToVectorTy(ValTy, VF);
7505     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr,
7506                                   cast<CmpInst>(I)->getPredicate(), CostKind,
7507                                   I);
7508   }
7509   case Instruction::Store:
7510   case Instruction::Load: {
7511     ElementCount Width = VF;
7512     if (Width.isVector()) {
7513       InstWidening Decision = getWideningDecision(I, Width);
7514       assert(Decision != CM_Unknown &&
7515              "CM decision should be taken at this point");
7516       if (Decision == CM_Scalarize)
7517         Width = ElementCount::getFixed(1);
7518     }
7519     VectorTy = ToVectorTy(getLoadStoreType(I), Width);
7520     return getMemoryInstructionCost(I, VF);
7521   }
7522   case Instruction::BitCast:
7523     if (I->getType()->isPointerTy())
7524       return 0;
7525     LLVM_FALLTHROUGH;
7526   case Instruction::ZExt:
7527   case Instruction::SExt:
7528   case Instruction::FPToUI:
7529   case Instruction::FPToSI:
7530   case Instruction::FPExt:
7531   case Instruction::PtrToInt:
7532   case Instruction::IntToPtr:
7533   case Instruction::SIToFP:
7534   case Instruction::UIToFP:
7535   case Instruction::Trunc:
7536   case Instruction::FPTrunc: {
7537     // Computes the CastContextHint from a Load/Store instruction.
7538     auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
7539       assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
7540              "Expected a load or a store!");
7541 
7542       if (VF.isScalar() || !TheLoop->contains(I))
7543         return TTI::CastContextHint::Normal;
7544 
7545       switch (getWideningDecision(I, VF)) {
7546       case LoopVectorizationCostModel::CM_GatherScatter:
7547         return TTI::CastContextHint::GatherScatter;
7548       case LoopVectorizationCostModel::CM_Interleave:
7549         return TTI::CastContextHint::Interleave;
7550       case LoopVectorizationCostModel::CM_Scalarize:
7551       case LoopVectorizationCostModel::CM_Widen:
7552         return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked
7553                                         : TTI::CastContextHint::Normal;
7554       case LoopVectorizationCostModel::CM_Widen_Reverse:
7555         return TTI::CastContextHint::Reversed;
7556       case LoopVectorizationCostModel::CM_Unknown:
7557         llvm_unreachable("Instr did not go through cost modelling?");
7558       }
7559 
7560       llvm_unreachable("Unhandled case!");
7561     };
7562 
7563     unsigned Opcode = I->getOpcode();
7564     TTI::CastContextHint CCH = TTI::CastContextHint::None;
7565     // For Trunc, the context is the only user, which must be a StoreInst.
7566     if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
7567       if (I->hasOneUse())
7568         if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
7569           CCH = ComputeCCH(Store);
7570     }
7571     // For Z/Sext, the context is the operand, which must be a LoadInst.
7572     else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
7573              Opcode == Instruction::FPExt) {
7574       if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
7575         CCH = ComputeCCH(Load);
7576     }
7577 
7578     // We optimize the truncation of induction variables having constant
7579     // integer steps. The cost of these truncations is the same as the scalar
7580     // operation.
7581     if (isOptimizableIVTruncate(I, VF)) {
7582       auto *Trunc = cast<TruncInst>(I);
7583       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
7584                                   Trunc->getSrcTy(), CCH, CostKind, Trunc);
7585     }
7586 
7587     // Detect reduction patterns
7588     if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7589       return *RedCost;
7590 
7591     Type *SrcScalarTy = I->getOperand(0)->getType();
7592     Type *SrcVecTy =
7593         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
7594     if (canTruncateToMinimalBitwidth(I, VF)) {
7595       // This cast is going to be shrunk. This may remove the cast or it might
7596       // turn it into slightly different cast. For example, if MinBW == 16,
7597       // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
7598       //
7599       // Calculate the modified src and dest types.
7600       Type *MinVecTy = VectorTy;
7601       if (Opcode == Instruction::Trunc) {
7602         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
7603         VectorTy =
7604             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7605       } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
7606         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
7607         VectorTy =
7608             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7609       }
7610     }
7611 
7612     return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
7613   }
7614   case Instruction::Call: {
7615     if (RecurrenceDescriptor::isFMulAddIntrinsic(I))
7616       if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7617         return *RedCost;
7618     bool NeedToScalarize;
7619     CallInst *CI = cast<CallInst>(I);
7620     InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
7621     if (getVectorIntrinsicIDForCall(CI, TLI)) {
7622       InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF);
7623       return std::min(CallCost, IntrinsicCost);
7624     }
7625     return CallCost;
7626   }
7627   case Instruction::ExtractValue:
7628     return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput);
7629   case Instruction::Alloca:
7630     // We cannot easily widen alloca to a scalable alloca, as
7631     // the result would need to be a vector of pointers.
7632     if (VF.isScalable())
7633       return InstructionCost::getInvalid();
7634     LLVM_FALLTHROUGH;
7635   default:
7636     // This opcode is unknown. Assume that it is the same as 'mul'.
7637     return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
7638   } // end of switch.
7639 }
7640 
7641 char LoopVectorize::ID = 0;
7642 
7643 static const char lv_name[] = "Loop Vectorization";
7644 
7645 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
7646 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
7647 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
7648 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
7649 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
7650 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
7651 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
7652 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
7653 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
7654 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
7655 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
7656 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
7657 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
7658 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
7659 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
7660 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
7661 
7662 namespace llvm {
7663 
7664 Pass *createLoopVectorizePass() { return new LoopVectorize(); }
7665 
7666 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
7667                               bool VectorizeOnlyWhenForced) {
7668   return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
7669 }
7670 
7671 } // end namespace llvm
7672 
7673 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
7674   // Check if the pointer operand of a load or store instruction is
7675   // consecutive.
7676   if (auto *Ptr = getLoadStorePointerOperand(Inst))
7677     return Legal->isConsecutivePtr(getLoadStoreType(Inst), Ptr);
7678   return false;
7679 }
7680 
7681 void LoopVectorizationCostModel::collectValuesToIgnore() {
7682   // Ignore ephemeral values.
7683   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
7684 
7685   // Ignore type-promoting instructions we identified during reduction
7686   // detection.
7687   for (auto &Reduction : Legal->getReductionVars()) {
7688     const RecurrenceDescriptor &RedDes = Reduction.second;
7689     const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
7690     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7691   }
7692   // Ignore type-casting instructions we identified during induction
7693   // detection.
7694   for (auto &Induction : Legal->getInductionVars()) {
7695     const InductionDescriptor &IndDes = Induction.second;
7696     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
7697     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7698   }
7699 }
7700 
7701 void LoopVectorizationCostModel::collectInLoopReductions() {
7702   for (auto &Reduction : Legal->getReductionVars()) {
7703     PHINode *Phi = Reduction.first;
7704     const RecurrenceDescriptor &RdxDesc = Reduction.second;
7705 
7706     // We don't collect reductions that are type promoted (yet).
7707     if (RdxDesc.getRecurrenceType() != Phi->getType())
7708       continue;
7709 
7710     // If the target would prefer this reduction to happen "in-loop", then we
7711     // want to record it as such.
7712     unsigned Opcode = RdxDesc.getOpcode();
7713     if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) &&
7714         !TTI.preferInLoopReduction(Opcode, Phi->getType(),
7715                                    TargetTransformInfo::ReductionFlags()))
7716       continue;
7717 
7718     // Check that we can correctly put the reductions into the loop, by
7719     // finding the chain of operations that leads from the phi to the loop
7720     // exit value.
7721     SmallVector<Instruction *, 4> ReductionOperations =
7722         RdxDesc.getReductionOpChain(Phi, TheLoop);
7723     bool InLoop = !ReductionOperations.empty();
7724     if (InLoop) {
7725       InLoopReductionChains[Phi] = ReductionOperations;
7726       // Add the elements to InLoopReductionImmediateChains for cost modelling.
7727       Instruction *LastChain = Phi;
7728       for (auto *I : ReductionOperations) {
7729         InLoopReductionImmediateChains[I] = LastChain;
7730         LastChain = I;
7731       }
7732     }
7733     LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
7734                       << " reduction for phi: " << *Phi << "\n");
7735   }
7736 }
7737 
7738 // TODO: we could return a pair of values that specify the max VF and
7739 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
7740 // `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment
7741 // doesn't have a cost model that can choose which plan to execute if
7742 // more than one is generated.
7743 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
7744                                  LoopVectorizationCostModel &CM) {
7745   unsigned WidestType;
7746   std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
7747   return WidestVectorRegBits / WidestType;
7748 }
7749 
7750 VectorizationFactor
7751 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
7752   assert(!UserVF.isScalable() && "scalable vectors not yet supported");
7753   ElementCount VF = UserVF;
7754   // Outer loop handling: They may require CFG and instruction level
7755   // transformations before even evaluating whether vectorization is profitable.
7756   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
7757   // the vectorization pipeline.
7758   if (!OrigLoop->isInnermost()) {
7759     // If the user doesn't provide a vectorization factor, determine a
7760     // reasonable one.
7761     if (UserVF.isZero()) {
7762       VF = ElementCount::getFixed(determineVPlanVF(
7763           TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
7764               .getFixedSize(),
7765           CM));
7766       LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
7767 
7768       // Make sure we have a VF > 1 for stress testing.
7769       if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
7770         LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
7771                           << "overriding computed VF.\n");
7772         VF = ElementCount::getFixed(4);
7773       }
7774     }
7775     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7776     assert(isPowerOf2_32(VF.getKnownMinValue()) &&
7777            "VF needs to be a power of two");
7778     LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
7779                       << "VF " << VF << " to build VPlans.\n");
7780     buildVPlans(VF, VF);
7781 
7782     // For VPlan build stress testing, we bail out after VPlan construction.
7783     if (VPlanBuildStressTest)
7784       return VectorizationFactor::Disabled();
7785 
7786     return {VF, 0 /*Cost*/};
7787   }
7788 
7789   LLVM_DEBUG(
7790       dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
7791                 "VPlan-native path.\n");
7792   return VectorizationFactor::Disabled();
7793 }
7794 
7795 Optional<VectorizationFactor>
7796 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
7797   assert(OrigLoop->isInnermost() && "Inner loop expected.");
7798   FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
7799   if (!MaxFactors) // Cases that should not to be vectorized nor interleaved.
7800     return None;
7801 
7802   // Invalidate interleave groups if all blocks of loop will be predicated.
7803   if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) &&
7804       !useMaskedInterleavedAccesses(*TTI)) {
7805     LLVM_DEBUG(
7806         dbgs()
7807         << "LV: Invalidate all interleaved groups due to fold-tail by masking "
7808            "which requires masked-interleaved support.\n");
7809     if (CM.InterleaveInfo.invalidateGroups())
7810       // Invalidating interleave groups also requires invalidating all decisions
7811       // based on them, which includes widening decisions and uniform and scalar
7812       // values.
7813       CM.invalidateCostModelingDecisions();
7814   }
7815 
7816   ElementCount MaxUserVF =
7817       UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF;
7818   bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF);
7819   if (!UserVF.isZero() && UserVFIsLegal) {
7820     assert(isPowerOf2_32(UserVF.getKnownMinValue()) &&
7821            "VF needs to be a power of two");
7822     // Collect the instructions (and their associated costs) that will be more
7823     // profitable to scalarize.
7824     if (CM.selectUserVectorizationFactor(UserVF)) {
7825       LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
7826       CM.collectInLoopReductions();
7827       buildVPlansWithVPRecipes(UserVF, UserVF);
7828       LLVM_DEBUG(printPlans(dbgs()));
7829       return {{UserVF, 0}};
7830     } else
7831       reportVectorizationInfo("UserVF ignored because of invalid costs.",
7832                               "InvalidCost", ORE, OrigLoop);
7833   }
7834 
7835   // Populate the set of Vectorization Factor Candidates.
7836   ElementCountSet VFCandidates;
7837   for (auto VF = ElementCount::getFixed(1);
7838        ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2)
7839     VFCandidates.insert(VF);
7840   for (auto VF = ElementCount::getScalable(1);
7841        ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2)
7842     VFCandidates.insert(VF);
7843 
7844   for (const auto &VF : VFCandidates) {
7845     // Collect Uniform and Scalar instructions after vectorization with VF.
7846     CM.collectUniformsAndScalars(VF);
7847 
7848     // Collect the instructions (and their associated costs) that will be more
7849     // profitable to scalarize.
7850     if (VF.isVector())
7851       CM.collectInstsToScalarize(VF);
7852   }
7853 
7854   CM.collectInLoopReductions();
7855   buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF);
7856   buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF);
7857 
7858   LLVM_DEBUG(printPlans(dbgs()));
7859   if (!MaxFactors.hasVector())
7860     return VectorizationFactor::Disabled();
7861 
7862   // Select the optimal vectorization factor.
7863   auto SelectedVF = CM.selectVectorizationFactor(VFCandidates);
7864 
7865   // Check if it is profitable to vectorize with runtime checks.
7866   unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks();
7867   if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) {
7868     bool PragmaThresholdReached =
7869         NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
7870     bool ThresholdReached =
7871         NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
7872     if ((ThresholdReached && !Hints.allowReordering()) ||
7873         PragmaThresholdReached) {
7874       ORE->emit([&]() {
7875         return OptimizationRemarkAnalysisAliasing(
7876                    DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(),
7877                    OrigLoop->getHeader())
7878                << "loop not vectorized: cannot prove it is safe to reorder "
7879                   "memory operations";
7880       });
7881       LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
7882       Hints.emitRemarkWithHints();
7883       return VectorizationFactor::Disabled();
7884     }
7885   }
7886   return SelectedVF;
7887 }
7888 
7889 VPlan &LoopVectorizationPlanner::getBestPlanFor(ElementCount VF) const {
7890   assert(count_if(VPlans,
7891                   [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) ==
7892              1 &&
7893          "Best VF has not a single VPlan.");
7894 
7895   for (const VPlanPtr &Plan : VPlans) {
7896     if (Plan->hasVF(VF))
7897       return *Plan.get();
7898   }
7899   llvm_unreachable("No plan found!");
7900 }
7901 
7902 void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF,
7903                                            VPlan &BestVPlan,
7904                                            InnerLoopVectorizer &ILV,
7905                                            DominatorTree *DT) {
7906   LLVM_DEBUG(dbgs() << "Executing best plan with VF=" << BestVF << ", UF=" << BestUF
7907                     << '\n');
7908 
7909   // Perform the actual loop transformation.
7910 
7911   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
7912   VPTransformState State{BestVF, BestUF, LI, DT, ILV.Builder, &ILV, &BestVPlan};
7913   Value *CanonicalIVStartValue;
7914   std::tie(State.CFG.PrevBB, CanonicalIVStartValue) =
7915       ILV.createVectorizedLoopSkeleton();
7916   ILV.collectPoisonGeneratingRecipes(State);
7917 
7918   ILV.printDebugTracesAtStart();
7919 
7920   //===------------------------------------------------===//
7921   //
7922   // Notice: any optimization or new instruction that go
7923   // into the code below should also be implemented in
7924   // the cost-model.
7925   //
7926   //===------------------------------------------------===//
7927 
7928   // 2. Copy and widen instructions from the old loop into the new loop.
7929   BestVPlan.prepareToExecute(ILV.getOrCreateTripCount(nullptr),
7930                              ILV.getOrCreateVectorTripCount(nullptr),
7931                              CanonicalIVStartValue, State);
7932   BestVPlan.execute(&State);
7933 
7934   // Keep all loop hints from the original loop on the vector loop (we'll
7935   // replace the vectorizer-specific hints below).
7936   MDNode *OrigLoopID = OrigLoop->getLoopID();
7937 
7938   Optional<MDNode *> VectorizedLoopID =
7939       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
7940                                       LLVMLoopVectorizeFollowupVectorized});
7941 
7942   Loop *L = LI->getLoopFor(State.CFG.PrevBB);
7943   if (VectorizedLoopID.hasValue())
7944     L->setLoopID(VectorizedLoopID.getValue());
7945   else {
7946     // Keep all loop hints from the original loop on the vector loop (we'll
7947     // replace the vectorizer-specific hints below).
7948     if (MDNode *LID = OrigLoop->getLoopID())
7949       L->setLoopID(LID);
7950 
7951     LoopVectorizeHints Hints(L, true, *ORE);
7952     Hints.setAlreadyVectorized();
7953   }
7954 
7955   // 3. Fix the vectorized code: take care of header phi's, live-outs,
7956   //    predication, updating analyses.
7957   ILV.fixVectorizedLoop(State);
7958 
7959   ILV.printDebugTracesAtEnd();
7960 }
7961 
7962 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
7963 void LoopVectorizationPlanner::printPlans(raw_ostream &O) {
7964   for (const auto &Plan : VPlans)
7965     if (PrintVPlansInDotFormat)
7966       Plan->printDOT(O);
7967     else
7968       Plan->print(O);
7969 }
7970 #endif
7971 
7972 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
7973     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
7974 
7975   // We create new control-flow for the vectorized loop, so the original exit
7976   // conditions will be dead after vectorization if it's only used by the
7977   // terminator
7978   SmallVector<BasicBlock*> ExitingBlocks;
7979   OrigLoop->getExitingBlocks(ExitingBlocks);
7980   for (auto *BB : ExitingBlocks) {
7981     auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
7982     if (!Cmp || !Cmp->hasOneUse())
7983       continue;
7984 
7985     // TODO: we should introduce a getUniqueExitingBlocks on Loop
7986     if (!DeadInstructions.insert(Cmp).second)
7987       continue;
7988 
7989     // The operands of the icmp is often a dead trunc, used by IndUpdate.
7990     // TODO: can recurse through operands in general
7991     for (Value *Op : Cmp->operands()) {
7992       if (isa<TruncInst>(Op) && Op->hasOneUse())
7993           DeadInstructions.insert(cast<Instruction>(Op));
7994     }
7995   }
7996 
7997   // We create new "steps" for induction variable updates to which the original
7998   // induction variables map. An original update instruction will be dead if
7999   // all its users except the induction variable are dead.
8000   auto *Latch = OrigLoop->getLoopLatch();
8001   for (auto &Induction : Legal->getInductionVars()) {
8002     PHINode *Ind = Induction.first;
8003     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
8004 
8005     // If the tail is to be folded by masking, the primary induction variable,
8006     // if exists, isn't dead: it will be used for masking. Don't kill it.
8007     if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction())
8008       continue;
8009 
8010     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
8011           return U == Ind || DeadInstructions.count(cast<Instruction>(U));
8012         }))
8013       DeadInstructions.insert(IndUpdate);
8014   }
8015 }
8016 
8017 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
8018 
8019 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
8020   SmallVector<Metadata *, 4> MDs;
8021   // Reserve first location for self reference to the LoopID metadata node.
8022   MDs.push_back(nullptr);
8023   bool IsUnrollMetadata = false;
8024   MDNode *LoopID = L->getLoopID();
8025   if (LoopID) {
8026     // First find existing loop unrolling disable metadata.
8027     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
8028       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
8029       if (MD) {
8030         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
8031         IsUnrollMetadata =
8032             S && S->getString().startswith("llvm.loop.unroll.disable");
8033       }
8034       MDs.push_back(LoopID->getOperand(i));
8035     }
8036   }
8037 
8038   if (!IsUnrollMetadata) {
8039     // Add runtime unroll disable metadata.
8040     LLVMContext &Context = L->getHeader()->getContext();
8041     SmallVector<Metadata *, 1> DisableOperands;
8042     DisableOperands.push_back(
8043         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
8044     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
8045     MDs.push_back(DisableNode);
8046     MDNode *NewLoopID = MDNode::get(Context, MDs);
8047     // Set operand 0 to refer to the loop id itself.
8048     NewLoopID->replaceOperandWith(0, NewLoopID);
8049     L->setLoopID(NewLoopID);
8050   }
8051 }
8052 
8053 //===--------------------------------------------------------------------===//
8054 // EpilogueVectorizerMainLoop
8055 //===--------------------------------------------------------------------===//
8056 
8057 /// This function is partially responsible for generating the control flow
8058 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
8059 std::pair<BasicBlock *, Value *>
8060 EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
8061   MDNode *OrigLoopID = OrigLoop->getLoopID();
8062   Loop *Lp = createVectorLoopSkeleton("");
8063 
8064   // Generate the code to check the minimum iteration count of the vector
8065   // epilogue (see below).
8066   EPI.EpilogueIterationCountCheck =
8067       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true);
8068   EPI.EpilogueIterationCountCheck->setName("iter.check");
8069 
8070   // Generate the code to check any assumptions that we've made for SCEV
8071   // expressions.
8072   EPI.SCEVSafetyCheck = emitSCEVChecks(Lp, LoopScalarPreHeader);
8073 
8074   // Generate the code that checks at runtime if arrays overlap. We put the
8075   // checks into a separate block to make the more common case of few elements
8076   // faster.
8077   EPI.MemSafetyCheck = emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
8078 
8079   // Generate the iteration count check for the main loop, *after* the check
8080   // for the epilogue loop, so that the path-length is shorter for the case
8081   // that goes directly through the vector epilogue. The longer-path length for
8082   // the main loop is compensated for, by the gain from vectorizing the larger
8083   // trip count. Note: the branch will get updated later on when we vectorize
8084   // the epilogue.
8085   EPI.MainLoopIterationCountCheck =
8086       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false);
8087 
8088   // Generate the induction variable.
8089   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
8090   EPI.VectorTripCount = CountRoundDown;
8091   createHeaderBranch(Lp);
8092 
8093   // Skip induction resume value creation here because they will be created in
8094   // the second pass. If we created them here, they wouldn't be used anyway,
8095   // because the vplan in the second pass still contains the inductions from the
8096   // original loop.
8097 
8098   return {completeLoopSkeleton(Lp, OrigLoopID), nullptr};
8099 }
8100 
8101 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
8102   LLVM_DEBUG({
8103     dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
8104            << "Main Loop VF:" << EPI.MainLoopVF
8105            << ", Main Loop UF:" << EPI.MainLoopUF
8106            << ", Epilogue Loop VF:" << EPI.EpilogueVF
8107            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8108   });
8109 }
8110 
8111 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
8112   DEBUG_WITH_TYPE(VerboseDebug, {
8113     dbgs() << "intermediate fn:\n"
8114            << *OrigLoop->getHeader()->getParent() << "\n";
8115   });
8116 }
8117 
8118 BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(
8119     Loop *L, BasicBlock *Bypass, bool ForEpilogue) {
8120   assert(L && "Expected valid Loop.");
8121   assert(Bypass && "Expected valid bypass basic block.");
8122   ElementCount VFactor = ForEpilogue ? EPI.EpilogueVF : VF;
8123   unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF;
8124   Value *Count = getOrCreateTripCount(L);
8125   // Reuse existing vector loop preheader for TC checks.
8126   // Note that new preheader block is generated for vector loop.
8127   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
8128   IRBuilder<> Builder(TCCheckBlock->getTerminator());
8129 
8130   // Generate code to check if the loop's trip count is less than VF * UF of the
8131   // main vector loop.
8132   auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF) ?
8133       ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
8134 
8135   Value *CheckMinIters = Builder.CreateICmp(
8136       P, Count, createStepForVF(Builder, Count->getType(), VFactor, UFactor),
8137       "min.iters.check");
8138 
8139   if (!ForEpilogue)
8140     TCCheckBlock->setName("vector.main.loop.iter.check");
8141 
8142   // Create new preheader for vector loop.
8143   LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
8144                                    DT, LI, nullptr, "vector.ph");
8145 
8146   if (ForEpilogue) {
8147     assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
8148                                  DT->getNode(Bypass)->getIDom()) &&
8149            "TC check is expected to dominate Bypass");
8150 
8151     // Update dominator for Bypass & LoopExit.
8152     DT->changeImmediateDominator(Bypass, TCCheckBlock);
8153     if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF))
8154       // For loops with multiple exits, there's no edge from the middle block
8155       // to exit blocks (as the epilogue must run) and thus no need to update
8156       // the immediate dominator of the exit blocks.
8157       DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
8158 
8159     LoopBypassBlocks.push_back(TCCheckBlock);
8160 
8161     // Save the trip count so we don't have to regenerate it in the
8162     // vec.epilog.iter.check. This is safe to do because the trip count
8163     // generated here dominates the vector epilog iter check.
8164     EPI.TripCount = Count;
8165   }
8166 
8167   ReplaceInstWithInst(
8168       TCCheckBlock->getTerminator(),
8169       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8170 
8171   return TCCheckBlock;
8172 }
8173 
8174 //===--------------------------------------------------------------------===//
8175 // EpilogueVectorizerEpilogueLoop
8176 //===--------------------------------------------------------------------===//
8177 
8178 /// This function is partially responsible for generating the control flow
8179 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
8180 std::pair<BasicBlock *, Value *>
8181 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
8182   MDNode *OrigLoopID = OrigLoop->getLoopID();
8183   Loop *Lp = createVectorLoopSkeleton("vec.epilog.");
8184 
8185   // Now, compare the remaining count and if there aren't enough iterations to
8186   // execute the vectorized epilogue skip to the scalar part.
8187   BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader;
8188   VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check");
8189   LoopVectorPreHeader =
8190       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
8191                  LI, nullptr, "vec.epilog.ph");
8192   emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader,
8193                                           VecEpilogueIterationCountCheck);
8194 
8195   // Adjust the control flow taking the state info from the main loop
8196   // vectorization into account.
8197   assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck &&
8198          "expected this to be saved from the previous pass.");
8199   EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith(
8200       VecEpilogueIterationCountCheck, LoopVectorPreHeader);
8201 
8202   DT->changeImmediateDominator(LoopVectorPreHeader,
8203                                EPI.MainLoopIterationCountCheck);
8204 
8205   EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
8206       VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8207 
8208   if (EPI.SCEVSafetyCheck)
8209     EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith(
8210         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8211   if (EPI.MemSafetyCheck)
8212     EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith(
8213         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8214 
8215   DT->changeImmediateDominator(
8216       VecEpilogueIterationCountCheck,
8217       VecEpilogueIterationCountCheck->getSinglePredecessor());
8218 
8219   DT->changeImmediateDominator(LoopScalarPreHeader,
8220                                EPI.EpilogueIterationCountCheck);
8221   if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF))
8222     // If there is an epilogue which must run, there's no edge from the
8223     // middle block to exit blocks  and thus no need to update the immediate
8224     // dominator of the exit blocks.
8225     DT->changeImmediateDominator(LoopExitBlock,
8226                                  EPI.EpilogueIterationCountCheck);
8227 
8228   // Keep track of bypass blocks, as they feed start values to the induction
8229   // phis in the scalar loop preheader.
8230   if (EPI.SCEVSafetyCheck)
8231     LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck);
8232   if (EPI.MemSafetyCheck)
8233     LoopBypassBlocks.push_back(EPI.MemSafetyCheck);
8234   LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck);
8235 
8236   // Generate a resume induction for the vector epilogue and put it in the
8237   // vector epilogue preheader
8238   Type *IdxTy = Legal->getWidestInductionType();
8239   PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
8240                                          LoopVectorPreHeader->getFirstNonPHI());
8241   EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
8242   EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
8243                            EPI.MainLoopIterationCountCheck);
8244 
8245   // Generate the induction variable.
8246   createHeaderBranch(Lp);
8247 
8248   // Generate induction resume values. These variables save the new starting
8249   // indexes for the scalar loop. They are used to test if there are any tail
8250   // iterations left once the vector loop has completed.
8251   // Note that when the vectorized epilogue is skipped due to iteration count
8252   // check, then the resume value for the induction variable comes from
8253   // the trip count of the main vector loop, hence passing the AdditionalBypass
8254   // argument.
8255   createInductionResumeValues(Lp, {VecEpilogueIterationCountCheck,
8256                                    EPI.VectorTripCount} /* AdditionalBypass */);
8257 
8258   AddRuntimeUnrollDisableMetaData(Lp);
8259   return {completeLoopSkeleton(Lp, OrigLoopID), EPResumeVal};
8260 }
8261 
8262 BasicBlock *
8263 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
8264     Loop *L, BasicBlock *Bypass, BasicBlock *Insert) {
8265 
8266   assert(EPI.TripCount &&
8267          "Expected trip count to have been safed in the first pass.");
8268   assert(
8269       (!isa<Instruction>(EPI.TripCount) ||
8270        DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
8271       "saved trip count does not dominate insertion point.");
8272   Value *TC = EPI.TripCount;
8273   IRBuilder<> Builder(Insert->getTerminator());
8274   Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");
8275 
8276   // Generate code to check if the loop's trip count is less than VF * UF of the
8277   // vector epilogue loop.
8278   auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ?
8279       ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
8280 
8281   Value *CheckMinIters =
8282       Builder.CreateICmp(P, Count,
8283                          createStepForVF(Builder, Count->getType(),
8284                                          EPI.EpilogueVF, EPI.EpilogueUF),
8285                          "min.epilog.iters.check");
8286 
8287   ReplaceInstWithInst(
8288       Insert->getTerminator(),
8289       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8290 
8291   LoopBypassBlocks.push_back(Insert);
8292   return Insert;
8293 }
8294 
8295 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
8296   LLVM_DEBUG({
8297     dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
8298            << "Epilogue Loop VF:" << EPI.EpilogueVF
8299            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8300   });
8301 }
8302 
8303 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
8304   DEBUG_WITH_TYPE(VerboseDebug, {
8305     dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n";
8306   });
8307 }
8308 
8309 bool LoopVectorizationPlanner::getDecisionAndClampRange(
8310     const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
8311   assert(!Range.isEmpty() && "Trying to test an empty VF range.");
8312   bool PredicateAtRangeStart = Predicate(Range.Start);
8313 
8314   for (ElementCount TmpVF = Range.Start * 2;
8315        ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
8316     if (Predicate(TmpVF) != PredicateAtRangeStart) {
8317       Range.End = TmpVF;
8318       break;
8319     }
8320 
8321   return PredicateAtRangeStart;
8322 }
8323 
8324 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
8325 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
8326 /// of VF's starting at a given VF and extending it as much as possible. Each
8327 /// vectorization decision can potentially shorten this sub-range during
8328 /// buildVPlan().
8329 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
8330                                            ElementCount MaxVF) {
8331   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8332   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8333     VFRange SubRange = {VF, MaxVFPlusOne};
8334     VPlans.push_back(buildVPlan(SubRange));
8335     VF = SubRange.End;
8336   }
8337 }
8338 
8339 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
8340                                          VPlanPtr &Plan) {
8341   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
8342 
8343   // Look for cached value.
8344   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
8345   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
8346   if (ECEntryIt != EdgeMaskCache.end())
8347     return ECEntryIt->second;
8348 
8349   VPValue *SrcMask = createBlockInMask(Src, Plan);
8350 
8351   // The terminator has to be a branch inst!
8352   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
8353   assert(BI && "Unexpected terminator found");
8354 
8355   if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
8356     return EdgeMaskCache[Edge] = SrcMask;
8357 
8358   // If source is an exiting block, we know the exit edge is dynamically dead
8359   // in the vector loop, and thus we don't need to restrict the mask.  Avoid
8360   // adding uses of an otherwise potentially dead instruction.
8361   if (OrigLoop->isLoopExiting(Src))
8362     return EdgeMaskCache[Edge] = SrcMask;
8363 
8364   VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition());
8365   assert(EdgeMask && "No Edge Mask found for condition");
8366 
8367   if (BI->getSuccessor(0) != Dst)
8368     EdgeMask = Builder.createNot(EdgeMask, BI->getDebugLoc());
8369 
8370   if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND.
8371     // The condition is 'SrcMask && EdgeMask', which is equivalent to
8372     // 'select i1 SrcMask, i1 EdgeMask, i1 false'.
8373     // The select version does not introduce new UB if SrcMask is false and
8374     // EdgeMask is poison. Using 'and' here introduces undefined behavior.
8375     VPValue *False = Plan->getOrAddVPValue(
8376         ConstantInt::getFalse(BI->getCondition()->getType()));
8377     EdgeMask =
8378         Builder.createSelect(SrcMask, EdgeMask, False, BI->getDebugLoc());
8379   }
8380 
8381   return EdgeMaskCache[Edge] = EdgeMask;
8382 }
8383 
8384 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
8385   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
8386 
8387   // Look for cached value.
8388   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
8389   if (BCEntryIt != BlockMaskCache.end())
8390     return BCEntryIt->second;
8391 
8392   // All-one mask is modelled as no-mask following the convention for masked
8393   // load/store/gather/scatter. Initialize BlockMask to no-mask.
8394   VPValue *BlockMask = nullptr;
8395 
8396   if (OrigLoop->getHeader() == BB) {
8397     if (!CM.blockNeedsPredicationForAnyReason(BB))
8398       return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
8399 
8400     // Introduce the early-exit compare IV <= BTC to form header block mask.
8401     // This is used instead of IV < TC because TC may wrap, unlike BTC. Start by
8402     // constructing the desired canonical IV in the header block as its first
8403     // non-phi instructions.
8404     assert(CM.foldTailByMasking() && "must fold the tail");
8405     VPBasicBlock *HeaderVPBB = Plan->getEntry()->getEntryBasicBlock();
8406     auto NewInsertionPoint = HeaderVPBB->getFirstNonPhi();
8407 
8408     VPValue *IV = nullptr;
8409     if (Legal->getPrimaryInduction())
8410       IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction());
8411     else {
8412       auto *IVRecipe = new VPWidenCanonicalIVRecipe();
8413       HeaderVPBB->insert(IVRecipe, NewInsertionPoint);
8414       IV = IVRecipe;
8415     }
8416 
8417     VPBuilder::InsertPointGuard Guard(Builder);
8418     Builder.setInsertPoint(HeaderVPBB, NewInsertionPoint);
8419     if (CM.TTI.emitGetActiveLaneMask()) {
8420       VPValue *TC = Plan->getOrCreateTripCount();
8421       BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV, TC});
8422     } else {
8423       VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
8424       BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
8425     }
8426     return BlockMaskCache[BB] = BlockMask;
8427   }
8428 
8429   // This is the block mask. We OR all incoming edges.
8430   for (auto *Predecessor : predecessors(BB)) {
8431     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
8432     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
8433       return BlockMaskCache[BB] = EdgeMask;
8434 
8435     if (!BlockMask) { // BlockMask has its initialized nullptr value.
8436       BlockMask = EdgeMask;
8437       continue;
8438     }
8439 
8440     BlockMask = Builder.createOr(BlockMask, EdgeMask, {});
8441   }
8442 
8443   return BlockMaskCache[BB] = BlockMask;
8444 }
8445 
8446 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I,
8447                                                 ArrayRef<VPValue *> Operands,
8448                                                 VFRange &Range,
8449                                                 VPlanPtr &Plan) {
8450   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
8451          "Must be called with either a load or store");
8452 
8453   auto willWiden = [&](ElementCount VF) -> bool {
8454     if (VF.isScalar())
8455       return false;
8456     LoopVectorizationCostModel::InstWidening Decision =
8457         CM.getWideningDecision(I, VF);
8458     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
8459            "CM decision should be taken at this point.");
8460     if (Decision == LoopVectorizationCostModel::CM_Interleave)
8461       return true;
8462     if (CM.isScalarAfterVectorization(I, VF) ||
8463         CM.isProfitableToScalarize(I, VF))
8464       return false;
8465     return Decision != LoopVectorizationCostModel::CM_Scalarize;
8466   };
8467 
8468   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8469     return nullptr;
8470 
8471   VPValue *Mask = nullptr;
8472   if (Legal->isMaskRequired(I))
8473     Mask = createBlockInMask(I->getParent(), Plan);
8474 
8475   // Determine if the pointer operand of the access is either consecutive or
8476   // reverse consecutive.
8477   LoopVectorizationCostModel::InstWidening Decision =
8478       CM.getWideningDecision(I, Range.Start);
8479   bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse;
8480   bool Consecutive =
8481       Reverse || Decision == LoopVectorizationCostModel::CM_Widen;
8482 
8483   if (LoadInst *Load = dyn_cast<LoadInst>(I))
8484     return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask,
8485                                               Consecutive, Reverse);
8486 
8487   StoreInst *Store = cast<StoreInst>(I);
8488   return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0],
8489                                             Mask, Consecutive, Reverse);
8490 }
8491 
8492 VPWidenIntOrFpInductionRecipe *
8493 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi,
8494                                            ArrayRef<VPValue *> Operands) const {
8495   // Check if this is an integer or fp induction. If so, build the recipe that
8496   // produces its scalar and vector values.
8497   if (auto *II = Legal->getIntOrFpInductionDescriptor(Phi)) {
8498     assert(II->getStartValue() ==
8499            Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8500     return new VPWidenIntOrFpInductionRecipe(Phi, Operands[0], *II);
8501   }
8502 
8503   return nullptr;
8504 }
8505 
8506 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate(
8507     TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range,
8508     VPlan &Plan) const {
8509   // Optimize the special case where the source is a constant integer
8510   // induction variable. Notice that we can only optimize the 'trunc' case
8511   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
8512   // (c) other casts depend on pointer size.
8513 
8514   // Determine whether \p K is a truncation based on an induction variable that
8515   // can be optimized.
8516   auto isOptimizableIVTruncate =
8517       [&](Instruction *K) -> std::function<bool(ElementCount)> {
8518     return [=](ElementCount VF) -> bool {
8519       return CM.isOptimizableIVTruncate(K, VF);
8520     };
8521   };
8522 
8523   if (LoopVectorizationPlanner::getDecisionAndClampRange(
8524           isOptimizableIVTruncate(I), Range)) {
8525 
8526     auto *Phi = cast<PHINode>(I->getOperand(0));
8527     const InductionDescriptor &II = *Legal->getIntOrFpInductionDescriptor(Phi);
8528     VPValue *Start = Plan.getOrAddVPValue(II.getStartValue());
8529     return new VPWidenIntOrFpInductionRecipe(Phi, Start, II, I);
8530   }
8531   return nullptr;
8532 }
8533 
8534 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi,
8535                                                 ArrayRef<VPValue *> Operands,
8536                                                 VPlanPtr &Plan) {
8537   // If all incoming values are equal, the incoming VPValue can be used directly
8538   // instead of creating a new VPBlendRecipe.
8539   VPValue *FirstIncoming = Operands[0];
8540   if (all_of(Operands, [FirstIncoming](const VPValue *Inc) {
8541         return FirstIncoming == Inc;
8542       })) {
8543     return Operands[0];
8544   }
8545 
8546   // We know that all PHIs in non-header blocks are converted into selects, so
8547   // we don't have to worry about the insertion order and we can just use the
8548   // builder. At this point we generate the predication tree. There may be
8549   // duplications since this is a simple recursive scan, but future
8550   // optimizations will clean it up.
8551   SmallVector<VPValue *, 2> OperandsWithMask;
8552   unsigned NumIncoming = Phi->getNumIncomingValues();
8553 
8554   for (unsigned In = 0; In < NumIncoming; In++) {
8555     VPValue *EdgeMask =
8556       createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
8557     assert((EdgeMask || NumIncoming == 1) &&
8558            "Multiple predecessors with one having a full mask");
8559     OperandsWithMask.push_back(Operands[In]);
8560     if (EdgeMask)
8561       OperandsWithMask.push_back(EdgeMask);
8562   }
8563   return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask));
8564 }
8565 
8566 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
8567                                                    ArrayRef<VPValue *> Operands,
8568                                                    VFRange &Range) const {
8569 
8570   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8571       [this, CI](ElementCount VF) { return CM.isScalarWithPredication(CI); },
8572       Range);
8573 
8574   if (IsPredicated)
8575     return nullptr;
8576 
8577   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8578   if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
8579              ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
8580              ID == Intrinsic::pseudoprobe ||
8581              ID == Intrinsic::experimental_noalias_scope_decl))
8582     return nullptr;
8583 
8584   auto willWiden = [&](ElementCount VF) -> bool {
8585     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8586     // The following case may be scalarized depending on the VF.
8587     // The flag shows whether we use Intrinsic or a usual Call for vectorized
8588     // version of the instruction.
8589     // Is it beneficial to perform intrinsic call compared to lib call?
8590     bool NeedToScalarize = false;
8591     InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
8592     InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0;
8593     bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
8594     return UseVectorIntrinsic || !NeedToScalarize;
8595   };
8596 
8597   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8598     return nullptr;
8599 
8600   ArrayRef<VPValue *> Ops = Operands.take_front(CI->arg_size());
8601   return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end()));
8602 }
8603 
8604 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
8605   assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
8606          !isa<StoreInst>(I) && "Instruction should have been handled earlier");
8607   // Instruction should be widened, unless it is scalar after vectorization,
8608   // scalarization is profitable or it is predicated.
8609   auto WillScalarize = [this, I](ElementCount VF) -> bool {
8610     return CM.isScalarAfterVectorization(I, VF) ||
8611            CM.isProfitableToScalarize(I, VF) || CM.isScalarWithPredication(I);
8612   };
8613   return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
8614                                                              Range);
8615 }
8616 
8617 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
8618                                            ArrayRef<VPValue *> Operands) const {
8619   auto IsVectorizableOpcode = [](unsigned Opcode) {
8620     switch (Opcode) {
8621     case Instruction::Add:
8622     case Instruction::And:
8623     case Instruction::AShr:
8624     case Instruction::BitCast:
8625     case Instruction::FAdd:
8626     case Instruction::FCmp:
8627     case Instruction::FDiv:
8628     case Instruction::FMul:
8629     case Instruction::FNeg:
8630     case Instruction::FPExt:
8631     case Instruction::FPToSI:
8632     case Instruction::FPToUI:
8633     case Instruction::FPTrunc:
8634     case Instruction::FRem:
8635     case Instruction::FSub:
8636     case Instruction::ICmp:
8637     case Instruction::IntToPtr:
8638     case Instruction::LShr:
8639     case Instruction::Mul:
8640     case Instruction::Or:
8641     case Instruction::PtrToInt:
8642     case Instruction::SDiv:
8643     case Instruction::Select:
8644     case Instruction::SExt:
8645     case Instruction::Shl:
8646     case Instruction::SIToFP:
8647     case Instruction::SRem:
8648     case Instruction::Sub:
8649     case Instruction::Trunc:
8650     case Instruction::UDiv:
8651     case Instruction::UIToFP:
8652     case Instruction::URem:
8653     case Instruction::Xor:
8654     case Instruction::ZExt:
8655       return true;
8656     }
8657     return false;
8658   };
8659 
8660   if (!IsVectorizableOpcode(I->getOpcode()))
8661     return nullptr;
8662 
8663   // Success: widen this instruction.
8664   return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end()));
8665 }
8666 
8667 void VPRecipeBuilder::fixHeaderPhis() {
8668   BasicBlock *OrigLatch = OrigLoop->getLoopLatch();
8669   for (VPHeaderPHIRecipe *R : PhisToFix) {
8670     auto *PN = cast<PHINode>(R->getUnderlyingValue());
8671     VPRecipeBase *IncR =
8672         getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch)));
8673     R->addOperand(IncR->getVPSingleValue());
8674   }
8675 }
8676 
8677 VPBasicBlock *VPRecipeBuilder::handleReplication(
8678     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
8679     VPlanPtr &Plan) {
8680   bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
8681       [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
8682       Range);
8683 
8684   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8685       [&](ElementCount VF) { return CM.isPredicatedInst(I, IsUniform); },
8686       Range);
8687 
8688   // Even if the instruction is not marked as uniform, there are certain
8689   // intrinsic calls that can be effectively treated as such, so we check for
8690   // them here. Conservatively, we only do this for scalable vectors, since
8691   // for fixed-width VFs we can always fall back on full scalarization.
8692   if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) {
8693     switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
8694     case Intrinsic::assume:
8695     case Intrinsic::lifetime_start:
8696     case Intrinsic::lifetime_end:
8697       // For scalable vectors if one of the operands is variant then we still
8698       // want to mark as uniform, which will generate one instruction for just
8699       // the first lane of the vector. We can't scalarize the call in the same
8700       // way as for fixed-width vectors because we don't know how many lanes
8701       // there are.
8702       //
8703       // The reasons for doing it this way for scalable vectors are:
8704       //   1. For the assume intrinsic generating the instruction for the first
8705       //      lane is still be better than not generating any at all. For
8706       //      example, the input may be a splat across all lanes.
8707       //   2. For the lifetime start/end intrinsics the pointer operand only
8708       //      does anything useful when the input comes from a stack object,
8709       //      which suggests it should always be uniform. For non-stack objects
8710       //      the effect is to poison the object, which still allows us to
8711       //      remove the call.
8712       IsUniform = true;
8713       break;
8714     default:
8715       break;
8716     }
8717   }
8718 
8719   auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
8720                                        IsUniform, IsPredicated);
8721   setRecipe(I, Recipe);
8722   Plan->addVPValue(I, Recipe);
8723 
8724   // Find if I uses a predicated instruction. If so, it will use its scalar
8725   // value. Avoid hoisting the insert-element which packs the scalar value into
8726   // a vector value, as that happens iff all users use the vector value.
8727   for (VPValue *Op : Recipe->operands()) {
8728     auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef());
8729     if (!PredR)
8730       continue;
8731     auto *RepR =
8732         cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef());
8733     assert(RepR->isPredicated() &&
8734            "expected Replicate recipe to be predicated");
8735     RepR->setAlsoPack(false);
8736   }
8737 
8738   // Finalize the recipe for Instr, first if it is not predicated.
8739   if (!IsPredicated) {
8740     LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
8741     VPBB->appendRecipe(Recipe);
8742     return VPBB;
8743   }
8744   LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
8745 
8746   VPBlockBase *SingleSucc = VPBB->getSingleSuccessor();
8747   assert(SingleSucc && "VPBB must have a single successor when handling "
8748                        "predicated replication.");
8749   VPBlockUtils::disconnectBlocks(VPBB, SingleSucc);
8750   // Record predicated instructions for above packing optimizations.
8751   VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
8752   VPBlockUtils::insertBlockAfter(Region, VPBB);
8753   auto *RegSucc = new VPBasicBlock();
8754   VPBlockUtils::insertBlockAfter(RegSucc, Region);
8755   VPBlockUtils::connectBlocks(RegSucc, SingleSucc);
8756   return RegSucc;
8757 }
8758 
8759 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
8760                                                       VPRecipeBase *PredRecipe,
8761                                                       VPlanPtr &Plan) {
8762   // Instructions marked for predication are replicated and placed under an
8763   // if-then construct to prevent side-effects.
8764 
8765   // Generate recipes to compute the block mask for this region.
8766   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
8767 
8768   // Build the triangular if-then region.
8769   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
8770   assert(Instr->getParent() && "Predicated instruction not in any basic block");
8771   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
8772   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
8773   auto *PHIRecipe = Instr->getType()->isVoidTy()
8774                         ? nullptr
8775                         : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr));
8776   if (PHIRecipe) {
8777     Plan->removeVPValueFor(Instr);
8778     Plan->addVPValue(Instr, PHIRecipe);
8779   }
8780   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
8781   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
8782   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
8783 
8784   // Note: first set Entry as region entry and then connect successors starting
8785   // from it in order, to propagate the "parent" of each VPBasicBlock.
8786   VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
8787   VPBlockUtils::connectBlocks(Pred, Exit);
8788 
8789   return Region;
8790 }
8791 
8792 VPRecipeOrVPValueTy
8793 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
8794                                         ArrayRef<VPValue *> Operands,
8795                                         VFRange &Range, VPlanPtr &Plan) {
8796   // First, check for specific widening recipes that deal with calls, memory
8797   // operations, inductions and Phi nodes.
8798   if (auto *CI = dyn_cast<CallInst>(Instr))
8799     return toVPRecipeResult(tryToWidenCall(CI, Operands, Range));
8800 
8801   if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8802     return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan));
8803 
8804   VPRecipeBase *Recipe;
8805   if (auto Phi = dyn_cast<PHINode>(Instr)) {
8806     if (Phi->getParent() != OrigLoop->getHeader())
8807       return tryToBlend(Phi, Operands, Plan);
8808     if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands)))
8809       return toVPRecipeResult(Recipe);
8810 
8811     VPHeaderPHIRecipe *PhiRecipe = nullptr;
8812     if (Legal->isReductionVariable(Phi) || Legal->isFirstOrderRecurrence(Phi)) {
8813       VPValue *StartV = Operands[0];
8814       if (Legal->isReductionVariable(Phi)) {
8815         const RecurrenceDescriptor &RdxDesc =
8816             Legal->getReductionVars().find(Phi)->second;
8817         assert(RdxDesc.getRecurrenceStartValue() ==
8818                Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8819         PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV,
8820                                              CM.isInLoopReduction(Phi),
8821                                              CM.useOrderedReductions(RdxDesc));
8822       } else {
8823         PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV);
8824       }
8825 
8826       // Record the incoming value from the backedge, so we can add the incoming
8827       // value from the backedge after all recipes have been created.
8828       recordRecipeOf(cast<Instruction>(
8829           Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch())));
8830       PhisToFix.push_back(PhiRecipe);
8831     } else {
8832       // TODO: record backedge value for remaining pointer induction phis.
8833       assert(Phi->getType()->isPointerTy() &&
8834              "only pointer phis should be handled here");
8835       assert(Legal->getInductionVars().count(Phi) &&
8836              "Not an induction variable");
8837       InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
8838       VPValue *Start = Plan->getOrAddVPValue(II.getStartValue());
8839       PhiRecipe = new VPWidenPHIRecipe(Phi, Start);
8840     }
8841 
8842     return toVPRecipeResult(PhiRecipe);
8843   }
8844 
8845   if (isa<TruncInst>(Instr) &&
8846       (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands,
8847                                                Range, *Plan)))
8848     return toVPRecipeResult(Recipe);
8849 
8850   if (!shouldWiden(Instr, Range))
8851     return nullptr;
8852 
8853   if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
8854     return toVPRecipeResult(new VPWidenGEPRecipe(
8855         GEP, make_range(Operands.begin(), Operands.end()), OrigLoop));
8856 
8857   if (auto *SI = dyn_cast<SelectInst>(Instr)) {
8858     bool InvariantCond =
8859         PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
8860     return toVPRecipeResult(new VPWidenSelectRecipe(
8861         *SI, make_range(Operands.begin(), Operands.end()), InvariantCond));
8862   }
8863 
8864   return toVPRecipeResult(tryToWiden(Instr, Operands));
8865 }
8866 
8867 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8868                                                         ElementCount MaxVF) {
8869   assert(OrigLoop->isInnermost() && "Inner loop expected.");
8870 
8871   // Collect instructions from the original loop that will become trivially dead
8872   // in the vectorized loop. We don't need to vectorize these instructions. For
8873   // example, original induction update instructions can become dead because we
8874   // separately emit induction "steps" when generating code for the new loop.
8875   // Similarly, we create a new latch condition when setting up the structure
8876   // of the new loop, so the old one can become dead.
8877   SmallPtrSet<Instruction *, 4> DeadInstructions;
8878   collectTriviallyDeadInstructions(DeadInstructions);
8879 
8880   // Add assume instructions we need to drop to DeadInstructions, to prevent
8881   // them from being added to the VPlan.
8882   // TODO: We only need to drop assumes in blocks that get flattend. If the
8883   // control flow is preserved, we should keep them.
8884   auto &ConditionalAssumes = Legal->getConditionalAssumes();
8885   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
8886 
8887   MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
8888   // Dead instructions do not need sinking. Remove them from SinkAfter.
8889   for (Instruction *I : DeadInstructions)
8890     SinkAfter.erase(I);
8891 
8892   // Cannot sink instructions after dead instructions (there won't be any
8893   // recipes for them). Instead, find the first non-dead previous instruction.
8894   for (auto &P : Legal->getSinkAfter()) {
8895     Instruction *SinkTarget = P.second;
8896     Instruction *FirstInst = &*SinkTarget->getParent()->begin();
8897     (void)FirstInst;
8898     while (DeadInstructions.contains(SinkTarget)) {
8899       assert(
8900           SinkTarget != FirstInst &&
8901           "Must find a live instruction (at least the one feeding the "
8902           "first-order recurrence PHI) before reaching beginning of the block");
8903       SinkTarget = SinkTarget->getPrevNode();
8904       assert(SinkTarget != P.first &&
8905              "sink source equals target, no sinking required");
8906     }
8907     P.second = SinkTarget;
8908   }
8909 
8910   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8911   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8912     VFRange SubRange = {VF, MaxVFPlusOne};
8913     VPlans.push_back(
8914         buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
8915     VF = SubRange.End;
8916   }
8917 }
8918 
8919 // Add a VPCanonicalIVPHIRecipe starting at 0 to the header and a
8920 // CanonicalIVIncrement{NUW} VPInstruction to increment it by VF * UF to the
8921 // latch.
8922 static void addCanonicalIVRecipes(VPlan &Plan, Type *IdxTy, DebugLoc DL,
8923                                   bool HasNUW, bool IsVPlanNative) {
8924   Value *StartIdx = ConstantInt::get(IdxTy, 0);
8925   auto *StartV = Plan.getOrAddVPValue(StartIdx);
8926 
8927   auto *CanonicalIVPHI = new VPCanonicalIVPHIRecipe(StartV, DL);
8928   VPRegionBlock *TopRegion = Plan.getVectorLoopRegion();
8929   VPBasicBlock *Header = TopRegion->getEntryBasicBlock();
8930   if (IsVPlanNative)
8931     Header = cast<VPBasicBlock>(Header->getSingleSuccessor());
8932   Header->insert(CanonicalIVPHI, Header->begin());
8933 
8934   auto *CanonicalIVIncrement =
8935       new VPInstruction(HasNUW ? VPInstruction::CanonicalIVIncrementNUW
8936                                : VPInstruction::CanonicalIVIncrement,
8937                         {CanonicalIVPHI}, DL);
8938   CanonicalIVPHI->addOperand(CanonicalIVIncrement);
8939 
8940   VPBasicBlock *EB = TopRegion->getExitBasicBlock();
8941   if (IsVPlanNative)
8942     EB = cast<VPBasicBlock>(EB->getSinglePredecessor());
8943   EB->appendRecipe(CanonicalIVIncrement);
8944 }
8945 
8946 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
8947     VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
8948     const MapVector<Instruction *, Instruction *> &SinkAfter) {
8949 
8950   SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
8951 
8952   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
8953 
8954   // ---------------------------------------------------------------------------
8955   // Pre-construction: record ingredients whose recipes we'll need to further
8956   // process after constructing the initial VPlan.
8957   // ---------------------------------------------------------------------------
8958 
8959   // Mark instructions we'll need to sink later and their targets as
8960   // ingredients whose recipe we'll need to record.
8961   for (auto &Entry : SinkAfter) {
8962     RecipeBuilder.recordRecipeOf(Entry.first);
8963     RecipeBuilder.recordRecipeOf(Entry.second);
8964   }
8965   for (auto &Reduction : CM.getInLoopReductionChains()) {
8966     PHINode *Phi = Reduction.first;
8967     RecurKind Kind =
8968         Legal->getReductionVars().find(Phi)->second.getRecurrenceKind();
8969     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
8970 
8971     RecipeBuilder.recordRecipeOf(Phi);
8972     for (auto &R : ReductionOperations) {
8973       RecipeBuilder.recordRecipeOf(R);
8974       // For min/max reducitons, where we have a pair of icmp/select, we also
8975       // need to record the ICmp recipe, so it can be removed later.
8976       assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
8977              "Only min/max recurrences allowed for inloop reductions");
8978       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
8979         RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
8980     }
8981   }
8982 
8983   // For each interleave group which is relevant for this (possibly trimmed)
8984   // Range, add it to the set of groups to be later applied to the VPlan and add
8985   // placeholders for its members' Recipes which we'll be replacing with a
8986   // single VPInterleaveRecipe.
8987   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
8988     auto applyIG = [IG, this](ElementCount VF) -> bool {
8989       return (VF.isVector() && // Query is illegal for VF == 1
8990               CM.getWideningDecision(IG->getInsertPos(), VF) ==
8991                   LoopVectorizationCostModel::CM_Interleave);
8992     };
8993     if (!getDecisionAndClampRange(applyIG, Range))
8994       continue;
8995     InterleaveGroups.insert(IG);
8996     for (unsigned i = 0; i < IG->getFactor(); i++)
8997       if (Instruction *Member = IG->getMember(i))
8998         RecipeBuilder.recordRecipeOf(Member);
8999   };
9000 
9001   // ---------------------------------------------------------------------------
9002   // Build initial VPlan: Scan the body of the loop in a topological order to
9003   // visit each basic block after having visited its predecessor basic blocks.
9004   // ---------------------------------------------------------------------------
9005 
9006   // Create initial VPlan skeleton, with separate header and latch blocks.
9007   VPBasicBlock *HeaderVPBB = new VPBasicBlock();
9008   VPBasicBlock *LatchVPBB = new VPBasicBlock("vector.latch");
9009   VPBlockUtils::insertBlockAfter(LatchVPBB, HeaderVPBB);
9010   auto *TopRegion = new VPRegionBlock(HeaderVPBB, LatchVPBB, "vector loop");
9011   auto Plan = std::make_unique<VPlan>(TopRegion);
9012 
9013   Instruction *DLInst =
9014       getDebugLocFromInstOrOperands(Legal->getPrimaryInduction());
9015   addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(),
9016                         DLInst ? DLInst->getDebugLoc() : DebugLoc(),
9017                         !CM.foldTailByMasking(), false);
9018 
9019   // Scan the body of the loop in a topological order to visit each basic block
9020   // after having visited its predecessor basic blocks.
9021   LoopBlocksDFS DFS(OrigLoop);
9022   DFS.perform(LI);
9023 
9024   VPBasicBlock *VPBB = HeaderVPBB;
9025   SmallVector<VPWidenIntOrFpInductionRecipe *> InductionsToMove;
9026   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
9027     // Relevant instructions from basic block BB will be grouped into VPRecipe
9028     // ingredients and fill a new VPBasicBlock.
9029     unsigned VPBBsForBB = 0;
9030     VPBB->setName(BB->getName());
9031     Builder.setInsertPoint(VPBB);
9032 
9033     // Introduce each ingredient into VPlan.
9034     // TODO: Model and preserve debug instrinsics in VPlan.
9035     for (Instruction &I : BB->instructionsWithoutDebug()) {
9036       Instruction *Instr = &I;
9037 
9038       // First filter out irrelevant instructions, to ensure no recipes are
9039       // built for them.
9040       if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
9041         continue;
9042 
9043       SmallVector<VPValue *, 4> Operands;
9044       auto *Phi = dyn_cast<PHINode>(Instr);
9045       if (Phi && Phi->getParent() == OrigLoop->getHeader()) {
9046         Operands.push_back(Plan->getOrAddVPValue(
9047             Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())));
9048       } else {
9049         auto OpRange = Plan->mapToVPValues(Instr->operands());
9050         Operands = {OpRange.begin(), OpRange.end()};
9051       }
9052       if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe(
9053               Instr, Operands, Range, Plan)) {
9054         // If Instr can be simplified to an existing VPValue, use it.
9055         if (RecipeOrValue.is<VPValue *>()) {
9056           auto *VPV = RecipeOrValue.get<VPValue *>();
9057           Plan->addVPValue(Instr, VPV);
9058           // If the re-used value is a recipe, register the recipe for the
9059           // instruction, in case the recipe for Instr needs to be recorded.
9060           if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef()))
9061             RecipeBuilder.setRecipe(Instr, R);
9062           continue;
9063         }
9064         // Otherwise, add the new recipe.
9065         VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>();
9066         for (auto *Def : Recipe->definedValues()) {
9067           auto *UV = Def->getUnderlyingValue();
9068           Plan->addVPValue(UV, Def);
9069         }
9070 
9071         if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) &&
9072             HeaderVPBB->getFirstNonPhi() != VPBB->end()) {
9073           // Keep track of VPWidenIntOrFpInductionRecipes not in the phi section
9074           // of the header block. That can happen for truncates of induction
9075           // variables. Those recipes are moved to the phi section of the header
9076           // block after applying SinkAfter, which relies on the original
9077           // position of the trunc.
9078           assert(isa<TruncInst>(Instr));
9079           InductionsToMove.push_back(
9080               cast<VPWidenIntOrFpInductionRecipe>(Recipe));
9081         }
9082         RecipeBuilder.setRecipe(Instr, Recipe);
9083         VPBB->appendRecipe(Recipe);
9084         continue;
9085       }
9086 
9087       // Otherwise, if all widening options failed, Instruction is to be
9088       // replicated. This may create a successor for VPBB.
9089       VPBasicBlock *NextVPBB =
9090           RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan);
9091       if (NextVPBB != VPBB) {
9092         VPBB = NextVPBB;
9093         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
9094                                     : "");
9095       }
9096     }
9097 
9098     VPBlockUtils::insertBlockAfter(new VPBasicBlock(), VPBB);
9099     VPBB = cast<VPBasicBlock>(VPBB->getSingleSuccessor());
9100   }
9101 
9102   // Fold the last, empty block into its predecessor.
9103   VPBB = VPBlockUtils::tryToMergeBlockIntoPredecessor(VPBB);
9104   assert(VPBB && "expected to fold last (empty) block");
9105   // After here, VPBB should not be used.
9106   VPBB = nullptr;
9107 
9108   assert(isa<VPRegionBlock>(Plan->getEntry()) &&
9109          !Plan->getEntry()->getEntryBasicBlock()->empty() &&
9110          "entry block must be set to a VPRegionBlock having a non-empty entry "
9111          "VPBasicBlock");
9112   RecipeBuilder.fixHeaderPhis();
9113 
9114   // ---------------------------------------------------------------------------
9115   // Transform initial VPlan: Apply previously taken decisions, in order, to
9116   // bring the VPlan to its final state.
9117   // ---------------------------------------------------------------------------
9118 
9119   // Apply Sink-After legal constraints.
9120   auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * {
9121     auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent());
9122     if (Region && Region->isReplicator()) {
9123       assert(Region->getNumSuccessors() == 1 &&
9124              Region->getNumPredecessors() == 1 && "Expected SESE region!");
9125       assert(R->getParent()->size() == 1 &&
9126              "A recipe in an original replicator region must be the only "
9127              "recipe in its block");
9128       return Region;
9129     }
9130     return nullptr;
9131   };
9132   for (auto &Entry : SinkAfter) {
9133     VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
9134     VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
9135 
9136     auto *TargetRegion = GetReplicateRegion(Target);
9137     auto *SinkRegion = GetReplicateRegion(Sink);
9138     if (!SinkRegion) {
9139       // If the sink source is not a replicate region, sink the recipe directly.
9140       if (TargetRegion) {
9141         // The target is in a replication region, make sure to move Sink to
9142         // the block after it, not into the replication region itself.
9143         VPBasicBlock *NextBlock =
9144             cast<VPBasicBlock>(TargetRegion->getSuccessors().front());
9145         Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi());
9146       } else
9147         Sink->moveAfter(Target);
9148       continue;
9149     }
9150 
9151     // The sink source is in a replicate region. Unhook the region from the CFG.
9152     auto *SinkPred = SinkRegion->getSinglePredecessor();
9153     auto *SinkSucc = SinkRegion->getSingleSuccessor();
9154     VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion);
9155     VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc);
9156     VPBlockUtils::connectBlocks(SinkPred, SinkSucc);
9157 
9158     if (TargetRegion) {
9159       // The target recipe is also in a replicate region, move the sink region
9160       // after the target region.
9161       auto *TargetSucc = TargetRegion->getSingleSuccessor();
9162       VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc);
9163       VPBlockUtils::connectBlocks(TargetRegion, SinkRegion);
9164       VPBlockUtils::connectBlocks(SinkRegion, TargetSucc);
9165     } else {
9166       // The sink source is in a replicate region, we need to move the whole
9167       // replicate region, which should only contain a single recipe in the
9168       // main block.
9169       auto *SplitBlock =
9170           Target->getParent()->splitAt(std::next(Target->getIterator()));
9171 
9172       auto *SplitPred = SplitBlock->getSinglePredecessor();
9173 
9174       VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock);
9175       VPBlockUtils::connectBlocks(SplitPred, SinkRegion);
9176       VPBlockUtils::connectBlocks(SinkRegion, SplitBlock);
9177     }
9178   }
9179 
9180   VPlanTransforms::removeRedundantInductionCasts(*Plan);
9181 
9182   // Now that sink-after is done, move induction recipes for optimized truncates
9183   // to the phi section of the header block.
9184   for (VPWidenIntOrFpInductionRecipe *Ind : InductionsToMove)
9185     Ind->moveBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi());
9186 
9187   // Adjust the recipes for any inloop reductions.
9188   adjustRecipesForReductions(cast<VPBasicBlock>(TopRegion->getExit()), Plan,
9189                              RecipeBuilder, Range.Start);
9190 
9191   // Introduce a recipe to combine the incoming and previous values of a
9192   // first-order recurrence.
9193   for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) {
9194     auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R);
9195     if (!RecurPhi)
9196       continue;
9197 
9198     VPRecipeBase *PrevRecipe = RecurPhi->getBackedgeRecipe();
9199     VPBasicBlock *InsertBlock = PrevRecipe->getParent();
9200     auto *Region = GetReplicateRegion(PrevRecipe);
9201     if (Region)
9202       InsertBlock = cast<VPBasicBlock>(Region->getSingleSuccessor());
9203     if (Region || PrevRecipe->isPhi())
9204       Builder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi());
9205     else
9206       Builder.setInsertPoint(InsertBlock, std::next(PrevRecipe->getIterator()));
9207 
9208     auto *RecurSplice = cast<VPInstruction>(
9209         Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice,
9210                              {RecurPhi, RecurPhi->getBackedgeValue()}));
9211 
9212     RecurPhi->replaceAllUsesWith(RecurSplice);
9213     // Set the first operand of RecurSplice to RecurPhi again, after replacing
9214     // all users.
9215     RecurSplice->setOperand(0, RecurPhi);
9216   }
9217 
9218   // Interleave memory: for each Interleave Group we marked earlier as relevant
9219   // for this VPlan, replace the Recipes widening its memory instructions with a
9220   // single VPInterleaveRecipe at its insertion point.
9221   for (auto IG : InterleaveGroups) {
9222     auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
9223         RecipeBuilder.getRecipe(IG->getInsertPos()));
9224     SmallVector<VPValue *, 4> StoredValues;
9225     for (unsigned i = 0; i < IG->getFactor(); ++i)
9226       if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) {
9227         auto *StoreR =
9228             cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI));
9229         StoredValues.push_back(StoreR->getStoredValue());
9230       }
9231 
9232     auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues,
9233                                         Recipe->getMask());
9234     VPIG->insertBefore(Recipe);
9235     unsigned J = 0;
9236     for (unsigned i = 0; i < IG->getFactor(); ++i)
9237       if (Instruction *Member = IG->getMember(i)) {
9238         if (!Member->getType()->isVoidTy()) {
9239           VPValue *OriginalV = Plan->getVPValue(Member);
9240           Plan->removeVPValueFor(Member);
9241           Plan->addVPValue(Member, VPIG->getVPValue(J));
9242           OriginalV->replaceAllUsesWith(VPIG->getVPValue(J));
9243           J++;
9244         }
9245         RecipeBuilder.getRecipe(Member)->eraseFromParent();
9246       }
9247   }
9248 
9249   // From this point onwards, VPlan-to-VPlan transformations may change the plan
9250   // in ways that accessing values using original IR values is incorrect.
9251   Plan->disableValue2VPValue();
9252 
9253   VPlanTransforms::sinkScalarOperands(*Plan);
9254   VPlanTransforms::mergeReplicateRegions(*Plan);
9255 
9256   std::string PlanName;
9257   raw_string_ostream RSO(PlanName);
9258   ElementCount VF = Range.Start;
9259   Plan->addVF(VF);
9260   RSO << "Initial VPlan for VF={" << VF;
9261   for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) {
9262     Plan->addVF(VF);
9263     RSO << "," << VF;
9264   }
9265   RSO << "},UF>=1";
9266   RSO.flush();
9267   Plan->setName(PlanName);
9268 
9269   // Fold Exit block into its predecessor if possible.
9270   // TODO: Fold block earlier once all VPlan transforms properly maintain a
9271   // VPBasicBlock as exit.
9272   VPBlockUtils::tryToMergeBlockIntoPredecessor(TopRegion->getExit());
9273 
9274   assert(VPlanVerifier::verifyPlanIsValid(*Plan) && "VPlan is invalid");
9275   return Plan;
9276 }
9277 
9278 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
9279   // Outer loop handling: They may require CFG and instruction level
9280   // transformations before even evaluating whether vectorization is profitable.
9281   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
9282   // the vectorization pipeline.
9283   assert(!OrigLoop->isInnermost());
9284   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
9285 
9286   // Create new empty VPlan
9287   auto Plan = std::make_unique<VPlan>();
9288 
9289   // Build hierarchical CFG
9290   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
9291   HCFGBuilder.buildHierarchicalCFG();
9292 
9293   for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
9294        VF *= 2)
9295     Plan->addVF(VF);
9296 
9297   if (EnableVPlanPredication) {
9298     VPlanPredicator VPP(*Plan);
9299     VPP.predicate();
9300 
9301     // Avoid running transformation to recipes until masked code generation in
9302     // VPlan-native path is in place.
9303     return Plan;
9304   }
9305 
9306   SmallPtrSet<Instruction *, 1> DeadInstructions;
9307   VPlanTransforms::VPInstructionsToVPRecipes(
9308       OrigLoop, Plan,
9309       [this](PHINode *P) { return Legal->getIntOrFpInductionDescriptor(P); },
9310       DeadInstructions, *PSE.getSE());
9311 
9312   addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(), DebugLoc(),
9313                         true, true);
9314   return Plan;
9315 }
9316 
9317 // Adjust the recipes for reductions. For in-loop reductions the chain of
9318 // instructions leading from the loop exit instr to the phi need to be converted
9319 // to reductions, with one operand being vector and the other being the scalar
9320 // reduction chain. For other reductions, a select is introduced between the phi
9321 // and live-out recipes when folding the tail.
9322 void LoopVectorizationPlanner::adjustRecipesForReductions(
9323     VPBasicBlock *LatchVPBB, VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder,
9324     ElementCount MinVF) {
9325   for (auto &Reduction : CM.getInLoopReductionChains()) {
9326     PHINode *Phi = Reduction.first;
9327     const RecurrenceDescriptor &RdxDesc =
9328         Legal->getReductionVars().find(Phi)->second;
9329     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9330 
9331     if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc))
9332       continue;
9333 
9334     // ReductionOperations are orders top-down from the phi's use to the
9335     // LoopExitValue. We keep a track of the previous item (the Chain) to tell
9336     // which of the two operands will remain scalar and which will be reduced.
9337     // For minmax the chain will be the select instructions.
9338     Instruction *Chain = Phi;
9339     for (Instruction *R : ReductionOperations) {
9340       VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
9341       RecurKind Kind = RdxDesc.getRecurrenceKind();
9342 
9343       VPValue *ChainOp = Plan->getVPValue(Chain);
9344       unsigned FirstOpId;
9345       assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
9346              "Only min/max recurrences allowed for inloop reductions");
9347       // Recognize a call to the llvm.fmuladd intrinsic.
9348       bool IsFMulAdd = (Kind == RecurKind::FMulAdd);
9349       assert((!IsFMulAdd || RecurrenceDescriptor::isFMulAddIntrinsic(R)) &&
9350              "Expected instruction to be a call to the llvm.fmuladd intrinsic");
9351       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9352         assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
9353                "Expected to replace a VPWidenSelectSC");
9354         FirstOpId = 1;
9355       } else {
9356         assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe) ||
9357                 (IsFMulAdd && isa<VPWidenCallRecipe>(WidenRecipe))) &&
9358                "Expected to replace a VPWidenSC");
9359         FirstOpId = 0;
9360       }
9361       unsigned VecOpId =
9362           R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
9363       VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));
9364 
9365       auto *CondOp = CM.foldTailByMasking()
9366                          ? RecipeBuilder.createBlockInMask(R->getParent(), Plan)
9367                          : nullptr;
9368 
9369       if (IsFMulAdd) {
9370         // If the instruction is a call to the llvm.fmuladd intrinsic then we
9371         // need to create an fmul recipe to use as the vector operand for the
9372         // fadd reduction.
9373         VPInstruction *FMulRecipe = new VPInstruction(
9374             Instruction::FMul, {VecOp, Plan->getVPValue(R->getOperand(1))});
9375         FMulRecipe->setFastMathFlags(R->getFastMathFlags());
9376         WidenRecipe->getParent()->insert(FMulRecipe,
9377                                          WidenRecipe->getIterator());
9378         VecOp = FMulRecipe;
9379       }
9380       VPReductionRecipe *RedRecipe =
9381           new VPReductionRecipe(&RdxDesc, R, ChainOp, VecOp, CondOp, TTI);
9382       WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
9383       Plan->removeVPValueFor(R);
9384       Plan->addVPValue(R, RedRecipe);
9385       WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator());
9386       WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
9387       WidenRecipe->eraseFromParent();
9388 
9389       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9390         VPRecipeBase *CompareRecipe =
9391             RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
9392         assert(isa<VPWidenRecipe>(CompareRecipe) &&
9393                "Expected to replace a VPWidenSC");
9394         assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 &&
9395                "Expected no remaining users");
9396         CompareRecipe->eraseFromParent();
9397       }
9398       Chain = R;
9399     }
9400   }
9401 
9402   // If tail is folded by masking, introduce selects between the phi
9403   // and the live-out instruction of each reduction, at the end of the latch.
9404   if (CM.foldTailByMasking()) {
9405     for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) {
9406       VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
9407       if (!PhiR || PhiR->isInLoop())
9408         continue;
9409       Builder.setInsertPoint(LatchVPBB);
9410       VPValue *Cond =
9411           RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
9412       VPValue *Red = PhiR->getBackedgeValue();
9413       Builder.createNaryOp(Instruction::Select, {Cond, Red, PhiR});
9414     }
9415   }
9416 }
9417 
9418 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
9419 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
9420                                VPSlotTracker &SlotTracker) const {
9421   O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
9422   IG->getInsertPos()->printAsOperand(O, false);
9423   O << ", ";
9424   getAddr()->printAsOperand(O, SlotTracker);
9425   VPValue *Mask = getMask();
9426   if (Mask) {
9427     O << ", ";
9428     Mask->printAsOperand(O, SlotTracker);
9429   }
9430 
9431   unsigned OpIdx = 0;
9432   for (unsigned i = 0; i < IG->getFactor(); ++i) {
9433     if (!IG->getMember(i))
9434       continue;
9435     if (getNumStoreOperands() > 0) {
9436       O << "\n" << Indent << "  store ";
9437       getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker);
9438       O << " to index " << i;
9439     } else {
9440       O << "\n" << Indent << "  ";
9441       getVPValue(OpIdx)->printAsOperand(O, SlotTracker);
9442       O << " = load from index " << i;
9443     }
9444     ++OpIdx;
9445   }
9446 }
9447 #endif
9448 
9449 void VPWidenCallRecipe::execute(VPTransformState &State) {
9450   State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this,
9451                                   *this, State);
9452 }
9453 
9454 void VPWidenSelectRecipe::execute(VPTransformState &State) {
9455   auto &I = *cast<SelectInst>(getUnderlyingInstr());
9456   State.ILV->setDebugLocFromInst(&I);
9457 
9458   // The condition can be loop invariant  but still defined inside the
9459   // loop. This means that we can't just use the original 'cond' value.
9460   // We have to take the 'vectorized' value and pick the first lane.
9461   // Instcombine will make this a no-op.
9462   auto *InvarCond =
9463       InvariantCond ? State.get(getOperand(0), VPIteration(0, 0)) : nullptr;
9464 
9465   for (unsigned Part = 0; Part < State.UF; ++Part) {
9466     Value *Cond = InvarCond ? InvarCond : State.get(getOperand(0), Part);
9467     Value *Op0 = State.get(getOperand(1), Part);
9468     Value *Op1 = State.get(getOperand(2), Part);
9469     Value *Sel = State.Builder.CreateSelect(Cond, Op0, Op1);
9470     State.set(this, Sel, Part);
9471     State.ILV->addMetadata(Sel, &I);
9472   }
9473 }
9474 
9475 void VPWidenRecipe::execute(VPTransformState &State) {
9476   auto &I = *cast<Instruction>(getUnderlyingValue());
9477   auto &Builder = State.Builder;
9478   switch (I.getOpcode()) {
9479   case Instruction::Call:
9480   case Instruction::Br:
9481   case Instruction::PHI:
9482   case Instruction::GetElementPtr:
9483   case Instruction::Select:
9484     llvm_unreachable("This instruction is handled by a different recipe.");
9485   case Instruction::UDiv:
9486   case Instruction::SDiv:
9487   case Instruction::SRem:
9488   case Instruction::URem:
9489   case Instruction::Add:
9490   case Instruction::FAdd:
9491   case Instruction::Sub:
9492   case Instruction::FSub:
9493   case Instruction::FNeg:
9494   case Instruction::Mul:
9495   case Instruction::FMul:
9496   case Instruction::FDiv:
9497   case Instruction::FRem:
9498   case Instruction::Shl:
9499   case Instruction::LShr:
9500   case Instruction::AShr:
9501   case Instruction::And:
9502   case Instruction::Or:
9503   case Instruction::Xor: {
9504     // Just widen unops and binops.
9505     State.ILV->setDebugLocFromInst(&I);
9506 
9507     for (unsigned Part = 0; Part < State.UF; ++Part) {
9508       SmallVector<Value *, 2> Ops;
9509       for (VPValue *VPOp : operands())
9510         Ops.push_back(State.get(VPOp, Part));
9511 
9512       Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
9513 
9514       if (auto *VecOp = dyn_cast<Instruction>(V)) {
9515         VecOp->copyIRFlags(&I);
9516 
9517         // If the instruction is vectorized and was in a basic block that needed
9518         // predication, we can't propagate poison-generating flags (nuw/nsw,
9519         // exact, etc.). The control flow has been linearized and the
9520         // instruction is no longer guarded by the predicate, which could make
9521         // the flag properties to no longer hold.
9522         if (State.MayGeneratePoisonRecipes.contains(this))
9523           VecOp->dropPoisonGeneratingFlags();
9524       }
9525 
9526       // Use this vector value for all users of the original instruction.
9527       State.set(this, V, Part);
9528       State.ILV->addMetadata(V, &I);
9529     }
9530 
9531     break;
9532   }
9533   case Instruction::ICmp:
9534   case Instruction::FCmp: {
9535     // Widen compares. Generate vector compares.
9536     bool FCmp = (I.getOpcode() == Instruction::FCmp);
9537     auto *Cmp = cast<CmpInst>(&I);
9538     State.ILV->setDebugLocFromInst(Cmp);
9539     for (unsigned Part = 0; Part < State.UF; ++Part) {
9540       Value *A = State.get(getOperand(0), Part);
9541       Value *B = State.get(getOperand(1), Part);
9542       Value *C = nullptr;
9543       if (FCmp) {
9544         // Propagate fast math flags.
9545         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
9546         Builder.setFastMathFlags(Cmp->getFastMathFlags());
9547         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
9548       } else {
9549         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
9550       }
9551       State.set(this, C, Part);
9552       State.ILV->addMetadata(C, &I);
9553     }
9554 
9555     break;
9556   }
9557 
9558   case Instruction::ZExt:
9559   case Instruction::SExt:
9560   case Instruction::FPToUI:
9561   case Instruction::FPToSI:
9562   case Instruction::FPExt:
9563   case Instruction::PtrToInt:
9564   case Instruction::IntToPtr:
9565   case Instruction::SIToFP:
9566   case Instruction::UIToFP:
9567   case Instruction::Trunc:
9568   case Instruction::FPTrunc:
9569   case Instruction::BitCast: {
9570     auto *CI = cast<CastInst>(&I);
9571     State.ILV->setDebugLocFromInst(CI);
9572 
9573     /// Vectorize casts.
9574     Type *DestTy = (State.VF.isScalar())
9575                        ? CI->getType()
9576                        : VectorType::get(CI->getType(), State.VF);
9577 
9578     for (unsigned Part = 0; Part < State.UF; ++Part) {
9579       Value *A = State.get(getOperand(0), Part);
9580       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
9581       State.set(this, Cast, Part);
9582       State.ILV->addMetadata(Cast, &I);
9583     }
9584     break;
9585   }
9586   default:
9587     // This instruction is not vectorized by simple widening.
9588     LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
9589     llvm_unreachable("Unhandled instruction!");
9590   } // end of switch.
9591 }
9592 
9593 void VPWidenGEPRecipe::execute(VPTransformState &State) {
9594   auto *GEP = cast<GetElementPtrInst>(getUnderlyingInstr());
9595   // Construct a vector GEP by widening the operands of the scalar GEP as
9596   // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
9597   // results in a vector of pointers when at least one operand of the GEP
9598   // is vector-typed. Thus, to keep the representation compact, we only use
9599   // vector-typed operands for loop-varying values.
9600 
9601   if (State.VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
9602     // If we are vectorizing, but the GEP has only loop-invariant operands,
9603     // the GEP we build (by only using vector-typed operands for
9604     // loop-varying values) would be a scalar pointer. Thus, to ensure we
9605     // produce a vector of pointers, we need to either arbitrarily pick an
9606     // operand to broadcast, or broadcast a clone of the original GEP.
9607     // Here, we broadcast a clone of the original.
9608     //
9609     // TODO: If at some point we decide to scalarize instructions having
9610     //       loop-invariant operands, this special case will no longer be
9611     //       required. We would add the scalarization decision to
9612     //       collectLoopScalars() and teach getVectorValue() to broadcast
9613     //       the lane-zero scalar value.
9614     auto *Clone = State.Builder.Insert(GEP->clone());
9615     for (unsigned Part = 0; Part < State.UF; ++Part) {
9616       Value *EntryPart = State.Builder.CreateVectorSplat(State.VF, Clone);
9617       State.set(this, EntryPart, Part);
9618       State.ILV->addMetadata(EntryPart, GEP);
9619     }
9620   } else {
9621     // If the GEP has at least one loop-varying operand, we are sure to
9622     // produce a vector of pointers. But if we are only unrolling, we want
9623     // to produce a scalar GEP for each unroll part. Thus, the GEP we
9624     // produce with the code below will be scalar (if VF == 1) or vector
9625     // (otherwise). Note that for the unroll-only case, we still maintain
9626     // values in the vector mapping with initVector, as we do for other
9627     // instructions.
9628     for (unsigned Part = 0; Part < State.UF; ++Part) {
9629       // The pointer operand of the new GEP. If it's loop-invariant, we
9630       // won't broadcast it.
9631       auto *Ptr = IsPtrLoopInvariant
9632                       ? State.get(getOperand(0), VPIteration(0, 0))
9633                       : State.get(getOperand(0), Part);
9634 
9635       // Collect all the indices for the new GEP. If any index is
9636       // loop-invariant, we won't broadcast it.
9637       SmallVector<Value *, 4> Indices;
9638       for (unsigned I = 1, E = getNumOperands(); I < E; I++) {
9639         VPValue *Operand = getOperand(I);
9640         if (IsIndexLoopInvariant[I - 1])
9641           Indices.push_back(State.get(Operand, VPIteration(0, 0)));
9642         else
9643           Indices.push_back(State.get(Operand, Part));
9644       }
9645 
9646       // If the GEP instruction is vectorized and was in a basic block that
9647       // needed predication, we can't propagate the poison-generating 'inbounds'
9648       // flag. The control flow has been linearized and the GEP is no longer
9649       // guarded by the predicate, which could make the 'inbounds' properties to
9650       // no longer hold.
9651       bool IsInBounds =
9652           GEP->isInBounds() && State.MayGeneratePoisonRecipes.count(this) == 0;
9653 
9654       // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
9655       // but it should be a vector, otherwise.
9656       auto *NewGEP = IsInBounds
9657                          ? State.Builder.CreateInBoundsGEP(
9658                                GEP->getSourceElementType(), Ptr, Indices)
9659                          : State.Builder.CreateGEP(GEP->getSourceElementType(),
9660                                                    Ptr, Indices);
9661       assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
9662              "NewGEP is not a pointer vector");
9663       State.set(this, NewGEP, Part);
9664       State.ILV->addMetadata(NewGEP, GEP);
9665     }
9666   }
9667 }
9668 
9669 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
9670   assert(!State.Instance && "Int or FP induction being replicated.");
9671   auto *CanonicalIV = State.get(getParent()->getPlan()->getCanonicalIV(), 0);
9672   State.ILV->widenIntOrFpInduction(IV, getInductionDescriptor(),
9673                                    getStartValue()->getLiveInIRValue(),
9674                                    getTruncInst(), this, State, CanonicalIV);
9675 }
9676 
9677 void VPWidenPHIRecipe::execute(VPTransformState &State) {
9678   State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), this,
9679                                  State);
9680 }
9681 
9682 void VPBlendRecipe::execute(VPTransformState &State) {
9683   State.ILV->setDebugLocFromInst(Phi, &State.Builder);
9684   // We know that all PHIs in non-header blocks are converted into
9685   // selects, so we don't have to worry about the insertion order and we
9686   // can just use the builder.
9687   // At this point we generate the predication tree. There may be
9688   // duplications since this is a simple recursive scan, but future
9689   // optimizations will clean it up.
9690 
9691   unsigned NumIncoming = getNumIncomingValues();
9692 
9693   // Generate a sequence of selects of the form:
9694   // SELECT(Mask3, In3,
9695   //        SELECT(Mask2, In2,
9696   //               SELECT(Mask1, In1,
9697   //                      In0)))
9698   // Note that Mask0 is never used: lanes for which no path reaches this phi and
9699   // are essentially undef are taken from In0.
9700   InnerLoopVectorizer::VectorParts Entry(State.UF);
9701   for (unsigned In = 0; In < NumIncoming; ++In) {
9702     for (unsigned Part = 0; Part < State.UF; ++Part) {
9703       // We might have single edge PHIs (blocks) - use an identity
9704       // 'select' for the first PHI operand.
9705       Value *In0 = State.get(getIncomingValue(In), Part);
9706       if (In == 0)
9707         Entry[Part] = In0; // Initialize with the first incoming value.
9708       else {
9709         // Select between the current value and the previous incoming edge
9710         // based on the incoming mask.
9711         Value *Cond = State.get(getMask(In), Part);
9712         Entry[Part] =
9713             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
9714       }
9715     }
9716   }
9717   for (unsigned Part = 0; Part < State.UF; ++Part)
9718     State.set(this, Entry[Part], Part);
9719 }
9720 
9721 void VPInterleaveRecipe::execute(VPTransformState &State) {
9722   assert(!State.Instance && "Interleave group being replicated.");
9723   State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(),
9724                                       getStoredValues(), getMask());
9725 }
9726 
9727 void VPReductionRecipe::execute(VPTransformState &State) {
9728   assert(!State.Instance && "Reduction being replicated.");
9729   Value *PrevInChain = State.get(getChainOp(), 0);
9730   RecurKind Kind = RdxDesc->getRecurrenceKind();
9731   bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc);
9732   // Propagate the fast-math flags carried by the underlying instruction.
9733   IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
9734   State.Builder.setFastMathFlags(RdxDesc->getFastMathFlags());
9735   for (unsigned Part = 0; Part < State.UF; ++Part) {
9736     Value *NewVecOp = State.get(getVecOp(), Part);
9737     if (VPValue *Cond = getCondOp()) {
9738       Value *NewCond = State.get(Cond, Part);
9739       VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
9740       Value *Iden = RdxDesc->getRecurrenceIdentity(
9741           Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags());
9742       Value *IdenVec =
9743           State.Builder.CreateVectorSplat(VecTy->getElementCount(), Iden);
9744       Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
9745       NewVecOp = Select;
9746     }
9747     Value *NewRed;
9748     Value *NextInChain;
9749     if (IsOrdered) {
9750       if (State.VF.isVector())
9751         NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp,
9752                                         PrevInChain);
9753       else
9754         NewRed = State.Builder.CreateBinOp(
9755             (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), PrevInChain,
9756             NewVecOp);
9757       PrevInChain = NewRed;
9758     } else {
9759       PrevInChain = State.get(getChainOp(), Part);
9760       NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
9761     }
9762     if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9763       NextInChain =
9764           createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
9765                          NewRed, PrevInChain);
9766     } else if (IsOrdered)
9767       NextInChain = NewRed;
9768     else
9769       NextInChain = State.Builder.CreateBinOp(
9770           (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), NewRed,
9771           PrevInChain);
9772     State.set(this, NextInChain, Part);
9773   }
9774 }
9775 
9776 void VPReplicateRecipe::execute(VPTransformState &State) {
9777   if (State.Instance) { // Generate a single instance.
9778     assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
9779     State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *State.Instance,
9780                                     IsPredicated, State);
9781     // Insert scalar instance packing it into a vector.
9782     if (AlsoPack && State.VF.isVector()) {
9783       // If we're constructing lane 0, initialize to start from poison.
9784       if (State.Instance->Lane.isFirstLane()) {
9785         assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
9786         Value *Poison = PoisonValue::get(
9787             VectorType::get(getUnderlyingValue()->getType(), State.VF));
9788         State.set(this, Poison, State.Instance->Part);
9789       }
9790       State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
9791     }
9792     return;
9793   }
9794 
9795   // Generate scalar instances for all VF lanes of all UF parts, unless the
9796   // instruction is uniform inwhich case generate only the first lane for each
9797   // of the UF parts.
9798   unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
9799   assert((!State.VF.isScalable() || IsUniform) &&
9800          "Can't scalarize a scalable vector");
9801   for (unsigned Part = 0; Part < State.UF; ++Part)
9802     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
9803       State.ILV->scalarizeInstruction(getUnderlyingInstr(), this,
9804                                       VPIteration(Part, Lane), IsPredicated,
9805                                       State);
9806 }
9807 
9808 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
9809   assert(State.Instance && "Branch on Mask works only on single instance.");
9810 
9811   unsigned Part = State.Instance->Part;
9812   unsigned Lane = State.Instance->Lane.getKnownLane();
9813 
9814   Value *ConditionBit = nullptr;
9815   VPValue *BlockInMask = getMask();
9816   if (BlockInMask) {
9817     ConditionBit = State.get(BlockInMask, Part);
9818     if (ConditionBit->getType()->isVectorTy())
9819       ConditionBit = State.Builder.CreateExtractElement(
9820           ConditionBit, State.Builder.getInt32(Lane));
9821   } else // Block in mask is all-one.
9822     ConditionBit = State.Builder.getTrue();
9823 
9824   // Replace the temporary unreachable terminator with a new conditional branch,
9825   // whose two destinations will be set later when they are created.
9826   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
9827   assert(isa<UnreachableInst>(CurrentTerminator) &&
9828          "Expected to replace unreachable terminator with conditional branch.");
9829   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
9830   CondBr->setSuccessor(0, nullptr);
9831   ReplaceInstWithInst(CurrentTerminator, CondBr);
9832 }
9833 
9834 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
9835   assert(State.Instance && "Predicated instruction PHI works per instance.");
9836   Instruction *ScalarPredInst =
9837       cast<Instruction>(State.get(getOperand(0), *State.Instance));
9838   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
9839   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
9840   assert(PredicatingBB && "Predicated block has no single predecessor.");
9841   assert(isa<VPReplicateRecipe>(getOperand(0)) &&
9842          "operand must be VPReplicateRecipe");
9843 
9844   // By current pack/unpack logic we need to generate only a single phi node: if
9845   // a vector value for the predicated instruction exists at this point it means
9846   // the instruction has vector users only, and a phi for the vector value is
9847   // needed. In this case the recipe of the predicated instruction is marked to
9848   // also do that packing, thereby "hoisting" the insert-element sequence.
9849   // Otherwise, a phi node for the scalar value is needed.
9850   unsigned Part = State.Instance->Part;
9851   if (State.hasVectorValue(getOperand(0), Part)) {
9852     Value *VectorValue = State.get(getOperand(0), Part);
9853     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
9854     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
9855     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
9856     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
9857     if (State.hasVectorValue(this, Part))
9858       State.reset(this, VPhi, Part);
9859     else
9860       State.set(this, VPhi, Part);
9861     // NOTE: Currently we need to update the value of the operand, so the next
9862     // predicated iteration inserts its generated value in the correct vector.
9863     State.reset(getOperand(0), VPhi, Part);
9864   } else {
9865     Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType();
9866     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
9867     Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
9868                      PredicatingBB);
9869     Phi->addIncoming(ScalarPredInst, PredicatedBB);
9870     if (State.hasScalarValue(this, *State.Instance))
9871       State.reset(this, Phi, *State.Instance);
9872     else
9873       State.set(this, Phi, *State.Instance);
9874     // NOTE: Currently we need to update the value of the operand, so the next
9875     // predicated iteration inserts its generated value in the correct vector.
9876     State.reset(getOperand(0), Phi, *State.Instance);
9877   }
9878 }
9879 
9880 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
9881   VPValue *StoredValue = isStore() ? getStoredValue() : nullptr;
9882 
9883   // Attempt to issue a wide load.
9884   LoadInst *LI = dyn_cast<LoadInst>(&Ingredient);
9885   StoreInst *SI = dyn_cast<StoreInst>(&Ingredient);
9886 
9887   assert((LI || SI) && "Invalid Load/Store instruction");
9888   assert((!SI || StoredValue) && "No stored value provided for widened store");
9889   assert((!LI || !StoredValue) && "Stored value provided for widened load");
9890 
9891   Type *ScalarDataTy = getLoadStoreType(&Ingredient);
9892 
9893   auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
9894   const Align Alignment = getLoadStoreAlignment(&Ingredient);
9895   bool CreateGatherScatter = !Consecutive;
9896 
9897   auto &Builder = State.Builder;
9898   InnerLoopVectorizer::VectorParts BlockInMaskParts(State.UF);
9899   bool isMaskRequired = getMask();
9900   if (isMaskRequired)
9901     for (unsigned Part = 0; Part < State.UF; ++Part)
9902       BlockInMaskParts[Part] = State.get(getMask(), Part);
9903 
9904   const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
9905     // Calculate the pointer for the specific unroll-part.
9906     GetElementPtrInst *PartPtr = nullptr;
9907 
9908     bool InBounds = false;
9909     if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
9910       InBounds = gep->isInBounds();
9911     if (Reverse) {
9912       // If the address is consecutive but reversed, then the
9913       // wide store needs to start at the last vector element.
9914       // RunTimeVF =  VScale * VF.getKnownMinValue()
9915       // For fixed-width VScale is 1, then RunTimeVF = VF.getKnownMinValue()
9916       Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), State.VF);
9917       // NumElt = -Part * RunTimeVF
9918       Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF);
9919       // LastLane = 1 - RunTimeVF
9920       Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF);
9921       PartPtr =
9922           cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt));
9923       PartPtr->setIsInBounds(InBounds);
9924       PartPtr = cast<GetElementPtrInst>(
9925           Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane));
9926       PartPtr->setIsInBounds(InBounds);
9927       if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
9928         BlockInMaskParts[Part] =
9929             Builder.CreateVectorReverse(BlockInMaskParts[Part], "reverse");
9930     } else {
9931       Value *Increment =
9932           createStepForVF(Builder, Builder.getInt32Ty(), State.VF, Part);
9933       PartPtr = cast<GetElementPtrInst>(
9934           Builder.CreateGEP(ScalarDataTy, Ptr, Increment));
9935       PartPtr->setIsInBounds(InBounds);
9936     }
9937 
9938     unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
9939     return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
9940   };
9941 
9942   // Handle Stores:
9943   if (SI) {
9944     State.ILV->setDebugLocFromInst(SI);
9945 
9946     for (unsigned Part = 0; Part < State.UF; ++Part) {
9947       Instruction *NewSI = nullptr;
9948       Value *StoredVal = State.get(StoredValue, Part);
9949       if (CreateGatherScatter) {
9950         Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
9951         Value *VectorGep = State.get(getAddr(), Part);
9952         NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
9953                                             MaskPart);
9954       } else {
9955         if (Reverse) {
9956           // If we store to reverse consecutive memory locations, then we need
9957           // to reverse the order of elements in the stored value.
9958           StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse");
9959           // We don't want to update the value in the map as it might be used in
9960           // another expression. So don't call resetVectorValue(StoredVal).
9961         }
9962         auto *VecPtr =
9963             CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0)));
9964         if (isMaskRequired)
9965           NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
9966                                             BlockInMaskParts[Part]);
9967         else
9968           NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
9969       }
9970       State.ILV->addMetadata(NewSI, SI);
9971     }
9972     return;
9973   }
9974 
9975   // Handle loads.
9976   assert(LI && "Must have a load instruction");
9977   State.ILV->setDebugLocFromInst(LI);
9978   for (unsigned Part = 0; Part < State.UF; ++Part) {
9979     Value *NewLI;
9980     if (CreateGatherScatter) {
9981       Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
9982       Value *VectorGep = State.get(getAddr(), Part);
9983       NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart,
9984                                          nullptr, "wide.masked.gather");
9985       State.ILV->addMetadata(NewLI, LI);
9986     } else {
9987       auto *VecPtr =
9988           CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0)));
9989       if (isMaskRequired)
9990         NewLI = Builder.CreateMaskedLoad(
9991             DataTy, VecPtr, Alignment, BlockInMaskParts[Part],
9992             PoisonValue::get(DataTy), "wide.masked.load");
9993       else
9994         NewLI =
9995             Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
9996 
9997       // Add metadata to the load, but setVectorValue to the reverse shuffle.
9998       State.ILV->addMetadata(NewLI, LI);
9999       if (Reverse)
10000         NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
10001     }
10002 
10003     State.set(this, NewLI, Part);
10004   }
10005 }
10006 
10007 // Determine how to lower the scalar epilogue, which depends on 1) optimising
10008 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
10009 // predication, and 4) a TTI hook that analyses whether the loop is suitable
10010 // for predication.
10011 static ScalarEpilogueLowering getScalarEpilogueLowering(
10012     Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
10013     BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
10014     AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
10015     LoopVectorizationLegality &LVL) {
10016   // 1) OptSize takes precedence over all other options, i.e. if this is set,
10017   // don't look at hints or options, and don't request a scalar epilogue.
10018   // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
10019   // LoopAccessInfo (due to code dependency and not being able to reliably get
10020   // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
10021   // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
10022   // versioning when the vectorization is forced, unlike hasOptSize. So revert
10023   // back to the old way and vectorize with versioning when forced. See D81345.)
10024   if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
10025                                                       PGSOQueryType::IRPass) &&
10026                           Hints.getForce() != LoopVectorizeHints::FK_Enabled))
10027     return CM_ScalarEpilogueNotAllowedOptSize;
10028 
10029   // 2) If set, obey the directives
10030   if (PreferPredicateOverEpilogue.getNumOccurrences()) {
10031     switch (PreferPredicateOverEpilogue) {
10032     case PreferPredicateTy::ScalarEpilogue:
10033       return CM_ScalarEpilogueAllowed;
10034     case PreferPredicateTy::PredicateElseScalarEpilogue:
10035       return CM_ScalarEpilogueNotNeededUsePredicate;
10036     case PreferPredicateTy::PredicateOrDontVectorize:
10037       return CM_ScalarEpilogueNotAllowedUsePredicate;
10038     };
10039   }
10040 
10041   // 3) If set, obey the hints
10042   switch (Hints.getPredicate()) {
10043   case LoopVectorizeHints::FK_Enabled:
10044     return CM_ScalarEpilogueNotNeededUsePredicate;
10045   case LoopVectorizeHints::FK_Disabled:
10046     return CM_ScalarEpilogueAllowed;
10047   };
10048 
10049   // 4) if the TTI hook indicates this is profitable, request predication.
10050   if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
10051                                        LVL.getLAI()))
10052     return CM_ScalarEpilogueNotNeededUsePredicate;
10053 
10054   return CM_ScalarEpilogueAllowed;
10055 }
10056 
10057 Value *VPTransformState::get(VPValue *Def, unsigned Part) {
10058   // If Values have been set for this Def return the one relevant for \p Part.
10059   if (hasVectorValue(Def, Part))
10060     return Data.PerPartOutput[Def][Part];
10061 
10062   if (!hasScalarValue(Def, {Part, 0})) {
10063     Value *IRV = Def->getLiveInIRValue();
10064     Value *B = ILV->getBroadcastInstrs(IRV);
10065     set(Def, B, Part);
10066     return B;
10067   }
10068 
10069   Value *ScalarValue = get(Def, {Part, 0});
10070   // If we aren't vectorizing, we can just copy the scalar map values over
10071   // to the vector map.
10072   if (VF.isScalar()) {
10073     set(Def, ScalarValue, Part);
10074     return ScalarValue;
10075   }
10076 
10077   auto *RepR = dyn_cast<VPReplicateRecipe>(Def);
10078   bool IsUniform = RepR && RepR->isUniform();
10079 
10080   unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
10081   // Check if there is a scalar value for the selected lane.
10082   if (!hasScalarValue(Def, {Part, LastLane})) {
10083     // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform.
10084     assert(isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) &&
10085            "unexpected recipe found to be invariant");
10086     IsUniform = true;
10087     LastLane = 0;
10088   }
10089 
10090   auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
10091   // Set the insert point after the last scalarized instruction or after the
10092   // last PHI, if LastInst is a PHI. This ensures the insertelement sequence
10093   // will directly follow the scalar definitions.
10094   auto OldIP = Builder.saveIP();
10095   auto NewIP =
10096       isa<PHINode>(LastInst)
10097           ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI())
10098           : std::next(BasicBlock::iterator(LastInst));
10099   Builder.SetInsertPoint(&*NewIP);
10100 
10101   // However, if we are vectorizing, we need to construct the vector values.
10102   // If the value is known to be uniform after vectorization, we can just
10103   // broadcast the scalar value corresponding to lane zero for each unroll
10104   // iteration. Otherwise, we construct the vector values using
10105   // insertelement instructions. Since the resulting vectors are stored in
10106   // State, we will only generate the insertelements once.
10107   Value *VectorValue = nullptr;
10108   if (IsUniform) {
10109     VectorValue = ILV->getBroadcastInstrs(ScalarValue);
10110     set(Def, VectorValue, Part);
10111   } else {
10112     // Initialize packing with insertelements to start from undef.
10113     assert(!VF.isScalable() && "VF is assumed to be non scalable.");
10114     Value *Undef = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
10115     set(Def, Undef, Part);
10116     for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
10117       ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
10118     VectorValue = get(Def, Part);
10119   }
10120   Builder.restoreIP(OldIP);
10121   return VectorValue;
10122 }
10123 
10124 // Process the loop in the VPlan-native vectorization path. This path builds
10125 // VPlan upfront in the vectorization pipeline, which allows to apply
10126 // VPlan-to-VPlan transformations from the very beginning without modifying the
10127 // input LLVM IR.
10128 static bool processLoopInVPlanNativePath(
10129     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
10130     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
10131     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
10132     OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
10133     ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints,
10134     LoopVectorizationRequirements &Requirements) {
10135 
10136   if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
10137     LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
10138     return false;
10139   }
10140   assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
10141   Function *F = L->getHeader()->getParent();
10142   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
10143 
10144   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
10145       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
10146 
10147   LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
10148                                 &Hints, IAI);
10149   // Use the planner for outer loop vectorization.
10150   // TODO: CM is not used at this point inside the planner. Turn CM into an
10151   // optional argument if we don't need it in the future.
10152   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints,
10153                                Requirements, ORE);
10154 
10155   // Get user vectorization factor.
10156   ElementCount UserVF = Hints.getWidth();
10157 
10158   CM.collectElementTypesForWidening();
10159 
10160   // Plan how to best vectorize, return the best VF and its cost.
10161   const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
10162 
10163   // If we are stress testing VPlan builds, do not attempt to generate vector
10164   // code. Masked vector code generation support will follow soon.
10165   // Also, do not attempt to vectorize if no vector code will be produced.
10166   if (VPlanBuildStressTest || EnableVPlanPredication ||
10167       VectorizationFactor::Disabled() == VF)
10168     return false;
10169 
10170   VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10171 
10172   {
10173     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10174                              F->getParent()->getDataLayout());
10175     InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
10176                            &CM, BFI, PSI, Checks);
10177     LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
10178                       << L->getHeader()->getParent()->getName() << "\"\n");
10179     LVP.executePlan(VF.Width, 1, BestPlan, LB, DT);
10180   }
10181 
10182   // Mark the loop as already vectorized to avoid vectorizing again.
10183   Hints.setAlreadyVectorized();
10184   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10185   return true;
10186 }
10187 
10188 // Emit a remark if there are stores to floats that required a floating point
10189 // extension. If the vectorized loop was generated with floating point there
10190 // will be a performance penalty from the conversion overhead and the change in
10191 // the vector width.
10192 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
10193   SmallVector<Instruction *, 4> Worklist;
10194   for (BasicBlock *BB : L->getBlocks()) {
10195     for (Instruction &Inst : *BB) {
10196       if (auto *S = dyn_cast<StoreInst>(&Inst)) {
10197         if (S->getValueOperand()->getType()->isFloatTy())
10198           Worklist.push_back(S);
10199       }
10200     }
10201   }
10202 
10203   // Traverse the floating point stores upwards searching, for floating point
10204   // conversions.
10205   SmallPtrSet<const Instruction *, 4> Visited;
10206   SmallPtrSet<const Instruction *, 4> EmittedRemark;
10207   while (!Worklist.empty()) {
10208     auto *I = Worklist.pop_back_val();
10209     if (!L->contains(I))
10210       continue;
10211     if (!Visited.insert(I).second)
10212       continue;
10213 
10214     // Emit a remark if the floating point store required a floating
10215     // point conversion.
10216     // TODO: More work could be done to identify the root cause such as a
10217     // constant or a function return type and point the user to it.
10218     if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
10219       ORE->emit([&]() {
10220         return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
10221                                           I->getDebugLoc(), L->getHeader())
10222                << "floating point conversion changes vector width. "
10223                << "Mixed floating point precision requires an up/down "
10224                << "cast that will negatively impact performance.";
10225       });
10226 
10227     for (Use &Op : I->operands())
10228       if (auto *OpI = dyn_cast<Instruction>(Op))
10229         Worklist.push_back(OpI);
10230   }
10231 }
10232 
10233 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
10234     : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
10235                                !EnableLoopInterleaving),
10236       VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
10237                               !EnableLoopVectorization) {}
10238 
10239 bool LoopVectorizePass::processLoop(Loop *L) {
10240   assert((EnableVPlanNativePath || L->isInnermost()) &&
10241          "VPlan-native path is not enabled. Only process inner loops.");
10242 
10243 #ifndef NDEBUG
10244   const std::string DebugLocStr = getDebugLocString(L);
10245 #endif /* NDEBUG */
10246 
10247   LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
10248                     << L->getHeader()->getParent()->getName() << "\" from "
10249                     << DebugLocStr << "\n");
10250 
10251   LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI);
10252 
10253   LLVM_DEBUG(
10254       dbgs() << "LV: Loop hints:"
10255              << " force="
10256              << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
10257                      ? "disabled"
10258                      : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
10259                             ? "enabled"
10260                             : "?"))
10261              << " width=" << Hints.getWidth()
10262              << " interleave=" << Hints.getInterleave() << "\n");
10263 
10264   // Function containing loop
10265   Function *F = L->getHeader()->getParent();
10266 
10267   // Looking at the diagnostic output is the only way to determine if a loop
10268   // was vectorized (other than looking at the IR or machine code), so it
10269   // is important to generate an optimization remark for each loop. Most of
10270   // these messages are generated as OptimizationRemarkAnalysis. Remarks
10271   // generated as OptimizationRemark and OptimizationRemarkMissed are
10272   // less verbose reporting vectorized loops and unvectorized loops that may
10273   // benefit from vectorization, respectively.
10274 
10275   if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
10276     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
10277     return false;
10278   }
10279 
10280   PredicatedScalarEvolution PSE(*SE, *L);
10281 
10282   // Check if it is legal to vectorize the loop.
10283   LoopVectorizationRequirements Requirements;
10284   LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
10285                                 &Requirements, &Hints, DB, AC, BFI, PSI);
10286   if (!LVL.canVectorize(EnableVPlanNativePath)) {
10287     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
10288     Hints.emitRemarkWithHints();
10289     return false;
10290   }
10291 
10292   // Check the function attributes and profiles to find out if this function
10293   // should be optimized for size.
10294   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
10295       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);
10296 
10297   // Entrance to the VPlan-native vectorization path. Outer loops are processed
10298   // here. They may require CFG and instruction level transformations before
10299   // even evaluating whether vectorization is profitable. Since we cannot modify
10300   // the incoming IR, we need to build VPlan upfront in the vectorization
10301   // pipeline.
10302   if (!L->isInnermost())
10303     return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
10304                                         ORE, BFI, PSI, Hints, Requirements);
10305 
10306   assert(L->isInnermost() && "Inner loop expected.");
10307 
10308   // Check the loop for a trip count threshold: vectorize loops with a tiny trip
10309   // count by optimizing for size, to minimize overheads.
10310   auto ExpectedTC = getSmallBestKnownTC(*SE, L);
10311   if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
10312     LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
10313                       << "This loop is worth vectorizing only if no scalar "
10314                       << "iteration overheads are incurred.");
10315     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
10316       LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
10317     else {
10318       LLVM_DEBUG(dbgs() << "\n");
10319       SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
10320     }
10321   }
10322 
10323   // Check the function attributes to see if implicit floats are allowed.
10324   // FIXME: This check doesn't seem possibly correct -- what if the loop is
10325   // an integer loop and the vector instructions selected are purely integer
10326   // vector instructions?
10327   if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
10328     reportVectorizationFailure(
10329         "Can't vectorize when the NoImplicitFloat attribute is used",
10330         "loop not vectorized due to NoImplicitFloat attribute",
10331         "NoImplicitFloat", ORE, L);
10332     Hints.emitRemarkWithHints();
10333     return false;
10334   }
10335 
10336   // Check if the target supports potentially unsafe FP vectorization.
10337   // FIXME: Add a check for the type of safety issue (denormal, signaling)
10338   // for the target we're vectorizing for, to make sure none of the
10339   // additional fp-math flags can help.
10340   if (Hints.isPotentiallyUnsafe() &&
10341       TTI->isFPVectorizationPotentiallyUnsafe()) {
10342     reportVectorizationFailure(
10343         "Potentially unsafe FP op prevents vectorization",
10344         "loop not vectorized due to unsafe FP support.",
10345         "UnsafeFP", ORE, L);
10346     Hints.emitRemarkWithHints();
10347     return false;
10348   }
10349 
10350   bool AllowOrderedReductions;
10351   // If the flag is set, use that instead and override the TTI behaviour.
10352   if (ForceOrderedReductions.getNumOccurrences() > 0)
10353     AllowOrderedReductions = ForceOrderedReductions;
10354   else
10355     AllowOrderedReductions = TTI->enableOrderedReductions();
10356   if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) {
10357     ORE->emit([&]() {
10358       auto *ExactFPMathInst = Requirements.getExactFPInst();
10359       return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
10360                                                  ExactFPMathInst->getDebugLoc(),
10361                                                  ExactFPMathInst->getParent())
10362              << "loop not vectorized: cannot prove it is safe to reorder "
10363                 "floating-point operations";
10364     });
10365     LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
10366                          "reorder floating-point operations\n");
10367     Hints.emitRemarkWithHints();
10368     return false;
10369   }
10370 
10371   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
10372   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
10373 
10374   // If an override option has been passed in for interleaved accesses, use it.
10375   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
10376     UseInterleaved = EnableInterleavedMemAccesses;
10377 
10378   // Analyze interleaved memory accesses.
10379   if (UseInterleaved) {
10380     IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
10381   }
10382 
10383   // Use the cost model.
10384   LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
10385                                 F, &Hints, IAI);
10386   CM.collectValuesToIgnore();
10387   CM.collectElementTypesForWidening();
10388 
10389   // Use the planner for vectorization.
10390   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints,
10391                                Requirements, ORE);
10392 
10393   // Get user vectorization factor and interleave count.
10394   ElementCount UserVF = Hints.getWidth();
10395   unsigned UserIC = Hints.getInterleave();
10396 
10397   // Plan how to best vectorize, return the best VF and its cost.
10398   Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
10399 
10400   VectorizationFactor VF = VectorizationFactor::Disabled();
10401   unsigned IC = 1;
10402 
10403   if (MaybeVF) {
10404     VF = *MaybeVF;
10405     // Select the interleave count.
10406     IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue());
10407   }
10408 
10409   // Identify the diagnostic messages that should be produced.
10410   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
10411   bool VectorizeLoop = true, InterleaveLoop = true;
10412   if (VF.Width.isScalar()) {
10413     LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
10414     VecDiagMsg = std::make_pair(
10415         "VectorizationNotBeneficial",
10416         "the cost-model indicates that vectorization is not beneficial");
10417     VectorizeLoop = false;
10418   }
10419 
10420   if (!MaybeVF && UserIC > 1) {
10421     // Tell the user interleaving was avoided up-front, despite being explicitly
10422     // requested.
10423     LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
10424                          "interleaving should be avoided up front\n");
10425     IntDiagMsg = std::make_pair(
10426         "InterleavingAvoided",
10427         "Ignoring UserIC, because interleaving was avoided up front");
10428     InterleaveLoop = false;
10429   } else if (IC == 1 && UserIC <= 1) {
10430     // Tell the user interleaving is not beneficial.
10431     LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
10432     IntDiagMsg = std::make_pair(
10433         "InterleavingNotBeneficial",
10434         "the cost-model indicates that interleaving is not beneficial");
10435     InterleaveLoop = false;
10436     if (UserIC == 1) {
10437       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
10438       IntDiagMsg.second +=
10439           " and is explicitly disabled or interleave count is set to 1";
10440     }
10441   } else if (IC > 1 && UserIC == 1) {
10442     // Tell the user interleaving is beneficial, but it explicitly disabled.
10443     LLVM_DEBUG(
10444         dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
10445     IntDiagMsg = std::make_pair(
10446         "InterleavingBeneficialButDisabled",
10447         "the cost-model indicates that interleaving is beneficial "
10448         "but is explicitly disabled or interleave count is set to 1");
10449     InterleaveLoop = false;
10450   }
10451 
10452   // Override IC if user provided an interleave count.
10453   IC = UserIC > 0 ? UserIC : IC;
10454 
10455   // Emit diagnostic messages, if any.
10456   const char *VAPassName = Hints.vectorizeAnalysisPassName();
10457   if (!VectorizeLoop && !InterleaveLoop) {
10458     // Do not vectorize or interleaving the loop.
10459     ORE->emit([&]() {
10460       return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
10461                                       L->getStartLoc(), L->getHeader())
10462              << VecDiagMsg.second;
10463     });
10464     ORE->emit([&]() {
10465       return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
10466                                       L->getStartLoc(), L->getHeader())
10467              << IntDiagMsg.second;
10468     });
10469     return false;
10470   } else if (!VectorizeLoop && InterleaveLoop) {
10471     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10472     ORE->emit([&]() {
10473       return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
10474                                         L->getStartLoc(), L->getHeader())
10475              << VecDiagMsg.second;
10476     });
10477   } else if (VectorizeLoop && !InterleaveLoop) {
10478     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10479                       << ") in " << DebugLocStr << '\n');
10480     ORE->emit([&]() {
10481       return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
10482                                         L->getStartLoc(), L->getHeader())
10483              << IntDiagMsg.second;
10484     });
10485   } else if (VectorizeLoop && InterleaveLoop) {
10486     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10487                       << ") in " << DebugLocStr << '\n');
10488     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10489   }
10490 
10491   bool DisableRuntimeUnroll = false;
10492   MDNode *OrigLoopID = L->getLoopID();
10493   {
10494     // Optimistically generate runtime checks. Drop them if they turn out to not
10495     // be profitable. Limit the scope of Checks, so the cleanup happens
10496     // immediately after vector codegeneration is done.
10497     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10498                              F->getParent()->getDataLayout());
10499     if (!VF.Width.isScalar() || IC > 1)
10500       Checks.Create(L, *LVL.getLAI(), PSE.getUnionPredicate());
10501 
10502     using namespace ore;
10503     if (!VectorizeLoop) {
10504       assert(IC > 1 && "interleave count should not be 1 or 0");
10505       // If we decided that it is not legal to vectorize the loop, then
10506       // interleave it.
10507       InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
10508                                  &CM, BFI, PSI, Checks);
10509 
10510       VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10511       LVP.executePlan(VF.Width, IC, BestPlan, Unroller, DT);
10512 
10513       ORE->emit([&]() {
10514         return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
10515                                   L->getHeader())
10516                << "interleaved loop (interleaved count: "
10517                << NV("InterleaveCount", IC) << ")";
10518       });
10519     } else {
10520       // If we decided that it is *legal* to vectorize the loop, then do it.
10521 
10522       // Consider vectorizing the epilogue too if it's profitable.
10523       VectorizationFactor EpilogueVF =
10524           CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
10525       if (EpilogueVF.Width.isVector()) {
10526 
10527         // The first pass vectorizes the main loop and creates a scalar epilogue
10528         // to be vectorized by executing the plan (potentially with a different
10529         // factor) again shortly afterwards.
10530         EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1);
10531         EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE,
10532                                            EPI, &LVL, &CM, BFI, PSI, Checks);
10533 
10534         VPlan &BestMainPlan = LVP.getBestPlanFor(EPI.MainLoopVF);
10535         LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF, BestMainPlan, MainILV,
10536                         DT);
10537         ++LoopsVectorized;
10538 
10539         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10540         formLCSSARecursively(*L, *DT, LI, SE);
10541 
10542         // Second pass vectorizes the epilogue and adjusts the control flow
10543         // edges from the first pass.
10544         EPI.MainLoopVF = EPI.EpilogueVF;
10545         EPI.MainLoopUF = EPI.EpilogueUF;
10546         EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
10547                                                  ORE, EPI, &LVL, &CM, BFI, PSI,
10548                                                  Checks);
10549 
10550         VPlan &BestEpiPlan = LVP.getBestPlanFor(EPI.EpilogueVF);
10551         LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV,
10552                         DT);
10553         ++LoopsEpilogueVectorized;
10554 
10555         if (!MainILV.areSafetyChecksAdded())
10556           DisableRuntimeUnroll = true;
10557       } else {
10558         InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
10559                                &LVL, &CM, BFI, PSI, Checks);
10560 
10561         VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10562         LVP.executePlan(VF.Width, IC, BestPlan, LB, DT);
10563         ++LoopsVectorized;
10564 
10565         // Add metadata to disable runtime unrolling a scalar loop when there
10566         // are no runtime checks about strides and memory. A scalar loop that is
10567         // rarely used is not worth unrolling.
10568         if (!LB.areSafetyChecksAdded())
10569           DisableRuntimeUnroll = true;
10570       }
10571       // Report the vectorization decision.
10572       ORE->emit([&]() {
10573         return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
10574                                   L->getHeader())
10575                << "vectorized loop (vectorization width: "
10576                << NV("VectorizationFactor", VF.Width)
10577                << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
10578       });
10579     }
10580 
10581     if (ORE->allowExtraAnalysis(LV_NAME))
10582       checkMixedPrecision(L, ORE);
10583   }
10584 
10585   Optional<MDNode *> RemainderLoopID =
10586       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
10587                                       LLVMLoopVectorizeFollowupEpilogue});
10588   if (RemainderLoopID.hasValue()) {
10589     L->setLoopID(RemainderLoopID.getValue());
10590   } else {
10591     if (DisableRuntimeUnroll)
10592       AddRuntimeUnrollDisableMetaData(L);
10593 
10594     // Mark the loop as already vectorized to avoid vectorizing again.
10595     Hints.setAlreadyVectorized();
10596   }
10597 
10598   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10599   return true;
10600 }
10601 
10602 LoopVectorizeResult LoopVectorizePass::runImpl(
10603     Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
10604     DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
10605     DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
10606     std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
10607     OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
10608   SE = &SE_;
10609   LI = &LI_;
10610   TTI = &TTI_;
10611   DT = &DT_;
10612   BFI = &BFI_;
10613   TLI = TLI_;
10614   AA = &AA_;
10615   AC = &AC_;
10616   GetLAA = &GetLAA_;
10617   DB = &DB_;
10618   ORE = &ORE_;
10619   PSI = PSI_;
10620 
10621   // Don't attempt if
10622   // 1. the target claims to have no vector registers, and
10623   // 2. interleaving won't help ILP.
10624   //
10625   // The second condition is necessary because, even if the target has no
10626   // vector registers, loop vectorization may still enable scalar
10627   // interleaving.
10628   if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
10629       TTI->getMaxInterleaveFactor(1) < 2)
10630     return LoopVectorizeResult(false, false);
10631 
10632   bool Changed = false, CFGChanged = false;
10633 
10634   // The vectorizer requires loops to be in simplified form.
10635   // Since simplification may add new inner loops, it has to run before the
10636   // legality and profitability checks. This means running the loop vectorizer
10637   // will simplify all loops, regardless of whether anything end up being
10638   // vectorized.
10639   for (auto &L : *LI)
10640     Changed |= CFGChanged |=
10641         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10642 
10643   // Build up a worklist of inner-loops to vectorize. This is necessary as
10644   // the act of vectorizing or partially unrolling a loop creates new loops
10645   // and can invalidate iterators across the loops.
10646   SmallVector<Loop *, 8> Worklist;
10647 
10648   for (Loop *L : *LI)
10649     collectSupportedLoops(*L, LI, ORE, Worklist);
10650 
10651   LoopsAnalyzed += Worklist.size();
10652 
10653   // Now walk the identified inner loops.
10654   while (!Worklist.empty()) {
10655     Loop *L = Worklist.pop_back_val();
10656 
10657     // For the inner loops we actually process, form LCSSA to simplify the
10658     // transform.
10659     Changed |= formLCSSARecursively(*L, *DT, LI, SE);
10660 
10661     Changed |= CFGChanged |= processLoop(L);
10662   }
10663 
10664   // Process each loop nest in the function.
10665   return LoopVectorizeResult(Changed, CFGChanged);
10666 }
10667 
10668 PreservedAnalyses LoopVectorizePass::run(Function &F,
10669                                          FunctionAnalysisManager &AM) {
10670     auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
10671     auto &LI = AM.getResult<LoopAnalysis>(F);
10672     auto &TTI = AM.getResult<TargetIRAnalysis>(F);
10673     auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
10674     auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
10675     auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
10676     auto &AA = AM.getResult<AAManager>(F);
10677     auto &AC = AM.getResult<AssumptionAnalysis>(F);
10678     auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
10679     auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
10680 
10681     auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
10682     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
10683         [&](Loop &L) -> const LoopAccessInfo & {
10684       LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,      SE,
10685                                         TLI, TTI, nullptr, nullptr, nullptr};
10686       return LAM.getResult<LoopAccessAnalysis>(L, AR);
10687     };
10688     auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
10689     ProfileSummaryInfo *PSI =
10690         MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
10691     LoopVectorizeResult Result =
10692         runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
10693     if (!Result.MadeAnyChange)
10694       return PreservedAnalyses::all();
10695     PreservedAnalyses PA;
10696 
10697     // We currently do not preserve loopinfo/dominator analyses with outer loop
10698     // vectorization. Until this is addressed, mark these analyses as preserved
10699     // only for non-VPlan-native path.
10700     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
10701     if (!EnableVPlanNativePath) {
10702       PA.preserve<LoopAnalysis>();
10703       PA.preserve<DominatorTreeAnalysis>();
10704     }
10705 
10706     if (Result.MadeCFGChange) {
10707       // Making CFG changes likely means a loop got vectorized. Indicate that
10708       // extra simplification passes should be run.
10709       // TODO: MadeCFGChanges is not a prefect proxy. Extra passes should only
10710       // be run if runtime checks have been added.
10711       AM.getResult<ShouldRunExtraVectorPasses>(F);
10712       PA.preserve<ShouldRunExtraVectorPasses>();
10713     } else {
10714       PA.preserveSet<CFGAnalyses>();
10715     }
10716     return PA;
10717 }
10718 
10719 void LoopVectorizePass::printPipeline(
10720     raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
10721   static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
10722       OS, MapClassName2PassName);
10723 
10724   OS << "<";
10725   OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
10726   OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
10727   OS << ">";
10728 }
10729