1 //===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
10 // and generates target-independent LLVM-IR.
11 // The vectorizer uses the TargetTransformInfo analysis to estimate the costs
12 // of instructions in order to estimate the profitability of vectorization.
13 //
14 // The loop vectorizer combines consecutive loop iterations into a single
15 // 'wide' iteration. After this transformation the index is incremented
16 // by the SIMD vector width, and not by one.
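//
// For illustration, a scalar loop such as
//
//   for (i = 0; i < n; ++i)
//     a[i] = b[i] + c[i];
//
// is conceptually transformed (shown in pseudocode slice notation, assuming a
// vector width of 4) into
//
//   for (i = 0; i + 3 < n; i += 4)
//     a[i:i+3] = b[i:i+3] + c[i:i+3];   // one wide iteration
//
// with any remaining iterations handled by a scalar epilogue loop.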
17 //
// This pass has four parts:
19 // 1. The main loop pass that drives the different parts.
20 // 2. LoopVectorizationLegality - A unit that checks for the legality
21 //    of the vectorization.
22 // 3. InnerLoopVectorizer - A unit that performs the actual
23 //    widening of instructions.
24 // 4. LoopVectorizationCostModel - A unit that checks for the profitability
25 //    of vectorization. It decides on the optimal vector width, which
26 //    can be one, if vectorization is not profitable.
27 //
// There is an ongoing development effort to migrate the loop vectorizer to the
29 // VPlan infrastructure and to introduce outer loop vectorization support (see
30 // docs/Proposal/VectorizationPlan.rst and
31 // http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
32 // purpose, we temporarily introduced the VPlan-native vectorization path: an
33 // alternative vectorization path that is natively implemented on top of the
34 // VPlan infrastructure. See EnableVPlanNativePath for enabling.
35 //
36 //===----------------------------------------------------------------------===//
37 //
38 // The reduction-variable vectorization is based on the paper:
39 //  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
40 //
41 // Variable uniformity checks are inspired by:
42 //  Karrenberg, R. and Hack, S. Whole Function Vectorization.
43 //
44 // The interleaved access vectorization is based on the paper:
45 //  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
46 //  Data for SIMD
47 //
48 // Other ideas/concepts are from:
49 //  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
50 //
51 //  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
52 //  Vectorizing Compilers.
53 //
54 //===----------------------------------------------------------------------===//
55 
56 #include "llvm/Transforms/Vectorize/LoopVectorize.h"
57 #include "LoopVectorizationPlanner.h"
58 #include "VPRecipeBuilder.h"
59 #include "VPlan.h"
60 #include "VPlanHCFGBuilder.h"
61 #include "VPlanPredicator.h"
62 #include "VPlanTransforms.h"
63 #include "llvm/ADT/APInt.h"
64 #include "llvm/ADT/ArrayRef.h"
65 #include "llvm/ADT/DenseMap.h"
66 #include "llvm/ADT/DenseMapInfo.h"
67 #include "llvm/ADT/Hashing.h"
68 #include "llvm/ADT/MapVector.h"
69 #include "llvm/ADT/None.h"
70 #include "llvm/ADT/Optional.h"
71 #include "llvm/ADT/STLExtras.h"
72 #include "llvm/ADT/SmallPtrSet.h"
73 #include "llvm/ADT/SmallSet.h"
74 #include "llvm/ADT/SmallVector.h"
75 #include "llvm/ADT/Statistic.h"
76 #include "llvm/ADT/StringRef.h"
77 #include "llvm/ADT/Twine.h"
78 #include "llvm/ADT/iterator_range.h"
79 #include "llvm/Analysis/AssumptionCache.h"
80 #include "llvm/Analysis/BasicAliasAnalysis.h"
81 #include "llvm/Analysis/BlockFrequencyInfo.h"
82 #include "llvm/Analysis/CFG.h"
83 #include "llvm/Analysis/CodeMetrics.h"
84 #include "llvm/Analysis/DemandedBits.h"
85 #include "llvm/Analysis/GlobalsModRef.h"
86 #include "llvm/Analysis/LoopAccessAnalysis.h"
87 #include "llvm/Analysis/LoopAnalysisManager.h"
88 #include "llvm/Analysis/LoopInfo.h"
89 #include "llvm/Analysis/LoopIterator.h"
90 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
91 #include "llvm/Analysis/ProfileSummaryInfo.h"
92 #include "llvm/Analysis/ScalarEvolution.h"
93 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
94 #include "llvm/Analysis/TargetLibraryInfo.h"
95 #include "llvm/Analysis/TargetTransformInfo.h"
96 #include "llvm/Analysis/VectorUtils.h"
97 #include "llvm/IR/Attributes.h"
98 #include "llvm/IR/BasicBlock.h"
99 #include "llvm/IR/CFG.h"
100 #include "llvm/IR/Constant.h"
101 #include "llvm/IR/Constants.h"
102 #include "llvm/IR/DataLayout.h"
103 #include "llvm/IR/DebugInfoMetadata.h"
104 #include "llvm/IR/DebugLoc.h"
105 #include "llvm/IR/DerivedTypes.h"
106 #include "llvm/IR/DiagnosticInfo.h"
107 #include "llvm/IR/Dominators.h"
108 #include "llvm/IR/Function.h"
109 #include "llvm/IR/IRBuilder.h"
110 #include "llvm/IR/InstrTypes.h"
111 #include "llvm/IR/Instruction.h"
112 #include "llvm/IR/Instructions.h"
113 #include "llvm/IR/IntrinsicInst.h"
114 #include "llvm/IR/Intrinsics.h"
115 #include "llvm/IR/LLVMContext.h"
116 #include "llvm/IR/Metadata.h"
117 #include "llvm/IR/Module.h"
118 #include "llvm/IR/Operator.h"
119 #include "llvm/IR/PatternMatch.h"
120 #include "llvm/IR/Type.h"
121 #include "llvm/IR/Use.h"
122 #include "llvm/IR/User.h"
123 #include "llvm/IR/Value.h"
124 #include "llvm/IR/ValueHandle.h"
125 #include "llvm/IR/Verifier.h"
126 #include "llvm/InitializePasses.h"
127 #include "llvm/Pass.h"
128 #include "llvm/Support/Casting.h"
129 #include "llvm/Support/CommandLine.h"
130 #include "llvm/Support/Compiler.h"
131 #include "llvm/Support/Debug.h"
132 #include "llvm/Support/ErrorHandling.h"
133 #include "llvm/Support/InstructionCost.h"
134 #include "llvm/Support/MathExtras.h"
135 #include "llvm/Support/raw_ostream.h"
136 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
137 #include "llvm/Transforms/Utils/InjectTLIMappings.h"
138 #include "llvm/Transforms/Utils/LoopSimplify.h"
139 #include "llvm/Transforms/Utils/LoopUtils.h"
140 #include "llvm/Transforms/Utils/LoopVersioning.h"
141 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
142 #include "llvm/Transforms/Utils/SizeOpts.h"
143 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
144 #include <algorithm>
145 #include <cassert>
146 #include <cstdint>
147 #include <cstdlib>
148 #include <functional>
149 #include <iterator>
150 #include <limits>
151 #include <memory>
152 #include <string>
153 #include <tuple>
154 #include <utility>
155 
156 using namespace llvm;
157 
158 #define LV_NAME "loop-vectorize"
159 #define DEBUG_TYPE LV_NAME
160 
161 #ifndef NDEBUG
162 const char VerboseDebug[] = DEBUG_TYPE "-verbose";
163 #endif
164 
165 /// @{
166 /// Metadata attribute names
167 const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
168 const char LLVMLoopVectorizeFollowupVectorized[] =
169     "llvm.loop.vectorize.followup_vectorized";
170 const char LLVMLoopVectorizeFollowupEpilogue[] =
171     "llvm.loop.vectorize.followup_epilogue";
172 /// @}
173 
174 STATISTIC(LoopsVectorized, "Number of loops vectorized");
175 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
176 STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");
177 
178 static cl::opt<bool> EnableEpilogueVectorization(
179     "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
180     cl::desc("Enable vectorization of epilogue loops."));
181 
182 static cl::opt<unsigned> EpilogueVectorizationForceVF(
183     "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
184     cl::desc("When epilogue vectorization is enabled, and a value greater than "
185              "1 is specified, forces the given VF for all applicable epilogue "
186              "loops."));
187 
188 static cl::opt<unsigned> EpilogueVectorizationMinVF(
189     "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
190     cl::desc("Only loops with vectorization factor equal to or larger than "
191              "the specified value are considered for epilogue vectorization."));
192 
193 /// Loops with a known constant trip count below this number are vectorized only
194 /// if no scalar iteration overheads are incurred.
195 static cl::opt<unsigned> TinyTripCountVectorThreshold(
196     "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
197     cl::desc("Loops with a constant trip count that is smaller than this "
198              "value are vectorized only if no scalar iteration overheads "
199              "are incurred."));
200 
201 static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
202     "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
203     cl::desc("The maximum allowed number of runtime memory checks with a "
204              "vectorize(enable) pragma."));
205 
// The option prefer-predicate-over-epilogue indicates that an epilogue is
// undesired and that predication is preferred instead. That is, the
// vectorizer will try to fold the tail loop (epilogue) into the vector body
// and predicate the instructions accordingly. If tail-folding fails, the
// fallback strategy depends on one of the following values:
211 namespace PreferPredicateTy {
212   enum Option {
213     ScalarEpilogue = 0,
214     PredicateElseScalarEpilogue,
215     PredicateOrDontVectorize
216   };
217 } // namespace PreferPredicateTy
218 
219 static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
220     "prefer-predicate-over-epilogue",
221     cl::init(PreferPredicateTy::ScalarEpilogue),
222     cl::Hidden,
223     cl::desc("Tail-folding and predication preferences over creating a scalar "
224              "epilogue loop."),
    cl::values(
        clEnumValN(PreferPredicateTy::ScalarEpilogue, "scalar-epilogue",
                   "Don't tail-predicate loops, create a scalar epilogue"),
        clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                   "predicate-else-scalar-epilogue",
                   "Prefer tail-folding, create a scalar epilogue if "
                   "tail-folding fails"),
        clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                   "predicate-dont-vectorize",
                   "Prefer tail-folding, don't attempt vectorization if "
                   "tail-folding fails")));
236 
237 static cl::opt<bool> MaximizeBandwidth(
238     "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
239     cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in the loop."));
241 
242 static cl::opt<bool> EnableInterleavedMemAccesses(
243     "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
244     cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
245 
246 /// An interleave-group may need masking if it resides in a block that needs
247 /// predication, or in order to mask away gaps.
248 static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
249     "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in "
             "a loop"));
251 
252 static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
253     "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
255              "below this number"));
256 
257 static cl::opt<unsigned> ForceTargetNumScalarRegs(
258     "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
259     cl::desc("A flag that overrides the target's number of scalar registers."));
260 
261 static cl::opt<unsigned> ForceTargetNumVectorRegs(
262     "force-target-num-vector-regs", cl::init(0), cl::Hidden,
263     cl::desc("A flag that overrides the target's number of vector registers."));
264 
265 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
266     "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
267     cl::desc("A flag that overrides the target's max interleave factor for "
268              "scalar loops."));
269 
270 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
271     "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
272     cl::desc("A flag that overrides the target's max interleave factor for "
273              "vectorized loops."));
274 
275 static cl::opt<unsigned> ForceTargetInstructionCost(
276     "force-target-instruction-cost", cl::init(0), cl::Hidden,
277     cl::desc("A flag that overrides the target's expected cost for "
278              "an instruction to a single constant value. Mostly "
279              "useful for getting consistent testing."));
280 
281 static cl::opt<bool> ForceTargetSupportsScalableVectors(
282     "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
283     cl::desc(
284         "Pretend that scalable vectors are supported, even if the target does "
285         "not support them. This flag should only be used for testing."));
286 
287 static cl::opt<unsigned> SmallLoopCost(
288     "small-loop-cost", cl::init(20), cl::Hidden,
289     cl::desc(
290         "The cost of a loop that is considered 'small' by the interleaver."));
291 
292 static cl::opt<bool> LoopVectorizeWithBlockFrequency(
293     "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
294     cl::desc("Enable the use of the block frequency analysis to access PGO "
295              "heuristics minimizing code growth in cold regions and being more "
296              "aggressive in hot regions."));
297 
// Interleave loops at runtime to improve load/store throughput.
299 static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
300     "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
301     cl::desc(
302         "Enable runtime interleaving until load/store ports are saturated"));
303 
304 /// Interleave small loops with scalar reductions.
305 static cl::opt<bool> InterleaveSmallLoopScalarReduction(
306     "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
307     cl::desc("Enable interleaving for loops with small iteration counts that "
308              "contain scalar reductions to expose ILP."));
309 
310 /// The number of stores in a loop that are allowed to need predication.
311 static cl::opt<unsigned> NumberOfStoresToPredicate(
312     "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
313     cl::desc("Max number of stores to be predicated behind an if."));
314 
315 static cl::opt<bool> EnableIndVarRegisterHeur(
316     "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
317     cl::desc("Count the induction variable only once when interleaving"));
318 
319 static cl::opt<bool> EnableCondStoresVectorization(
320     "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if-predication of stores during vectorization."));
322 
323 static cl::opt<unsigned> MaxNestedScalarReductionIC(
324     "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
325     cl::desc("The maximum interleave count to use when interleaving a scalar "
326              "reduction in a nested loop."));
327 
328 static cl::opt<bool>
329     PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
330                            cl::Hidden,
331                            cl::desc("Prefer in-loop vector reductions, "
                                    "overriding the target's preference."));
333 
334 static cl::opt<bool> ForceOrderedReductions(
335     "force-ordered-reductions", cl::init(false), cl::Hidden,
    cl::desc("Enable the vectorization of loops with in-order (strict) "
337              "FP reductions"));
338 
339 static cl::opt<bool> PreferPredicatedReductionSelect(
340     "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
341     cl::desc(
        "Prefer predicating a reduction operation over an after-loop select."));
343 
344 cl::opt<bool> EnableVPlanNativePath(
345     "enable-vplan-native-path", cl::init(false), cl::Hidden,
346     cl::desc("Enable VPlan-native vectorization path with "
347              "support for outer loop vectorization."));
348 
349 // FIXME: Remove this switch once we have divergence analysis. Currently we
350 // assume divergent non-backedge branches when this switch is true.
351 cl::opt<bool> EnableVPlanPredication(
352     "enable-vplan-predication", cl::init(false), cl::Hidden,
353     cl::desc("Enable VPlan-native vectorization path predicator with "
354              "support for outer loop vectorization."));
355 
356 // This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
358 // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
359 // verification of the H-CFGs built.
360 static cl::opt<bool> VPlanBuildStressTest(
361     "vplan-build-stress-test", cl::init(false), cl::Hidden,
362     cl::desc(
363         "Build VPlan for every supported loop nest in the function and bail "
364         "out right after the build (stress test the VPlan H-CFG construction "
365         "in the VPlan-native vectorization path)."));
366 
367 cl::opt<bool> llvm::EnableLoopInterleaving(
368     "interleave-loops", cl::init(true), cl::Hidden,
369     cl::desc("Enable loop interleaving in Loop vectorization passes"));
370 cl::opt<bool> llvm::EnableLoopVectorization(
371     "vectorize-loops", cl::init(true), cl::Hidden,
372     cl::desc("Run the Loop vectorization passes"));
373 
374 cl::opt<bool> PrintVPlansInDotFormat(
375     "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
376     cl::desc("Use dot format instead of plain text when dumping VPlans"));
377 
378 /// A helper function that returns true if the given type is irregular. The
379 /// type is irregular if its allocated size doesn't equal the store size of an
380 /// element of the corresponding vector type.
381 static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
382   // Determine if an array of N elements of type Ty is "bitcast compatible"
383   // with a <N x Ty> vector.
384   // This is only true if there is no padding between the array elements.
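  // For example, x86_fp80 has a type size of 80 bits but is typically
  // allocated in 96 or 128 bits, so an array of x86_fp80 has padding between
  // elements and the type is considered irregular.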
385   return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
386 }
387 
388 /// A helper function that returns the reciprocal of the block probability of
389 /// predicated blocks. If we return X, we are assuming the predicated block
390 /// will execute once for every X iterations of the loop header.
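/// For example, with the current return value of 2, a predicated block is
/// assumed to execute on every other iteration, so the cost model scales its
/// cost down by a factor of 2.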
391 ///
392 /// TODO: We should use actual block probability here, if available. Currently,
393 ///       we always assume predicated blocks have a 50% chance of executing.
394 static unsigned getReciprocalPredBlockProb() { return 2; }
395 
396 /// A helper function that returns an integer or floating-point constant with
397 /// value C.
398 static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
399   return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
400                            : ConstantFP::get(Ty, C);
401 }
402 
403 /// Returns "best known" trip count for the specified loop \p L as defined by
404 /// the following procedure:
405 ///   1) Returns exact trip count if it is known.
406 ///   2) Returns expected trip count according to profile data if any.
407 ///   3) Returns upper bound estimate if it is known.
408 ///   4) Returns None if all of the above failed.
409 static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
410   // Check if exact trip count is known.
411   if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
412     return ExpectedTC;
413 
414   // Check if there is an expected trip count available from profile data.
415   if (LoopVectorizeWithBlockFrequency)
416     if (auto EstimatedTC = getLoopEstimatedTripCount(L))
417       return EstimatedTC;
418 
419   // Check if upper bound estimate is known.
420   if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
421     return ExpectedTC;
422 
423   return None;
424 }
425 
426 // Forward declare GeneratedRTChecks.
427 class GeneratedRTChecks;
428 
429 namespace llvm {
430 
431 AnalysisKey ShouldRunExtraVectorPasses::Key;
432 
433 /// InnerLoopVectorizer vectorizes loops which contain only one basic
434 /// block to a specified vectorization factor (VF).
435 /// This class performs the widening of scalars into vectors, or multiple
436 /// scalars. This class also implements the following features:
437 /// * It inserts an epilogue loop for handling loops that don't have iteration
438 ///   counts that are known to be a multiple of the vectorization factor.
439 /// * It handles the code generation for reduction variables.
440 /// * Scalarization (implementation using scalars) of un-vectorizable
441 ///   instructions.
442 /// InnerLoopVectorizer does not perform any vectorization-legality
443 /// checks, and relies on the caller to check for the different legality
444 /// aspects. The InnerLoopVectorizer relies on the
445 /// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found in the loop.
447 class InnerLoopVectorizer {
448 public:
449   InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
450                       LoopInfo *LI, DominatorTree *DT,
451                       const TargetLibraryInfo *TLI,
452                       const TargetTransformInfo *TTI, AssumptionCache *AC,
453                       OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
454                       unsigned UnrollFactor, LoopVectorizationLegality *LVL,
455                       LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
456                       ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
457       : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
458         AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
459         Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
460         PSI(PSI), RTChecks(RTChecks) {
461     // Query this against the original loop and save it here because the profile
462     // of the original loop header may change as the transformation happens.
463     OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
464         OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
465   }
466 
467   virtual ~InnerLoopVectorizer() = default;
468 
469   /// Create a new empty loop that will contain vectorized instructions later
470   /// on, while the old loop will be used as the scalar remainder. Control flow
471   /// is generated around the vectorized (and scalar epilogue) loops consisting
472   /// of various checks and bypasses. Return the pre-header block of the new
473   /// loop and the start value for the canonical induction, if it is != 0. The
474   /// latter is the case when vectorizing the epilogue loop. In the case of
  /// epilogue vectorization, this function is overridden to handle the more
476   /// complex control flow around the loops.
477   virtual std::pair<BasicBlock *, Value *> createVectorizedLoopSkeleton();
478 
479   /// Widen a single call instruction within the innermost loop.
480   void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
481                             VPTransformState &State);
482 
  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
484   void fixVectorizedLoop(VPTransformState &State);
485 
486   // Return true if any runtime check is added.
487   bool areSafetyChecksAdded() { return AddedSafetyChecks; }
488 
489   /// A type for vectorized values in the new loop. Each value from the
490   /// original loop, when vectorized, is represented by UF vector values in the
491   /// new unrolled loop, where UF is the unroll factor.
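  /// For example, with VF = 4 and UF = 2, an i32 value from the original loop
  /// is represented by two <4 x i32> vectors in the vector loop.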
492   using VectorParts = SmallVector<Value *, 2>;
493 
494   /// Vectorize a single first-order recurrence or pointer induction PHINode in
495   /// a block. This method handles the induction variable canonicalization. It
496   /// supports both VF = 1 for unrolled loops and arbitrary length vectors.
497   void widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR,
498                            VPTransformState &State);
499 
500   /// A helper function to scalarize a single Instruction in the innermost loop.
501   /// Generates a sequence of scalar instances for each lane between \p MinLane
502   /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
503   /// inclusive. Uses the VPValue operands from \p RepRecipe instead of \p
504   /// Instr's operands.
505   void scalarizeInstruction(Instruction *Instr, VPReplicateRecipe *RepRecipe,
506                             const VPIteration &Instance, bool IfPredicateInstr,
507                             VPTransformState &State);
508 
509   /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
510   /// is provided, the integer induction variable will first be truncated to
511   /// the corresponding type. \p CanonicalIV is the scalar value generated for
512   /// the canonical induction variable.
513   void widenIntOrFpInduction(PHINode *IV, const InductionDescriptor &ID,
514                              Value *Start, TruncInst *Trunc, VPValue *Def,
515                              VPTransformState &State, Value *CanonicalIV);
516 
517   /// Construct the vector value of a scalarized value \p V one lane at a time.
518   void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
519                                  VPTransformState &State);
520 
521   /// Try to vectorize interleaved access group \p Group with the base address
522   /// given in \p Addr, optionally masking the vector operations if \p
523   /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
524   /// values in the vectorized loop.
525   void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
526                                 ArrayRef<VPValue *> VPDefs,
527                                 VPTransformState &State, VPValue *Addr,
528                                 ArrayRef<VPValue *> StoredValues,
529                                 VPValue *BlockInMask = nullptr);
530 
  /// Set the debug location in the builder \p CustomBuilder using the debug
  /// location in \p V. If \p CustomBuilder is None then the class member
  /// Builder is used.
533   void setDebugLocFromInst(const Value *V,
534                            Optional<IRBuilder<> *> CustomBuilder = None);
535 
536   /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
537   void fixNonInductionPHIs(VPTransformState &State);
538 
539   /// Returns true if the reordering of FP operations is not allowed, but we are
540   /// able to vectorize with strict in-order reductions for the given RdxDesc.
541   bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc);
542 
543   /// Create a broadcast instruction. This method generates a broadcast
544   /// instruction (shuffle) for loop invariant values and for the induction
545   /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// This is needed because each iteration in the loop corresponds to a SIMD
547   /// element.
548   virtual Value *getBroadcastInstrs(Value *V);
549 
550   /// Add metadata from one instruction to another.
551   ///
552   /// This includes both the original MDs from \p From and additional ones (\see
553   /// addNewMetadata).  Use this for *newly created* instructions in the vector
554   /// loop.
555   void addMetadata(Instruction *To, Instruction *From);
556 
557   /// Similar to the previous function but it adds the metadata to a
558   /// vector of instructions.
559   void addMetadata(ArrayRef<Value *> To, Instruction *From);
560 
561 protected:
562   friend class LoopVectorizationPlanner;
563 
564   /// A small list of PHINodes.
565   using PhiVector = SmallVector<PHINode *, 4>;
566 
567   /// A type for scalarized values in the new loop. Each value from the
568   /// original loop, when scalarized, is represented by UF x VF scalar values
569   /// in the new unrolled loop, where UF is the unroll factor and VF is the
570   /// vectorization factor.
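  /// For example, with VF = 4 and UF = 2, a value is represented by 2 x 4 = 8
  /// scalar values, indexed by part and lane.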
571   using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;
572 
573   /// Set up the values of the IVs correctly when exiting the vector loop.
574   void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
575                     Value *CountRoundDown, Value *EndValue,
576                     BasicBlock *MiddleBlock);
577 
  /// Introduce a conditional branch (on true, condition to be set later) at the
  /// end of the header (which is also the latch), connecting it to itself
  /// (across the backedge) and to the exit block of \p L.
581   void createHeaderBranch(Loop *L);
582 
583   /// Handle all cross-iteration phis in the header.
584   void fixCrossIterationPHIs(VPTransformState &State);
585 
586   /// Create the exit value of first order recurrences in the middle block and
587   /// update their users.
588   void fixFirstOrderRecurrence(VPFirstOrderRecurrencePHIRecipe *PhiR,
589                                VPTransformState &State);
590 
591   /// Create code for the loop exit value of the reduction.
592   void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);
593 
594   /// Clear NSW/NUW flags from reduction instructions if necessary.
595   void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
596                                VPTransformState &State);
597 
  /// Fix up the LCSSA phi nodes in the unique exit block.  This simply
599   /// means we need to add the appropriate incoming value from the middle
600   /// block as exiting edges from the scalar epilogue loop (if present) are
601   /// already in place, and we exit the vector loop exclusively to the middle
602   /// block.
603   void fixLCSSAPHIs(VPTransformState &State);
604 
605   /// Iteratively sink the scalarized operands of a predicated instruction into
606   /// the block that was created for it.
607   void sinkScalarOperands(Instruction *PredInst);
608 
609   /// Shrinks vector element sizes to the smallest bitwidth they can be legally
610   /// represented as.
611   void truncateToMinimalBitwidths(VPTransformState &State);
612 
613   /// Compute scalar induction steps. \p ScalarIV is the scalar induction
614   /// variable on which to base the steps, \p Step is the size of the step, and
615   /// \p EntryVal is the value from the original loop that maps to the steps.
616   /// Note that \p EntryVal doesn't have to be an induction variable - it
617   /// can also be a truncate instruction.
618   void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
619                         const InductionDescriptor &ID, VPValue *Def,
620                         VPTransformState &State);
621 
622   /// Create a vector induction phi node based on an existing scalar one. \p
623   /// EntryVal is the value from the original loop that maps to the vector phi
624   /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
625   /// truncate instruction, instead of widening the original IV, we widen a
626   /// version of the IV truncated to \p EntryVal's type.
627   void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
628                                        Value *Step, Value *Start,
629                                        Instruction *EntryVal, VPValue *Def,
630                                        VPTransformState &State);
631 
632   /// Returns true if an instruction \p I should be scalarized instead of
633   /// vectorized for the chosen vectorization factor.
634   bool shouldScalarizeInstruction(Instruction *I) const;
635 
636   /// Returns true if we should generate a scalar version of \p IV.
637   bool needsScalarInduction(Instruction *IV) const;
638 
639   /// Returns (and creates if needed) the original loop trip count.
640   Value *getOrCreateTripCount(Loop *NewLoop);
641 
642   /// Returns (and creates if needed) the trip count of the widened loop.
643   Value *getOrCreateVectorTripCount(Loop *NewLoop);
644 
645   /// Returns a bitcasted value to the requested vector type.
646   /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
647   Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
648                                 const DataLayout &DL);
649 
650   /// Emit a bypass check to see if the vector trip count is zero, including if
651   /// it overflows.
652   void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
653 
654   /// Emit a bypass check to see if all of the SCEV assumptions we've
655   /// had to make are correct. Returns the block containing the checks or
656   /// nullptr if no checks have been added.
657   BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass);
658 
659   /// Emit bypass checks to check any memory assumptions we may have made.
660   /// Returns the block containing the checks or nullptr if no checks have been
661   /// added.
662   BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);
663 
664   /// Compute the transformed value of Index at offset StartValue using step
665   /// StepValue.
666   /// For integer induction, returns StartValue + Index * StepValue.
667   /// For pointer induction, returns StartValue[Index * StepValue].
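  /// For example, with integer StartValue 10 and StepValue 3, Index 4 is
  /// transformed to 10 + 4 * 3 = 22.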
668   /// FIXME: The newly created binary instructions should contain nsw/nuw
669   /// flags, which can be found from the original scalar operations.
670   Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
671                               const DataLayout &DL,
672                               const InductionDescriptor &ID,
673                               BasicBlock *VectorHeader) const;
674 
675   /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
676   /// vector loop preheader, middle block and scalar preheader. Also
677   /// allocate a loop object for the new vector loop and return it.
678   Loop *createVectorLoopSkeleton(StringRef Prefix);
679 
680   /// Create new phi nodes for the induction variables to resume iteration count
681   /// in the scalar epilogue, from where the vectorized loop left off.
  /// In cases where the loop skeleton is more complicated (e.g. epilogue
683   /// vectorization) and the resume values can come from an additional bypass
684   /// block, the \p AdditionalBypass pair provides information about the bypass
685   /// block and the end value on the edge from bypass to this loop.
686   void createInductionResumeValues(
687       Loop *L,
688       std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});
689 
690   /// Complete the loop skeleton by adding debug MDs, creating appropriate
691   /// conditional branches in the middle block, preparing the builder and
692   /// running the verifier. Take in the vector loop \p L as argument, and return
693   /// the preheader of the completed vector loop.
694   BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);
695 
696   /// Add additional metadata to \p To that was not present on \p Orig.
697   ///
698   /// Currently this is used to add the noalias annotations based on the
699   /// inserted memchecks.  Use this for instructions that are *cloned* into the
700   /// vector loop.
701   void addNewMetadata(Instruction *To, const Instruction *Orig);
702 
703   /// Collect poison-generating recipes that may generate a poison value that is
704   /// used after vectorization, even when their operands are not poison. Those
705   /// recipes meet the following conditions:
706   ///  * Contribute to the address computation of a recipe generating a widen
707   ///    memory load/store (VPWidenMemoryInstructionRecipe or
708   ///    VPInterleaveRecipe).
709   ///  * Such a widen memory load/store has at least one underlying Instruction
710   ///    that is in a basic block that needs predication and after vectorization
711   ///    the generated instruction won't be predicated.
712   void collectPoisonGeneratingRecipes(VPTransformState &State);
713 
714   /// Allow subclasses to override and print debug traces before/after vplan
715   /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart() {}
  virtual void printDebugTracesAtEnd() {}
718 
719   /// The original loop.
720   Loop *OrigLoop;
721 
722   /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
723   /// dynamic knowledge to simplify SCEV expressions and converts them to a
724   /// more usable form.
725   PredicatedScalarEvolution &PSE;
726 
727   /// Loop Info.
728   LoopInfo *LI;
729 
730   /// Dominator Tree.
731   DominatorTree *DT;
732 
733   /// Alias Analysis.
734   AAResults *AA;
735 
736   /// Target Library Info.
737   const TargetLibraryInfo *TLI;
738 
739   /// Target Transform Info.
740   const TargetTransformInfo *TTI;
741 
742   /// Assumption Cache.
743   AssumptionCache *AC;
744 
745   /// Interface to emit optimization remarks.
746   OptimizationRemarkEmitter *ORE;
747 
748   /// LoopVersioning.  It's only set up (non-null) if memchecks were
749   /// used.
750   ///
751   /// This is currently only used to add no-alias metadata based on the
  /// memchecks.  The actual versioning is performed manually.
753   std::unique_ptr<LoopVersioning> LVer;
754 
755   /// The vectorization SIMD factor to use. Each vector will have this many
756   /// vector elements.
757   ElementCount VF;
758 
759   /// The vectorization unroll factor to use. Each scalar is vectorized to this
760   /// many different vector instructions.
761   unsigned UF;
762 
763   /// The builder that we use
764   IRBuilder<> Builder;
765 
766   // --- Vectorization state ---
767 
768   /// The vector-loop preheader.
769   BasicBlock *LoopVectorPreHeader;
770 
771   /// The scalar-loop preheader.
772   BasicBlock *LoopScalarPreHeader;
773 
774   /// Middle Block between the vector and the scalar.
775   BasicBlock *LoopMiddleBlock;
776 
777   /// The unique ExitBlock of the scalar loop if one exists.  Note that
778   /// there can be multiple exiting edges reaching this block.
779   BasicBlock *LoopExitBlock;
780 
781   /// The vector loop body.
782   BasicBlock *LoopVectorBody;
783 
784   /// The scalar loop body.
785   BasicBlock *LoopScalarBody;
786 
787   /// A list of all bypass blocks. The first block is the entry of the loop.
788   SmallVector<BasicBlock *, 4> LoopBypassBlocks;
789 
790   /// Store instructions that were predicated.
791   SmallVector<Instruction *, 4> PredicatedInstructions;
792 
793   /// Trip count of the original loop.
794   Value *TripCount = nullptr;
795 
796   /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
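  /// For example, for TripCount = 13, VF = 4 and UF = 2 the vector trip count
  /// is 13 - (13 % 8) = 8.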
797   Value *VectorTripCount = nullptr;
798 
799   /// The legality analysis.
800   LoopVectorizationLegality *Legal;
801 
  /// The profitability analysis.
803   LoopVectorizationCostModel *Cost;
804 
805   // Record whether runtime checks are added.
806   bool AddedSafetyChecks = false;
807 
808   // Holds the end values for each induction variable. We save the end values
  // so we can later fix up the external users of the induction variables.
810   DenseMap<PHINode *, Value *> IVEndValues;
811 
812   // Vector of original scalar PHIs whose corresponding widened PHIs need to be
813   // fixed up at the end of vector code generation.
814   SmallVector<PHINode *, 8> OrigPHIsToFix;
815 
816   /// BFI and PSI are used to check for profile guided size optimizations.
817   BlockFrequencyInfo *BFI;
818   ProfileSummaryInfo *PSI;
819 
  // Whether this loop should be optimized for size based on profile-guided
  // size optimizations.
822   bool OptForSizeBasedOnProfile;
823 
  /// Structure to hold information about generated runtime checks, responsible
  /// for cleaning up the checks if vectorization turns out to be
  /// unprofitable.
826   GeneratedRTChecks &RTChecks;
827 };
828 
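/// A variant of InnerLoopVectorizer that uses a fixed VF of 1, i.e. it only
/// interleaves (unrolls) the loop by UnrollFactor and does not widen any
/// instructions into vectors.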
829 class InnerLoopUnroller : public InnerLoopVectorizer {
830 public:
831   InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
832                     LoopInfo *LI, DominatorTree *DT,
833                     const TargetLibraryInfo *TLI,
834                     const TargetTransformInfo *TTI, AssumptionCache *AC,
835                     OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
836                     LoopVectorizationLegality *LVL,
837                     LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
838                     ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
839       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
840                             ElementCount::getFixed(1), UnrollFactor, LVL, CM,
841                             BFI, PSI, Check) {}
842 
843 private:
844   Value *getBroadcastInstrs(Value *V) override;
845 };
846 
847 /// Encapsulate information regarding vectorization of a loop and its epilogue.
848 /// This information is meant to be updated and used across two stages of
849 /// epilogue vectorization.
850 struct EpilogueLoopVectorizationInfo {
851   ElementCount MainLoopVF = ElementCount::getFixed(0);
852   unsigned MainLoopUF = 0;
853   ElementCount EpilogueVF = ElementCount::getFixed(0);
854   unsigned EpilogueUF = 0;
855   BasicBlock *MainLoopIterationCountCheck = nullptr;
856   BasicBlock *EpilogueIterationCountCheck = nullptr;
857   BasicBlock *SCEVSafetyCheck = nullptr;
858   BasicBlock *MemSafetyCheck = nullptr;
859   Value *TripCount = nullptr;
860   Value *VectorTripCount = nullptr;
861 
862   EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF,
863                                 ElementCount EVF, unsigned EUF)
864       : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF) {
865     assert(EUF == 1 &&
866            "A high UF for the epilogue loop is likely not beneficial.");
867   }
868 };
869 
870 /// An extension of the inner loop vectorizer that creates a skeleton for a
871 /// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the VPlan on a given loop twice: first to set up the
/// skeleton and vectorize the main loop, and second to complete the skeleton
/// from the first step and vectorize the epilogue.  This is achieved by
875 /// deriving two concrete strategy classes from this base class and invoking
876 /// them in succession from the loop vectorizer planner.
877 class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
878 public:
879   InnerLoopAndEpilogueVectorizer(
880       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
881       DominatorTree *DT, const TargetLibraryInfo *TLI,
882       const TargetTransformInfo *TTI, AssumptionCache *AC,
883       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
884       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
885       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
886       GeneratedRTChecks &Checks)
887       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
888                             EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
889                             Checks),
890         EPI(EPI) {}
891 
892   // Override this function to handle the more complex control flow around the
893   // three loops.
894   std::pair<BasicBlock *, Value *>
895   createVectorizedLoopSkeleton() final override {
896     return createEpilogueVectorizedLoopSkeleton();
897   }
898 
899   /// The interface for creating a vectorized skeleton using one of two
900   /// different strategies, each corresponding to one execution of the vplan
901   /// as described above.
902   virtual std::pair<BasicBlock *, Value *>
903   createEpilogueVectorizedLoopSkeleton() = 0;
904 
905   /// Holds and updates state information required to vectorize the main loop
906   /// and its epilogue in two separate passes. This setup helps us avoid
907   /// regenerating and recomputing runtime safety checks. It also helps us to
908   /// shorten the iteration-count-check path length for the cases where the
909   /// iteration count of the loop is so small that the main vector loop is
910   /// completely skipped.
911   EpilogueLoopVectorizationInfo &EPI;
912 };
913 
914 /// A specialized derived class of inner loop vectorizer that performs
915 /// vectorization of *main* loops in the process of vectorizing loops and their
916 /// epilogues.
917 class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
918 public:
919   EpilogueVectorizerMainLoop(
920       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
921       DominatorTree *DT, const TargetLibraryInfo *TLI,
922       const TargetTransformInfo *TTI, AssumptionCache *AC,
923       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
924       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
925       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
926       GeneratedRTChecks &Check)
927       : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
928                                        EPI, LVL, CM, BFI, PSI, Check) {}
929   /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e. the first pass of VPlan execution).
931   std::pair<BasicBlock *, Value *>
932   createEpilogueVectorizedLoopSkeleton() final override;
933 
934 protected:
935   /// Emits an iteration count bypass check once for the main loop (when \p
936   /// ForEpilogue is false) and once for the epilogue loop (when \p
937   /// ForEpilogue is true).
938   BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
939                                              bool ForEpilogue);
940   void printDebugTracesAtStart() override;
941   void printDebugTracesAtEnd() override;
942 };
943 
/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *epilogue* loops in the process of vectorizing loops and
/// their epilogues.
947 class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
948 public:
949   EpilogueVectorizerEpilogueLoop(
950       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
951       DominatorTree *DT, const TargetLibraryInfo *TLI,
952       const TargetTransformInfo *TTI, AssumptionCache *AC,
953       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
954       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
955       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
956       GeneratedRTChecks &Checks)
957       : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
958                                        EPI, LVL, CM, BFI, PSI, Checks) {}
959   /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e. the second pass of VPlan execution).
961   std::pair<BasicBlock *, Value *>
962   createEpilogueVectorizedLoopSkeleton() final override;
963 
964 protected:
965   /// Emits an iteration count bypass check after the main vector loop has
966   /// finished to see if there are any iterations left to execute by either
967   /// the vector epilogue or the scalar epilogue.
968   BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
969                                                       BasicBlock *Bypass,
970                                                       BasicBlock *Insert);
971   void printDebugTracesAtStart() override;
972   void printDebugTracesAtEnd() override;
973 };
974 } // end namespace llvm
975 
/// Look for a meaningful debug location on the instruction or its
/// operands.
978 static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
979   if (!I)
980     return I;
981 
982   DebugLoc Empty;
983   if (I->getDebugLoc() != Empty)
984     return I;
985 
986   for (Use &Op : I->operands()) {
987     if (Instruction *OpInst = dyn_cast<Instruction>(Op))
988       if (OpInst->getDebugLoc() != Empty)
989         return OpInst;
990   }
991 
992   return I;
993 }
994 
995 void InnerLoopVectorizer::setDebugLocFromInst(
996     const Value *V, Optional<IRBuilder<> *> CustomBuilder) {
997   IRBuilder<> *B = (CustomBuilder == None) ? &Builder : *CustomBuilder;
998   if (const Instruction *Inst = dyn_cast_or_null<Instruction>(V)) {
999     const DILocation *DIL = Inst->getDebugLoc();
1000 
    // When an FSDiscriminator is enabled, we don't need to add the multiply
1002     // factors to the discriminators.
1003     if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
1004         !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
1005       // FIXME: For scalable vectors, assume vscale=1.
1006       auto NewDIL =
1007           DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
1008       if (NewDIL)
1009         B->SetCurrentDebugLocation(NewDIL.getValue());
1010       else
1011         LLVM_DEBUG(dbgs()
1012                    << "Failed to create new discriminator: "
1013                    << DIL->getFilename() << " Line: " << DIL->getLine());
1014     } else
1015       B->SetCurrentDebugLocation(DIL);
1016   } else
1017     B->SetCurrentDebugLocation(DebugLoc());
1018 }
1019 
1020 /// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
1021 /// is passed, the message relates to that particular instruction.
1022 #ifndef NDEBUG
1023 static void debugVectorizationMessage(const StringRef Prefix,
1024                                       const StringRef DebugMsg,
1025                                       Instruction *I) {
1026   dbgs() << "LV: " << Prefix << DebugMsg;
1027   if (I != nullptr)
1028     dbgs() << " " << *I;
1029   else
1030     dbgs() << '.';
1031   dbgs() << '\n';
1032 }
1033 #endif
1034 
1035 /// Create an analysis remark that explains why vectorization failed
1036 ///
1037 /// \p PassName is the name of the pass (e.g. can be AlwaysPrint).  \p
1038 /// RemarkName is the identifier for the remark.  If \p I is passed it is an
1039 /// instruction that prevents vectorization.  Otherwise \p TheLoop is used for
1040 /// the location of the remark.  \return the remark object that can be
1041 /// streamed to.
static OptimizationRemarkAnalysis
createLVAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop,
                 Instruction *I) {
1044   Value *CodeRegion = TheLoop->getHeader();
1045   DebugLoc DL = TheLoop->getStartLoc();
1046 
1047   if (I) {
1048     CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back to
    // using the loop's location.
1051     if (I->getDebugLoc())
1052       DL = I->getDebugLoc();
1053   }
1054 
1055   return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
1056 }
1057 
1058 namespace llvm {
1059 
1060 /// Return a value for Step multiplied by VF.
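/// For example, for Step = 2 and a fixed VF = 4 this returns the constant 8;
/// for a scalable VF = <vscale x 4> it returns the runtime value 8 * vscale.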
1061 Value *createStepForVF(IRBuilder<> &B, Type *Ty, ElementCount VF,
1062                        int64_t Step) {
1063   assert(Ty->isIntegerTy() && "Expected an integer step");
1064   Constant *StepVal = ConstantInt::get(Ty, Step * VF.getKnownMinValue());
1065   return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
1066 }
1067 
1068 /// Return the runtime value for VF.
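/// For a fixed VF this is a compile-time constant; for a scalable
/// VF = <vscale x N> it is the value N * vscale materialized at runtime.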
1069 Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF) {
1070   Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
1071   return VF.isScalable() ? B.CreateVScale(EC) : EC;
1072 }
1073 
1074 static Value *getRuntimeVFAsFloat(IRBuilder<> &B, Type *FTy, ElementCount VF) {
1075   assert(FTy->isFloatingPointTy() && "Expected floating point type!");
1076   Type *IntTy = IntegerType::get(FTy->getContext(), FTy->getScalarSizeInBits());
1077   Value *RuntimeVF = getRuntimeVF(B, IntTy, VF);
1078   return B.CreateUIToFP(RuntimeVF, FTy);
1079 }
1080 
1081 void reportVectorizationFailure(const StringRef DebugMsg,
1082                                 const StringRef OREMsg, const StringRef ORETag,
1083                                 OptimizationRemarkEmitter *ORE, Loop *TheLoop,
1084                                 Instruction *I) {
1085   LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
1086   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1087   ORE->emit(
1088       createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
1089       << "loop not vectorized: " << OREMsg);
1090 }
1091 
1092 void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
1093                              OptimizationRemarkEmitter *ORE, Loop *TheLoop,
1094                              Instruction *I) {
1095   LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
1096   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1097   ORE->emit(
1098       createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
1099       << Msg);
1100 }
1101 
1102 } // end namespace llvm
1103 
1104 #ifndef NDEBUG
1105 /// \return string containing a file name and a line # for the given loop.
1106 static std::string getDebugLocString(const Loop *L) {
1107   std::string Result;
1108   if (L) {
1109     raw_string_ostream OS(Result);
1110     if (const DebugLoc LoopDbgLoc = L->getStartLoc())
1111       LoopDbgLoc.print(OS);
1112     else
1113       // Just print the module name.
1114       OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
1115     OS.flush();
1116   }
1117   return Result;
1118 }
1119 #endif
1120 
1121 void InnerLoopVectorizer::addNewMetadata(Instruction *To,
1122                                          const Instruction *Orig) {
1123   // If the loop was versioned with memchecks, add the corresponding no-alias
1124   // metadata.
1125   if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
1126     LVer->annotateInstWithNoAlias(To, Orig);
1127 }
1128 
1129 void InnerLoopVectorizer::collectPoisonGeneratingRecipes(
1130     VPTransformState &State) {
1131 
1132   // Collect recipes in the backward slice of `Root` that may generate a poison
1133   // value that is used after vectorization.
1134   SmallPtrSet<VPRecipeBase *, 16> Visited;
1135   auto collectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
1136     SmallVector<VPRecipeBase *, 16> Worklist;
1137     Worklist.push_back(Root);
1138 
1139     // Traverse the backward slice of Root through its use-def chain.
1140     while (!Worklist.empty()) {
1141       VPRecipeBase *CurRec = Worklist.back();
1142       Worklist.pop_back();
1143 
1144       if (!Visited.insert(CurRec).second)
1145         continue;
1146 
1147       // Prune search if we find another recipe generating a widen memory
1148       // instruction. Widen memory instructions involved in address computation
1149       // will lead to gather/scatter instructions, which don't need to be
1150       // handled.
1151       if (isa<VPWidenMemoryInstructionRecipe>(CurRec) ||
1152           isa<VPInterleaveRecipe>(CurRec) ||
1153           isa<VPCanonicalIVPHIRecipe>(CurRec))
1154         continue;
1155 
1156       // This recipe contributes to the address computation of a widen
1157       // load/store. Collect recipe if its underlying instruction has
1158       // poison-generating flags.
1159       Instruction *Instr = CurRec->getUnderlyingInstr();
1160       if (Instr && Instr->hasPoisonGeneratingFlags())
1161         State.MayGeneratePoisonRecipes.insert(CurRec);
1162 
1163       // Add new definitions to the worklist.
1164       for (VPValue *operand : CurRec->operands())
1165         if (VPDef *OpDef = operand->getDef())
1166           Worklist.push_back(cast<VPRecipeBase>(OpDef));
1167     }
1168   });
1169 
1170   // Traverse all the recipes in the VPlan and collect the poison-generating
  // recipes in the backward slice starting at the address of a
  // VPWidenMemoryInstructionRecipe or VPInterleaveRecipe.
1173   auto Iter = depth_first(
1174       VPBlockRecursiveTraversalWrapper<VPBlockBase *>(State.Plan->getEntry()));
1175   for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
1176     for (VPRecipeBase &Recipe : *VPBB) {
1177       if (auto *WidenRec = dyn_cast<VPWidenMemoryInstructionRecipe>(&Recipe)) {
1178         Instruction *UnderlyingInstr = WidenRec->getUnderlyingInstr();
1179         VPDef *AddrDef = WidenRec->getAddr()->getDef();
1180         if (AddrDef && WidenRec->isConsecutive() && UnderlyingInstr &&
1181             Legal->blockNeedsPredication(UnderlyingInstr->getParent()))
1182           collectPoisonGeneratingInstrsInBackwardSlice(
1183               cast<VPRecipeBase>(AddrDef));
1184       } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
1185         VPDef *AddrDef = InterleaveRec->getAddr()->getDef();
1186         if (AddrDef) {
1187           // Check if any member of the interleave group needs predication.
1188           const InterleaveGroup<Instruction> *InterGroup =
1189               InterleaveRec->getInterleaveGroup();
1190           bool NeedPredication = false;
1191           for (int I = 0, NumMembers = InterGroup->getNumMembers();
1192                I < NumMembers; ++I) {
1193             Instruction *Member = InterGroup->getMember(I);
1194             if (Member)
1195               NeedPredication |=
1196                   Legal->blockNeedsPredication(Member->getParent());
1197           }
1198 
1199           if (NeedPredication)
1200             collectPoisonGeneratingInstrsInBackwardSlice(
1201                 cast<VPRecipeBase>(AddrDef));
1202         }
1203       }
1204     }
1205   }
1206 }
1207 
1208 void InnerLoopVectorizer::addMetadata(Instruction *To,
1209                                       Instruction *From) {
1210   propagateMetadata(To, From);
1211   addNewMetadata(To, From);
1212 }
1213 
1214 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
1215                                       Instruction *From) {
1216   for (Value *V : To) {
1217     if (Instruction *I = dyn_cast<Instruction>(V))
1218       addMetadata(I, From);
1219   }
1220 }
1221 
1222 namespace llvm {
1223 
// Loop vectorization cost-model hints about how the scalar epilogue loop
// should be lowered.
1226 enum ScalarEpilogueLowering {
1227 
1228   // The default: allowing scalar epilogues.
1229   CM_ScalarEpilogueAllowed,
1230 
1231   // Vectorization with OptForSize: don't allow epilogues.
1232   CM_ScalarEpilogueNotAllowedOptSize,
1233 
  // A special case of vectorization with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
1238   CM_ScalarEpilogueNotAllowedLowTripLoop,
1239 
1240   // Loop hint predicate indicating an epilogue is undesired.
1241   CM_ScalarEpilogueNotNeededUsePredicate,
1242 
  // Directive indicating we must either tail fold or not vectorize.
1244   CM_ScalarEpilogueNotAllowedUsePredicate
1245 };
1246 
1247 /// ElementCountComparator creates a total ordering for ElementCount
1248 /// for the purposes of using it in a set structure.
1249 struct ElementCountComparator {
1250   bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
1251     return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
1252            std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
1253   }
1254 };
1255 using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;
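// For example, with this ordering all fixed-width VFs sort before scalable
// ones, so a set holding {8, 4, vscale x 4} is visited as 4, 8, vscale x 4.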
1256 
1257 /// LoopVectorizationCostModel - estimates the expected speedups due to
1258 /// vectorization.
1259 /// In many cases vectorization is not profitable. This can happen because of
1260 /// a number of reasons. In this class we mainly attempt to predict the
1261 /// expected speedup/slowdowns due to the supported instruction set. We use the
1262 /// TargetTransformInfo to query the different backends for the cost of
1263 /// different operations.
1264 class LoopVectorizationCostModel {
1265 public:
1266   LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
1267                              PredicatedScalarEvolution &PSE, LoopInfo *LI,
1268                              LoopVectorizationLegality *Legal,
1269                              const TargetTransformInfo &TTI,
1270                              const TargetLibraryInfo *TLI, DemandedBits *DB,
1271                              AssumptionCache *AC,
1272                              OptimizationRemarkEmitter *ORE, const Function *F,
1273                              const LoopVectorizeHints *Hints,
1274                              InterleavedAccessInfo &IAI)
1275       : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
1276         TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
1277         Hints(Hints), InterleaveInfo(IAI) {}
1278 
1279   /// \return An upper bound for the vectorization factors (both fixed and
1280   /// scalable). If the factors are 0, vectorization and interleaving should be
1281   /// avoided up front.
1282   FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);
1283 
1284   /// \return True if runtime checks are required for vectorization, and false
1285   /// otherwise.
1286   bool runtimeChecksRequired();
1287 
  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every VF in \p CandidateVFs. If UserVF is not zero,
  /// that vectorization factor is selected as long as vectorization is
  /// possible.
1292   VectorizationFactor
1293   selectVectorizationFactor(const ElementCountSet &CandidateVFs);
1294 
1295   VectorizationFactor
1296   selectEpilogueVectorizationFactor(const ElementCount MaxVF,
1297                                     const LoopVectorizationPlanner &LVP);
1298 
1299   /// Setup cost-based decisions for user vectorization factor.
1300   /// \return true if the UserVF is a feasible VF to be chosen.
1301   bool selectUserVectorizationFactor(ElementCount UserVF) {
1302     collectUniformsAndScalars(UserVF);
1303     collectInstsToScalarize(UserVF);
1304     return expectedCost(UserVF).first.isValid();
1305   }
1306 
1307   /// \return The size (in bits) of the smallest and widest types in the code
1308   /// that needs to be vectorized. We ignore values that remain scalar such as
1309   /// 64 bit loop indices.
1310   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1311 
1312   /// \return The desired interleave count.
1313   /// If interleave count has been specified by metadata it will be returned.
1314   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1315   /// are the selected vectorization factor and the cost of the selected VF.
1316   unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);
1317 
  /// A memory access instruction may be vectorized in more than one way; the
  /// form it takes after vectorization depends on cost. This function makes
  /// cost-based decisions for Load/Store instructions and collects them in a
  /// map. The decision map is used for building the lists of loop-uniform
  /// and loop-scalar instructions. The calculated cost is saved together with
  /// the widening decision in order to avoid redundant calculations.
1325   void setCostBasedWideningDecision(ElementCount VF);
1326 
1327   /// A struct that represents some properties of the register usage
1328   /// of a loop.
1329   struct RegisterUsage {
1330     /// Holds the number of loop invariant values that are used in the loop.
1331     /// The key is ClassID of target-provided register class.
1332     SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
1333     /// Holds the maximum number of concurrent live intervals in the loop.
1334     /// The key is ClassID of target-provided register class.
1335     SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
1336   };
1337 
1338   /// \return Returns information about the register usages of the loop for the
1339   /// given vectorization factors.
1340   SmallVector<RegisterUsage, 8>
1341   calculateRegisterUsage(ArrayRef<ElementCount> VFs);
1342 
1343   /// Collect values we want to ignore in the cost model.
1344   void collectValuesToIgnore();
1345 
1346   /// Collect all element types in the loop for which widening is needed.
1347   void collectElementTypesForWidening();
1348 
  /// Split reductions into those that happen in the loop, and those that happen
  /// outside. In-loop reductions are collected into InLoopReductionChains.
1351   void collectInLoopReductions();
1352 
1353   /// Returns true if we should use strict in-order reductions for the given
1354   /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
1355   /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
1356   /// of FP operations.
1357   bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) {
1358     return !Hints->allowReordering() && RdxDesc.isOrdered();
1359   }
1360 
1361   /// \returns The smallest bitwidth each instruction can be represented with.
1362   /// The vector equivalents of these instructions should be truncated to this
1363   /// type.
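  /// For example (a sketch of the intent), if an i32 operation is known to
  /// need only 8 bits, its vector form can be computed on <VF x i8> and
  /// extended back only where a wider result is actually required.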
1364   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1365     return MinBWs;
1366   }
1367 
1368   /// \returns True if it is more profitable to scalarize instruction \p I for
1369   /// vectorization factor \p VF.
1370   bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
1371     assert(VF.isVector() &&
1372            "Profitable to scalarize relevant only for VF > 1.");
1373 
1374     // Cost model is not run in the VPlan-native path - return conservative
1375     // result until this changes.
1376     if (EnableVPlanNativePath)
1377       return false;
1378 
1379     auto Scalars = InstsToScalarize.find(VF);
1380     assert(Scalars != InstsToScalarize.end() &&
1381            "VF not yet analyzed for scalarization profitability");
1382     return Scalars->second.find(I) != Scalars->second.end();
1383   }
1384 
1385   /// Returns true if \p I is known to be uniform after vectorization.
1386   bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
1387     if (VF.isScalar())
1388       return true;
1389 
1390     // Cost model is not run in the VPlan-native path - return conservative
1391     // result until this changes.
1392     if (EnableVPlanNativePath)
1393       return false;
1394 
1395     auto UniformsPerVF = Uniforms.find(VF);
1396     assert(UniformsPerVF != Uniforms.end() &&
1397            "VF not yet analyzed for uniformity");
1398     return UniformsPerVF->second.count(I);
1399   }
1400 
1401   /// Returns true if \p I is known to be scalar after vectorization.
1402   bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
1403     if (VF.isScalar())
1404       return true;
1405 
1406     // Cost model is not run in the VPlan-native path - return conservative
1407     // result until this changes.
1408     if (EnableVPlanNativePath)
1409       return false;
1410 
1411     auto ScalarsPerVF = Scalars.find(VF);
1412     assert(ScalarsPerVF != Scalars.end() &&
1413            "Scalar values are not calculated for VF");
1414     return ScalarsPerVF->second.count(I);
1415   }
1416 
1417   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1418   /// for vectorization factor \p VF.
1419   bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
1420     return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
1421            !isProfitableToScalarize(I, VF) &&
1422            !isScalarAfterVectorization(I, VF);
1423   }
1424 
  /// Decision that was taken during cost calculation for a memory instruction.
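  /// For example (illustrative): a load of A[i] with unit stride typically
  /// maps to CM_Widen, a load of A[n - i] to CM_Widen_Reverse, members of an
  /// interleave group to CM_Interleave, and a strided access such as A[3 * i]
  /// to CM_GatherScatter or CM_Scalarize, depending on target support.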
1426   enum InstWidening {
1427     CM_Unknown,
1428     CM_Widen,         // For consecutive accesses with stride +1.
1429     CM_Widen_Reverse, // For consecutive accesses with stride -1.
1430     CM_Interleave,
1431     CM_GatherScatter,
1432     CM_Scalarize
1433   };
1434 
1435   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1436   /// instruction \p I and vector width \p VF.
1437   void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
1438                            InstructionCost Cost) {
1439     assert(VF.isVector() && "Expected VF >=2");
1440     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1441   }
1442 
1443   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1444   /// interleaving group \p Grp and vector width \p VF.
1445   void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
1446                            ElementCount VF, InstWidening W,
1447                            InstructionCost Cost) {
1448     assert(VF.isVector() && "Expected VF >=2");
    /// Broadcast this decision to all instructions inside the group, but
    /// assign the cost to only one instruction.
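    /// For example, for a group of factor 2 whose insert position is the
    /// first member, that member records (W, Cost) and the other member
    /// records (W, 0).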
1451     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1452       if (auto *I = Grp->getMember(i)) {
1453         if (Grp->getInsertPos() == I)
1454           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1455         else
1456           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1457       }
1458     }
1459   }
1460 
1461   /// Return the cost model decision for the given instruction \p I and vector
1462   /// width \p VF. Return CM_Unknown if this instruction did not pass
1463   /// through the cost modeling.
1464   InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
1465     assert(VF.isVector() && "Expected VF to be a vector VF");
1466     // Cost model is not run in the VPlan-native path - return conservative
1467     // result until this changes.
1468     if (EnableVPlanNativePath)
1469       return CM_GatherScatter;
1470 
1471     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1472     auto Itr = WideningDecisions.find(InstOnVF);
1473     if (Itr == WideningDecisions.end())
1474       return CM_Unknown;
1475     return Itr->second.first;
1476   }
1477 
1478   /// Return the vectorization cost for the given instruction \p I and vector
1479   /// width \p VF.
1480   InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
1481     assert(VF.isVector() && "Expected VF >=2");
1482     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1483     assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1484            "The cost is not calculated");
1485     return WideningDecisions[InstOnVF].second;
1486   }
1487 
1488   /// Return True if instruction \p I is an optimizable truncate whose operand
1489   /// is an induction variable. Such a truncate will be removed by adding a new
1490   /// induction variable with the destination type.
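  /// For example (illustrative), with a primary induction %iv of type i64, a
  /// 'trunc i64 %iv to i32' that only feeds 32-bit arithmetic can be replaced
  /// by a new i32 induction variable with the same start and step.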
1491   bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
1492     // If the instruction is not a truncate, return false.
1493     auto *Trunc = dyn_cast<TruncInst>(I);
1494     if (!Trunc)
1495       return false;
1496 
1497     // Get the source and destination types of the truncate.
1498     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1499     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1500 
1501     // If the truncate is free for the given types, return false. Replacing a
1502     // free truncate with an induction variable would add an induction variable
1503     // update instruction to each iteration of the loop. We exclude from this
1504     // check the primary induction variable since it will need an update
1505     // instruction regardless.
1506     Value *Op = Trunc->getOperand(0);
1507     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1508       return false;
1509 
1510     // If the truncated value is not an induction variable, return false.
1511     return Legal->isInductionPhi(Op);
1512   }
1513 
1514   /// Collects the instructions to scalarize for each predicated instruction in
1515   /// the loop.
1516   void collectInstsToScalarize(ElementCount VF);
1517 
  /// Collect Uniform and Scalar values for the given \p VF.
  /// The sets depend on the CM decisions for Load/Store instructions
  /// that may be vectorized as interleaved, gather/scatter or scalarized.
1521   void collectUniformsAndScalars(ElementCount VF) {
1522     // Do the analysis once.
1523     if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
1524       return;
1525     setCostBasedWideningDecision(VF);
1526     collectLoopUniforms(VF);
1527     collectLoopScalars(VF);
1528   }
1529 
1530   /// Returns true if the target machine supports masked store operation
1531   /// for the given \p DataType and kind of access to \p Ptr.
1532   bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const {
1533     return Legal->isConsecutivePtr(DataType, Ptr) &&
1534            TTI.isLegalMaskedStore(DataType, Alignment);
1535   }
1536 
1537   /// Returns true if the target machine supports masked load operation
1538   /// for the given \p DataType and kind of access to \p Ptr.
1539   bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const {
1540     return Legal->isConsecutivePtr(DataType, Ptr) &&
1541            TTI.isLegalMaskedLoad(DataType, Alignment);
1542   }
1543 
1544   /// Returns true if the target machine can represent \p V as a masked gather
1545   /// or scatter operation.
1546   bool isLegalGatherOrScatter(Value *V,
1547                               ElementCount VF = ElementCount::getFixed(1)) {
1548     bool LI = isa<LoadInst>(V);
1549     bool SI = isa<StoreInst>(V);
1550     if (!LI && !SI)
1551       return false;
1552     auto *Ty = getLoadStoreType(V);
1553     Align Align = getLoadStoreAlignment(V);
1554     if (VF.isVector())
1555       Ty = VectorType::get(Ty, VF);
1556     return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
1557            (SI && TTI.isLegalMaskedScatter(Ty, Align));
1558   }
1559 
1560   /// Returns true if the target machine supports all of the reduction
1561   /// variables found for the given VF.
1562   bool canVectorizeReductions(ElementCount VF) const {
1563     return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
1564       const RecurrenceDescriptor &RdxDesc = Reduction.second;
1565       return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
1566     }));
1567   }
1568 
  /// Returns true if \p I is an instruction that will be scalarized with
  /// predication when vectorized with vectorization factor \p VF. Such
  /// instructions include conditional stores and instructions that may divide
  /// by zero.
1573   bool isScalarWithPredication(Instruction *I, ElementCount VF) const;
1574 
1575   // Returns true if \p I is an instruction that will be predicated either
1576   // through scalar predication or masked load/store or masked gather/scatter.
1577   // \p VF is the vectorization factor that will be used to vectorize \p I.
1578   // Superset of instructions that return true for isScalarWithPredication.
1579   bool isPredicatedInst(Instruction *I, ElementCount VF,
1580                         bool IsKnownUniform = false) {
1581     // When we know the load is uniform and the original scalar loop was not
1582     // predicated we don't need to mark it as a predicated instruction. Any
1583     // vectorised blocks created when tail-folding are something artificial we
1584     // have introduced and we know there is always at least one active lane.
1585     // That's why we call Legal->blockNeedsPredication here because it doesn't
1586     // query tail-folding.
1587     if (IsKnownUniform && isa<LoadInst>(I) &&
1588         !Legal->blockNeedsPredication(I->getParent()))
1589       return false;
1590     if (!blockNeedsPredicationForAnyReason(I->getParent()))
1591       return false;
1592     // Loads and stores that need some form of masked operation are predicated
1593     // instructions.
1594     if (isa<LoadInst>(I) || isa<StoreInst>(I))
1595       return Legal->isMaskRequired(I);
1596     return isScalarWithPredication(I, VF);
1597   }
1598 
1599   /// Returns true if \p I is a memory instruction with consecutive memory
1600   /// access that can be widened.
1601   bool
1602   memoryInstructionCanBeWidened(Instruction *I,
1603                                 ElementCount VF = ElementCount::getFixed(1));
1604 
1605   /// Returns true if \p I is a memory instruction in an interleaved-group
1606   /// of memory accesses that can be vectorized with wide vector loads/stores
1607   /// and shuffles.
1608   bool
1609   interleavedAccessCanBeWidened(Instruction *I,
1610                                 ElementCount VF = ElementCount::getFixed(1));
1611 
1612   /// Check if \p Instr belongs to any interleaved access group.
1613   bool isAccessInterleaved(Instruction *Instr) {
1614     return InterleaveInfo.isInterleaved(Instr);
1615   }
1616 
1617   /// Get the interleaved access group that \p Instr belongs to.
1618   const InterleaveGroup<Instruction> *
1619   getInterleavedAccessGroup(Instruction *Instr) {
1620     return InterleaveInfo.getInterleaveGroup(Instr);
1621   }
1622 
1623   /// Returns true if we're required to use a scalar epilogue for at least
1624   /// the final iteration of the original loop.
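  /// For example (illustrative), an interleave group with a gap whose members
  /// would otherwise access memory past the last full vector iteration forces
  /// those final iterations to run in the scalar epilogue.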
1625   bool requiresScalarEpilogue(ElementCount VF) const {
1626     if (!isScalarEpilogueAllowed())
1627       return false;
    // If we might exit from anywhere but the latch, we must run the exiting
    // iteration in scalar form.
1630     if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
1631       return true;
1632     return VF.isVector() && InterleaveInfo.requiresScalarEpilogue();
1633   }
1634 
1635   /// Returns true if a scalar epilogue is not allowed due to optsize or a
1636   /// loop hint annotation.
1637   bool isScalarEpilogueAllowed() const {
1638     return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1639   }
1640 
1641   /// Returns true if all loop blocks should be masked to fold tail loop.
1642   bool foldTailByMasking() const { return FoldTailByMasking; }
1643 
  /// Returns true if the instructions in this block require predication
  /// for any reason, e.g. because tail folding now requires a predicate
  /// or because the block in the original loop was predicated.
1647   bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const {
1648     return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1649   }
1650 
1651   /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
1652   /// nodes to the chain of instructions representing the reductions. Uses a
1653   /// MapVector to ensure deterministic iteration order.
1654   using ReductionChainMap =
1655       SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;
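  // For example, for an in-loop reduction like 'sum += A[i]', the phi for
  // 'sum' maps to the chain of instructions that update it (here, the add),
  // in program order.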
1656 
1657   /// Return the chain of instructions representing an inloop reduction.
1658   const ReductionChainMap &getInLoopReductionChains() const {
1659     return InLoopReductionChains;
1660   }
1661 
1662   /// Returns true if the Phi is part of an inloop reduction.
1663   bool isInLoopReduction(PHINode *Phi) const {
1664     return InLoopReductionChains.count(Phi);
1665   }
1666 
1667   /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1668   /// with factor VF.  Return the cost of the instruction, including
1669   /// scalarization overhead if it's needed.
1670   InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;
1671 
  /// Estimate cost of a call instruction CI if it were vectorized with factor
  /// VF. Return the cost of the instruction, including scalarization overhead
  /// if it's needed. The flag NeedToScalarize shows if the call needs to be
  /// scalarized, i.e. either a vector version isn't available or it is too
  /// expensive.
1677   InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
1678                                     bool &NeedToScalarize) const;
1679 
1680   /// Returns true if the per-lane cost of VectorizationFactor A is lower than
1681   /// that of B.
1682   bool isMoreProfitable(const VectorizationFactor &A,
1683                         const VectorizationFactor &B) const;
1684 
1685   /// Invalidates decisions already taken by the cost model.
1686   void invalidateCostModelingDecisions() {
1687     WideningDecisions.clear();
1688     Uniforms.clear();
1689     Scalars.clear();
1690   }
1691 
1692 private:
1693   unsigned NumPredStores = 0;
1694 
1695   /// \return An upper bound for the vectorization factors for both
1696   /// fixed and scalable vectorization, where the minimum-known number of
1697   /// elements is a power-of-2 larger than zero. If scalable vectorization is
1698   /// disabled or unsupported, then the scalable part will be equal to
1699   /// ElementCount::getScalable(0).
1700   FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
1701                                            ElementCount UserVF,
1702                                            bool FoldTailByMasking);
1703 
  /// \return the maximized element count based on the target's vector
  /// registers and the loop trip-count, but limited to a maximum safe VF.
1706   /// This is a helper function of computeFeasibleMaxVF.
1707   /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
1708   /// issue that occurred on one of the buildbots which cannot be reproduced
  /// without having access to the proprietary compiler (see comments on
1710   /// D98509). The issue is currently under investigation and this workaround
1711   /// will be removed as soon as possible.
1712   ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
1713                                        unsigned SmallestType,
1714                                        unsigned WidestType,
1715                                        const ElementCount &MaxSafeVF,
1716                                        bool FoldTailByMasking);
1717 
1718   /// \return the maximum legal scalable VF, based on the safe max number
1719   /// of elements.
1720   ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1721 
1722   /// The vectorization cost is a combination of the cost itself and a boolean
1723   /// indicating whether any of the contributing operations will actually
1724   /// operate on vector values after type legalization in the backend. If this
1725   /// latter value is false, then all operations will be scalarized (i.e. no
1726   /// vectorization has actually taken place).
1727   using VectorizationCostTy = std::pair<InstructionCost, bool>;
1728 
1729   /// Returns the expected execution cost. The unit of the cost does
1730   /// not matter because we use the 'cost' units to compare different
1731   /// vector widths. The cost that is returned is *not* normalized by
1732   /// the factor width. If \p Invalid is not nullptr, this function
1733   /// will add a pair(Instruction*, ElementCount) to \p Invalid for
1734   /// each instruction that has an Invalid cost for the given VF.
1735   using InstructionVFPair = std::pair<Instruction *, ElementCount>;
1736   VectorizationCostTy
1737   expectedCost(ElementCount VF,
1738                SmallVectorImpl<InstructionVFPair> *Invalid = nullptr);
1739 
1740   /// Returns the execution time cost of an instruction for a given vector
1741   /// width. Vector width of one means scalar.
1742   VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1743 
1744   /// The cost-computation logic from getInstructionCost which provides
1745   /// the vector type as an output parameter.
1746   InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
1747                                      Type *&VectorTy);
1748 
1749   /// Return the cost of instructions in an inloop reduction pattern, if I is
1750   /// part of that pattern.
1751   Optional<InstructionCost>
1752   getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy,
1753                           TTI::TargetCostKind CostKind);
1754 
1755   /// Calculate vectorization cost of memory instruction \p I.
1756   InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1757 
1758   /// The cost computation for scalarized memory instruction.
1759   InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1760 
1761   /// The cost computation for interleaving group of memory instructions.
1762   InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1763 
1764   /// The cost computation for Gather/Scatter instruction.
1765   InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1766 
1767   /// The cost computation for widening instruction \p I with consecutive
1768   /// memory access.
1769   InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1770 
1771   /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1772   /// Load: scalar load + broadcast.
1773   /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1774   /// element)
1775   InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1776 
1777   /// Estimate the overhead of scalarizing an instruction. This is a
1778   /// convenience wrapper for the type-based getScalarizationOverhead API.
1779   InstructionCost getScalarizationOverhead(Instruction *I,
1780                                            ElementCount VF) const;
1781 
  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
1784   bool isConsecutiveLoadOrStore(Instruction *I);
1785 
1786   /// Returns true if an artificially high cost for emulated masked memrefs
1787   /// should be used.
1788   bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF);
1789 
1790   /// Map of scalar integer values to the smallest bitwidth they can be legally
1791   /// represented as. The vector equivalents of these values should be truncated
1792   /// to this type.
1793   MapVector<Instruction *, uint64_t> MinBWs;
1794 
1795   /// A type representing the costs for instructions if they were to be
1796   /// scalarized rather than vectorized. The entries are Instruction-Cost
1797   /// pairs.
1798   using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1799 
  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
1802   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1803 
1804   /// Records whether it is allowed to have the original scalar loop execute at
1805   /// least once. This may be needed as a fallback loop in case runtime
1806   /// aliasing/dependence checks fail, or to handle the tail/remainder
1807   /// iterations when the trip count is unknown or doesn't divide by the VF,
1808   /// or as a peel-loop to handle gaps in interleave-groups.
1809   /// Under optsize and when the trip count is very small we don't allow any
1810   /// iterations to execute in the scalar loop.
1811   ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1812 
1813   /// All blocks of loop are to be masked to fold tail of scalar iterations.
1814   bool FoldTailByMasking = false;
1815 
1816   /// A map holding scalar costs for different vectorization factors. The
1817   /// presence of a cost for an instruction in the mapping indicates that the
1818   /// instruction will be scalarized when vectorizing with the associated
1819   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1820   DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1821 
1822   /// Holds the instructions known to be uniform after vectorization.
1823   /// The data is collected per VF.
1824   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1825 
1826   /// Holds the instructions known to be scalar after vectorization.
1827   /// The data is collected per VF.
1828   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1829 
1830   /// Holds the instructions (address computations) that are forced to be
1831   /// scalarized.
1832   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1833 
  /// PHINodes of the reductions that should be expanded in-loop along with
  /// their associated chains of reduction operations, in program order from top
  /// (PHI) to bottom.
1837   ReductionChainMap InLoopReductionChains;
1838 
1839   /// A Map of inloop reduction operations and their immediate chain operand.
1840   /// FIXME: This can be removed once reductions can be costed correctly in
1841   /// vplan. This was added to allow quick lookup to the inloop operations,
1842   /// without having to loop through InLoopReductionChains.
1843   DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1844 
1845   /// Returns the expected difference in cost from scalarizing the expression
1846   /// feeding a predicated instruction \p PredInst. The instructions to
1847   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1848   /// non-negative return value implies the expression will be scalarized.
1849   /// Currently, only single-use chains are considered for scalarization.
1850   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1851                               ElementCount VF);
1852 
1853   /// Collect the instructions that are uniform after vectorization. An
1854   /// instruction is uniform if we represent it with a single scalar value in
1855   /// the vectorized loop corresponding to each vector iteration. Examples of
1856   /// uniform instructions include pointer operands of consecutive or
1857   /// interleaved memory accesses. Note that although uniformity implies an
1858   /// instruction will be scalar, the reverse is not true. In general, a
1859   /// scalarized instruction will be represented by VF scalar values in the
1860   /// vectorized loop, each corresponding to an iteration of the original
1861   /// scalar loop.
1862   void collectLoopUniforms(ElementCount VF);
1863 
1864   /// Collect the instructions that are scalar after vectorization. An
1865   /// instruction is scalar if it is known to be uniform or will be scalarized
1866   /// during vectorization. collectLoopScalars should only add non-uniform nodes
1867   /// to the list if they are used by a load/store instruction that is marked as
1868   /// CM_Scalarize. Non-uniform scalarized instructions will be represented by
1869   /// VF values in the vectorized loop, each corresponding to an iteration of
1870   /// the original scalar loop.
1871   void collectLoopScalars(ElementCount VF);
1872 
  /// Keeps the cost-model vectorization decisions and costs for instructions.
  /// Right now it is used for memory instructions only.
1875   using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1876                                 std::pair<InstWidening, InstructionCost>>;
1877 
1878   DecisionList WideningDecisions;
1879 
1880   /// Returns true if \p V is expected to be vectorized and it needs to be
1881   /// extracted.
1882   bool needsExtract(Value *V, ElementCount VF) const {
1883     Instruction *I = dyn_cast<Instruction>(V);
1884     if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1885         TheLoop->isLoopInvariant(I))
1886       return false;
1887 
1888     // Assume we can vectorize V (and hence we need extraction) if the
1889     // scalars are not computed yet. This can happen, because it is called
1890     // via getScalarizationOverhead from setCostBasedWideningDecision, before
1891     // the scalars are collected. That should be a safe assumption in most
1892     // cases, because we check if the operands have vectorizable types
1893     // beforehand in LoopVectorizationLegality.
1894     return Scalars.find(VF) == Scalars.end() ||
1895            !isScalarAfterVectorization(I, VF);
1896   };
1897 
1898   /// Returns a range containing only operands needing to be extracted.
1899   SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1900                                                    ElementCount VF) const {
1901     return SmallVector<Value *, 4>(make_filter_range(
1902         Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
1903   }
1904 
1905   /// Determines if we have the infrastructure to vectorize loop \p L and its
1906   /// epilogue, assuming the main loop is vectorized by \p VF.
1907   bool isCandidateForEpilogueVectorization(const Loop &L,
1908                                            const ElementCount VF) const;
1909 
1910   /// Returns true if epilogue vectorization is considered profitable, and
1911   /// false otherwise.
1912   /// \p VF is the vectorization factor chosen for the original loop.
1913   bool isEpilogueVectorizationProfitable(const ElementCount VF) const;
1914 
1915 public:
1916   /// The loop that we evaluate.
1917   Loop *TheLoop;
1918 
1919   /// Predicated scalar evolution analysis.
1920   PredicatedScalarEvolution &PSE;
1921 
1922   /// Loop Info analysis.
1923   LoopInfo *LI;
1924 
1925   /// Vectorization legality.
1926   LoopVectorizationLegality *Legal;
1927 
1928   /// Vector target information.
1929   const TargetTransformInfo &TTI;
1930 
1931   /// Target Library Info.
1932   const TargetLibraryInfo *TLI;
1933 
1934   /// Demanded bits analysis.
1935   DemandedBits *DB;
1936 
1937   /// Assumption cache.
1938   AssumptionCache *AC;
1939 
1940   /// Interface to emit optimization remarks.
1941   OptimizationRemarkEmitter *ORE;
1942 
1943   const Function *TheFunction;
1944 
1945   /// Loop Vectorize Hint.
1946   const LoopVectorizeHints *Hints;
1947 
  /// The interleaved access information contains groups of interleaved accesses
  /// with the same stride that are close to each other.
1950   InterleavedAccessInfo &InterleaveInfo;
1951 
1952   /// Values to ignore in the cost model.
1953   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1954 
1955   /// Values to ignore in the cost model when VF > 1.
1956   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1957 
1958   /// All element types found in the loop.
1959   SmallPtrSet<Type *, 16> ElementTypesInLoop;
1960 
1961   /// Profitable vector factors.
1962   SmallVector<VectorizationFactor, 8> ProfitableVFs;
1963 };
1964 } // end namespace llvm
1965 
1966 /// Helper struct to manage generating runtime checks for vectorization.
1967 ///
/// The runtime checks are created up-front in temporary blocks, un-linked
/// from the existing IR, to allow better cost estimation. After deciding to
/// vectorize, the checks are moved back into the IR. If we decide not to
/// vectorize, the temporary blocks are removed completely.
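///
/// Typical usage (a sketch): Create() builds the check blocks while the cost
/// of vectorization is being estimated; if we decide to vectorize,
/// emitSCEVChecks() and emitMemRuntimeChecks() wire the blocks into the CFG;
/// otherwise the destructor removes the unused blocks again.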
1972 class GeneratedRTChecks {
1973   /// Basic block which contains the generated SCEV checks, if any.
1974   BasicBlock *SCEVCheckBlock = nullptr;
1975 
1976   /// The value representing the result of the generated SCEV checks. If it is
1977   /// nullptr, either no SCEV checks have been generated or they have been used.
1978   Value *SCEVCheckCond = nullptr;
1979 
1980   /// Basic block which contains the generated memory runtime checks, if any.
1981   BasicBlock *MemCheckBlock = nullptr;
1982 
1983   /// The value representing the result of the generated memory runtime checks.
1984   /// If it is nullptr, either no memory runtime checks have been generated or
1985   /// they have been used.
1986   Value *MemRuntimeCheckCond = nullptr;
1987 
1988   DominatorTree *DT;
1989   LoopInfo *LI;
1990 
1991   SCEVExpander SCEVExp;
1992   SCEVExpander MemCheckExp;
1993 
1994 public:
1995   GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI,
1996                     const DataLayout &DL)
1997       : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"),
1998         MemCheckExp(SE, DL, "scev.check") {}
1999 
2000   /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
2001   /// accurately estimate the cost of the runtime checks. The blocks are
  /// un-linked from the IR and are added back during vector code generation. If
2003   /// there is no vector code generation, the check blocks are removed
2004   /// completely.
2005   void Create(Loop *L, const LoopAccessInfo &LAI,
2006               const SCEVUnionPredicate &UnionPred) {
2007 
2008     BasicBlock *LoopHeader = L->getHeader();
2009     BasicBlock *Preheader = L->getLoopPreheader();
2010 
2011     // Use SplitBlock to create blocks for SCEV & memory runtime checks to
2012     // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
2013     // may be used by SCEVExpander. The blocks will be un-linked from their
2014     // predecessors and removed from LI & DT at the end of the function.
2015     if (!UnionPred.isAlwaysTrue()) {
2016       SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
2017                                   nullptr, "vector.scevcheck");
2018 
2019       SCEVCheckCond = SCEVExp.expandCodeForPredicate(
2020           &UnionPred, SCEVCheckBlock->getTerminator());
2021     }
2022 
2023     const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
2024     if (RtPtrChecking.Need) {
2025       auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
2026       MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
2027                                  "vector.memcheck");
2028 
2029       MemRuntimeCheckCond =
2030           addRuntimeChecks(MemCheckBlock->getTerminator(), L,
2031                            RtPtrChecking.getChecks(), MemCheckExp);
2032       assert(MemRuntimeCheckCond &&
2033              "no RT checks generated although RtPtrChecking "
2034              "claimed checks are required");
2035     }
2036 
2037     if (!MemCheckBlock && !SCEVCheckBlock)
2038       return;
2039 
    // Unhook the temporary blocks with the checks and update the various
    // affected places accordingly.
2042     if (SCEVCheckBlock)
2043       SCEVCheckBlock->replaceAllUsesWith(Preheader);
2044     if (MemCheckBlock)
2045       MemCheckBlock->replaceAllUsesWith(Preheader);
2046 
2047     if (SCEVCheckBlock) {
2048       SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
2049       new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
2050       Preheader->getTerminator()->eraseFromParent();
2051     }
2052     if (MemCheckBlock) {
2053       MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
2054       new UnreachableInst(Preheader->getContext(), MemCheckBlock);
2055       Preheader->getTerminator()->eraseFromParent();
2056     }
2057 
2058     DT->changeImmediateDominator(LoopHeader, Preheader);
2059     if (MemCheckBlock) {
2060       DT->eraseNode(MemCheckBlock);
2061       LI->removeBlock(MemCheckBlock);
2062     }
2063     if (SCEVCheckBlock) {
2064       DT->eraseNode(SCEVCheckBlock);
2065       LI->removeBlock(SCEVCheckBlock);
2066     }
2067   }
2068 
2069   /// Remove the created SCEV & memory runtime check blocks & instructions, if
2070   /// unused.
2071   ~GeneratedRTChecks() {
2072     SCEVExpanderCleaner SCEVCleaner(SCEVExp);
2073     SCEVExpanderCleaner MemCheckCleaner(MemCheckExp);
2074     if (!SCEVCheckCond)
2075       SCEVCleaner.markResultUsed();
2076 
2077     if (!MemRuntimeCheckCond)
2078       MemCheckCleaner.markResultUsed();
2079 
2080     if (MemRuntimeCheckCond) {
2081       auto &SE = *MemCheckExp.getSE();
2082       // Memory runtime check generation creates compares that use expanded
2083       // values. Remove them before running the SCEVExpanderCleaners.
2084       for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
2085         if (MemCheckExp.isInsertedInstruction(&I))
2086           continue;
2087         SE.forgetValue(&I);
2088         I.eraseFromParent();
2089       }
2090     }
2091     MemCheckCleaner.cleanup();
2092     SCEVCleaner.cleanup();
2093 
2094     if (SCEVCheckCond)
2095       SCEVCheckBlock->eraseFromParent();
2096     if (MemRuntimeCheckCond)
2097       MemCheckBlock->eraseFromParent();
2098   }
2099 
2100   /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and
2101   /// adjusts the branches to branch to the vector preheader or \p Bypass,
2102   /// depending on the generated condition.
2103   BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass,
2104                              BasicBlock *LoopVectorPreHeader,
2105                              BasicBlock *LoopExitBlock) {
2106     if (!SCEVCheckCond)
2107       return nullptr;
2108     if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond))
2109       if (C->isZero())
2110         return nullptr;
2111 
2112     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2113 
2114     BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock);
2115     // Create new preheader for vector loop.
2116     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2117       PL->addBasicBlockToLoop(SCEVCheckBlock, *LI);
2118 
2119     SCEVCheckBlock->getTerminator()->eraseFromParent();
2120     SCEVCheckBlock->moveBefore(LoopVectorPreHeader);
2121     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2122                                                 SCEVCheckBlock);
2123 
2124     DT->addNewBlock(SCEVCheckBlock, Pred);
2125     DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock);
2126 
2127     ReplaceInstWithInst(
2128         SCEVCheckBlock->getTerminator(),
2129         BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond));
2130     // Mark the check as used, to prevent it from being removed during cleanup.
2131     SCEVCheckCond = nullptr;
2132     return SCEVCheckBlock;
2133   }
2134 
2135   /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts
2136   /// the branches to branch to the vector preheader or \p Bypass, depending on
2137   /// the generated condition.
2138   BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass,
2139                                    BasicBlock *LoopVectorPreHeader) {
2140     // Check if we generated code that checks in runtime if arrays overlap.
2141     if (!MemRuntimeCheckCond)
2142       return nullptr;
2143 
2144     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2145     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2146                                                 MemCheckBlock);
2147 
2148     DT->addNewBlock(MemCheckBlock, Pred);
2149     DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock);
2150     MemCheckBlock->moveBefore(LoopVectorPreHeader);
2151 
2152     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2153       PL->addBasicBlockToLoop(MemCheckBlock, *LI);
2154 
2155     ReplaceInstWithInst(
2156         MemCheckBlock->getTerminator(),
2157         BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond));
2158     MemCheckBlock->getTerminator()->setDebugLoc(
2159         Pred->getTerminator()->getDebugLoc());
2160 
2161     // Mark the check as used, to prevent it from being removed during cleanup.
2162     MemRuntimeCheckCond = nullptr;
2163     return MemCheckBlock;
2164   }
2165 };
2166 
2167 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
// vectorization. The loop needs to be annotated with #pragma omp simd
// simdlen(#) or #pragma clang loop vectorize(enable) vectorize_width(#). If the
2170 // vector length information is not provided, vectorization is not considered
2171 // explicit. Interleave hints are not allowed either. These limitations will be
2172 // relaxed in the future.
// Please note that we are currently forced to abuse the pragma 'clang
// vectorize' semantics. This pragma provides *auto-vectorization hints*
2175 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
2176 // provides *explicit vectorization hints* (LV can bypass legal checks and
2177 // assume that vectorization is legal). However, both hints are implemented
2178 // using the same metadata (llvm.loop.vectorize, processed by
2179 // LoopVectorizeHints). This will be fixed in the future when the native IR
2180 // representation for pragma 'omp simd' is introduced.
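// For example (illustrative), an outer loop annotated with
//   #pragma omp simd simdlen(4)
// or
//   #pragma clang loop vectorize(enable) vectorize_width(4)
// is accepted here, while the same pragmas without a vector length are not.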
2181 static bool isExplicitVecOuterLoop(Loop *OuterLp,
2182                                    OptimizationRemarkEmitter *ORE) {
2183   assert(!OuterLp->isInnermost() && "This is not an outer loop");
2184   LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
2185 
2186   // Only outer loops with an explicit vectorization hint are supported.
2187   // Unannotated outer loops are ignored.
2188   if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
2189     return false;
2190 
2191   Function *Fn = OuterLp->getHeader()->getParent();
2192   if (!Hints.allowVectorization(Fn, OuterLp,
2193                                 true /*VectorizeOnlyWhenForced*/)) {
2194     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
2195     return false;
2196   }
2197 
2198   if (Hints.getInterleave() > 1) {
2199     // TODO: Interleave support is future work.
2200     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
2201                          "outer loops.\n");
2202     Hints.emitRemarkWithHints();
2203     return false;
2204   }
2205 
2206   return true;
2207 }
2208 
2209 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
2210                                   OptimizationRemarkEmitter *ORE,
2211                                   SmallVectorImpl<Loop *> &V) {
2212   // Collect inner loops and outer loops without irreducible control flow. For
2213   // now, only collect outer loops that have explicit vectorization hints. If we
2214   // are stress testing the VPlan H-CFG construction, we collect the outermost
2215   // loop of every loop nest.
2216   if (L.isInnermost() || VPlanBuildStressTest ||
2217       (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
2218     LoopBlocksRPO RPOT(&L);
2219     RPOT.perform(LI);
2220     if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
2221       V.push_back(&L);
2222       // TODO: Collect inner loops inside marked outer loops in case
2223       // vectorization fails for the outer loop. Do not invoke
2224       // 'containsIrreducibleCFG' again for inner loops when the outer loop is
2225       // already known to be reducible. We can use an inherited attribute for
2226       // that.
2227       return;
2228     }
2229   }
2230   for (Loop *InnerL : L)
2231     collectSupportedLoops(*InnerL, LI, ORE, V);
2232 }
2233 
2234 namespace {
2235 
2236 /// The LoopVectorize Pass.
2237 struct LoopVectorize : public FunctionPass {
2238   /// Pass identification, replacement for typeid
2239   static char ID;
2240 
2241   LoopVectorizePass Impl;
2242 
2243   explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
2244                          bool VectorizeOnlyWhenForced = false)
2245       : FunctionPass(ID),
2246         Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
2247     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
2248   }
2249 
2250   bool runOnFunction(Function &F) override {
2251     if (skipFunction(F))
2252       return false;
2253 
2254     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2255     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2256     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
2257     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2258     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
2259     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
2260     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
2261     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2262     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2263     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
2264     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
2265     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2266     auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
2267 
2268     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
2269         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
2270 
2271     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
2272                         GetLAA, *ORE, PSI).MadeAnyChange;
2273   }
2274 
2275   void getAnalysisUsage(AnalysisUsage &AU) const override {
2276     AU.addRequired<AssumptionCacheTracker>();
2277     AU.addRequired<BlockFrequencyInfoWrapperPass>();
2278     AU.addRequired<DominatorTreeWrapperPass>();
2279     AU.addRequired<LoopInfoWrapperPass>();
2280     AU.addRequired<ScalarEvolutionWrapperPass>();
2281     AU.addRequired<TargetTransformInfoWrapperPass>();
2282     AU.addRequired<AAResultsWrapperPass>();
2283     AU.addRequired<LoopAccessLegacyAnalysis>();
2284     AU.addRequired<DemandedBitsWrapperPass>();
2285     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2286     AU.addRequired<InjectTLIMappingsLegacy>();
2287 
2288     // We currently do not preserve loopinfo/dominator analyses with outer loop
2289     // vectorization. Until this is addressed, mark these analyses as preserved
2290     // only for non-VPlan-native path.
2291     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
2292     if (!EnableVPlanNativePath) {
2293       AU.addPreserved<LoopInfoWrapperPass>();
2294       AU.addPreserved<DominatorTreeWrapperPass>();
2295     }
2296 
2297     AU.addPreserved<BasicAAWrapperPass>();
2298     AU.addPreserved<GlobalsAAWrapperPass>();
2299     AU.addRequired<ProfileSummaryInfoWrapperPass>();
2300   }
2301 };
2302 
2303 } // end anonymous namespace
2304 
2305 //===----------------------------------------------------------------------===//
2306 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
2307 // LoopVectorizationCostModel and LoopVectorizationPlanner.
2308 //===----------------------------------------------------------------------===//
2309 
2310 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
  // We need to place the broadcast of invariant variables outside the loop,
  // but only if it's proven safe to do so. Otherwise, the broadcast will be
  // placed inside the vector loop body.
2314   Instruction *Instr = dyn_cast<Instruction>(V);
2315   bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
2316                      (!Instr ||
2317                       DT->dominates(Instr->getParent(), LoopVectorPreHeader));
2318   // Place the code for broadcasting invariant variables in the new preheader.
2319   IRBuilder<>::InsertPointGuard Guard(Builder);
2320   if (SafeToHoist)
2321     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2322 
2323   // Broadcast the scalar into all locations in the vector.
2324   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
2325 
2326   return Shuf;
2327 }
2328 
2329 /// This function adds
2330 /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
/// to each vector element of Val. The sequence starts at StartIdx.
/// \p BinOp is only relevant for FP induction variables.
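/// For example (illustrative), for VF = 4, StartIdx = 0 and an integer step
/// %s, lane i of the result is Val[i] + i * %s, i.e. Val is offset by
/// <0, %s, 2 * %s, 3 * %s>.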
2333 static Value *getStepVector(Value *Val, Value *StartIdx, Value *Step,
2334                             Instruction::BinaryOps BinOp, ElementCount VF,
2335                             IRBuilder<> &Builder) {
2336   if (VF.isScalar()) {
2337     // When unrolling and the VF is 1, we only need to add a simple scalar.
2338     Type *Ty = Val->getType();
2339     assert(!Ty->isVectorTy() && "Val must be a scalar");
2340 
2341     if (Ty->isFloatingPointTy()) {
2342       // Floating-point operations inherit FMF via the builder's flags.
2343       Value *MulOp = Builder.CreateFMul(StartIdx, Step);
2344       return Builder.CreateBinOp(BinOp, Val, MulOp);
2345     }
2346     return Builder.CreateAdd(Val, Builder.CreateMul(StartIdx, Step),
2347                              "induction");
2348   }
2349 
2350   // Create and check the types.
2351   auto *ValVTy = cast<VectorType>(Val->getType());
2352   ElementCount VLen = ValVTy->getElementCount();
2353 
2354   Type *STy = Val->getType()->getScalarType();
2355   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
2356          "Induction Step must be an integer or FP");
2357   assert(Step->getType() == STy && "Step has wrong type");
2358 
2359   SmallVector<Constant *, 8> Indices;
2360 
2361   // Create a vector of consecutive numbers from zero to VF.
2362   VectorType *InitVecValVTy = ValVTy;
2363   Type *InitVecValSTy = STy;
2364   if (STy->isFloatingPointTy()) {
2365     InitVecValSTy =
2366         IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
2367     InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
2368   }
2369   Value *InitVec = Builder.CreateStepVector(InitVecValVTy);
2370 
2371   // Splat the StartIdx
2372   Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx);
2373 
2374   if (STy->isIntegerTy()) {
2375     InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);
2376     Step = Builder.CreateVectorSplat(VLen, Step);
2377     assert(Step->getType() == Val->getType() && "Invalid step vec");
2378     // FIXME: The newly created binary instructions should contain nsw/nuw
2379     // flags, which can be found from the original scalar operations.
2380     Step = Builder.CreateMul(InitVec, Step);
2381     return Builder.CreateAdd(Val, Step, "induction");
2382   }
2383 
2384   // Floating point induction.
2385   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
2386          "Binary Opcode should be specified for FP induction");
2387   InitVec = Builder.CreateUIToFP(InitVec, ValVTy);
2388   InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat);
2389 
2390   Step = Builder.CreateVectorSplat(VLen, Step);
2391   Value *MulOp = Builder.CreateFMul(InitVec, Step);
2392   return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
2393 }
2394 
2395 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
2396     const InductionDescriptor &II, Value *Step, Value *Start,
2397     Instruction *EntryVal, VPValue *Def, VPTransformState &State) {
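  // Conceptually, for an integer IV with start 0, step 1, VF = 4 and UF = 1,
  // this produces IR along the lines of (an illustration only, not taken from
  // an actual test):
  //   vector.body:
  //     %vec.ind = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ],
  //                              [ %vec.ind.next, %vector.body ]
  //     ...
  //     %vec.ind.next = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>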
2398   IRBuilder<> &Builder = State.Builder;
2399   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
2400          "Expected either an induction phi-node or a truncate of it!");
2401 
  // Construct the initial value of the vector IV in the vector loop preheader.
2403   auto CurrIP = Builder.saveIP();
2404   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2405   if (isa<TruncInst>(EntryVal)) {
2406     assert(Start->getType()->isIntegerTy() &&
2407            "Truncation requires an integer type");
2408     auto *TruncType = cast<IntegerType>(EntryVal->getType());
2409     Step = Builder.CreateTrunc(Step, TruncType);
2410     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
2411   }
2412 
2413   Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0);
2414   Value *SplatStart = Builder.CreateVectorSplat(State.VF, Start);
2415   Value *SteppedStart = getStepVector(
2416       SplatStart, Zero, Step, II.getInductionOpcode(), State.VF, State.Builder);
2417 
2418   // We create vector phi nodes for both integer and floating-point induction
2419   // variables. Here, we determine the kind of arithmetic we will perform.
2420   Instruction::BinaryOps AddOp;
2421   Instruction::BinaryOps MulOp;
2422   if (Step->getType()->isIntegerTy()) {
2423     AddOp = Instruction::Add;
2424     MulOp = Instruction::Mul;
2425   } else {
2426     AddOp = II.getInductionOpcode();
2427     MulOp = Instruction::FMul;
2428   }
2429 
2430   // Multiply the vectorization factor by the step using integer or
2431   // floating-point arithmetic as appropriate.
2432   Type *StepType = Step->getType();
2433   Value *RuntimeVF;
2434   if (Step->getType()->isFloatingPointTy())
2435     RuntimeVF = getRuntimeVFAsFloat(Builder, StepType, State.VF);
2436   else
2437     RuntimeVF = getRuntimeVF(Builder, StepType, State.VF);
2438   Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF);
2439 
2440   // Create a vector splat to use in the induction update.
2441   //
2442   // FIXME: If the step is non-constant, we create the vector splat with
2443   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
2444   //        handle a constant vector splat.
2445   Value *SplatVF = isa<Constant>(Mul)
2446                        ? ConstantVector::getSplat(State.VF, cast<Constant>(Mul))
2447                        : Builder.CreateVectorSplat(State.VF, Mul);
2448   Builder.restoreIP(CurrIP);
2449 
2450   // We may need to add the step a number of times, depending on the unroll
2451   // factor. The last of those goes into the PHI.
2452   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
2453                                     &*LoopVectorBody->getFirstInsertionPt());
2454   VecInd->setDebugLoc(EntryVal->getDebugLoc());
2455   Instruction *LastInduction = VecInd;
2456   for (unsigned Part = 0; Part < UF; ++Part) {
2457     State.set(Def, LastInduction, Part);
2458 
2459     if (isa<TruncInst>(EntryVal))
2460       addMetadata(LastInduction, EntryVal);
2461 
2462     LastInduction = cast<Instruction>(
2463         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"));
2464     LastInduction->setDebugLoc(EntryVal->getDebugLoc());
2465   }
2466 
2467   // Move the last step to the end of the latch block. This ensures consistent
2468   // placement of all induction updates.
2469   auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
2470   auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
2471   LastInduction->moveBefore(Br);
2472   LastInduction->setName("vec.ind.next");
2473 
2474   VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
2475   VecInd->addIncoming(LastInduction, LoopVectorLatch);
2476 }
2477 
2478 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
2479   return Cost->isScalarAfterVectorization(I, VF) ||
2480          Cost->isProfitableToScalarize(I, VF);
2481 }
2482 
2483 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
2484   if (shouldScalarizeInstruction(IV))
2485     return true;
2486   auto isScalarInst = [&](User *U) -> bool {
2487     auto *I = cast<Instruction>(U);
2488     return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
2489   };
2490   return llvm::any_of(IV->users(), isScalarInst);
2491 }
2492 
2493 /// Returns true if \p ID starts at 0 and has a step of 1.
2494 static bool isCanonicalID(const InductionDescriptor &ID) {
2495   if (!ID.getConstIntStepValue() || !ID.getConstIntStepValue()->isOne())
2496     return false;
2497   auto *StartC = dyn_cast<ConstantInt>(ID.getStartValue());
2498   return StartC && StartC->isZero();
2499 }
2500 
2501 void InnerLoopVectorizer::widenIntOrFpInduction(
2502     PHINode *IV, const InductionDescriptor &ID, Value *Start, TruncInst *Trunc,
2503     VPValue *Def, VPTransformState &State, Value *CanonicalIV) {
2504   IRBuilder<> &Builder = State.Builder;
2505   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
2506   assert(!State.VF.isZero() && "VF must be non-zero");
2507 
2508   // The value from the original loop to which we are mapping the new induction
2509   // variable.
2510   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
2511 
2512   auto &DL = EntryVal->getModule()->getDataLayout();
2513 
  // Generate code for the induction step. Note that induction steps are
  // required to be loop-invariant.
2516   auto CreateStepValue = [&](const SCEV *Step) -> Value * {
2517     assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) &&
2518            "Induction step should be loop invariant");
2519     if (PSE.getSE()->isSCEVable(IV->getType())) {
2520       SCEVExpander Exp(*PSE.getSE(), DL, "induction");
2521       return Exp.expandCodeFor(Step, Step->getType(),
2522                                State.CFG.VectorPreHeader->getTerminator());
2523     }
2524     return cast<SCEVUnknown>(Step)->getValue();
2525   };
2526 
2527   // The scalar value to broadcast. This is derived from the canonical
2528   // induction variable. If a truncation type is given, truncate the canonical
2529   // induction variable and step. Otherwise, derive these values from the
2530   // induction descriptor.
2531   auto CreateScalarIV = [&](Value *&Step) -> Value * {
2532     Value *ScalarIV = CanonicalIV;
2533     Type *NeededType = IV->getType();
2534     if (!isCanonicalID(ID) || ScalarIV->getType() != NeededType) {
2535       ScalarIV =
2536           NeededType->isIntegerTy()
2537               ? Builder.CreateSExtOrTrunc(ScalarIV, NeededType)
2538               : Builder.CreateCast(Instruction::SIToFP, ScalarIV, NeededType);
2539       ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID,
2540                                       State.CFG.PrevBB);
2541       ScalarIV->setName("offset.idx");
2542     }
2543     if (Trunc) {
2544       auto *TruncType = cast<IntegerType>(Trunc->getType());
2545       assert(Step->getType()->isIntegerTy() &&
2546              "Truncation requires an integer step");
2547       ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
2548       Step = Builder.CreateTrunc(Step, TruncType);
2549     }
2550     return ScalarIV;
2551   };
2552 
  // Create the vector values from the scalar IV, for the case where we do not
  // create a vector IV.
2555   auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) {
2556     Value *Broadcasted = getBroadcastInstrs(ScalarIV);
2557     for (unsigned Part = 0; Part < UF; ++Part) {
2558       Value *StartIdx;
2559       if (Step->getType()->isFloatingPointTy())
2560         StartIdx =
2561             getRuntimeVFAsFloat(Builder, Step->getType(), State.VF * Part);
2562       else
2563         StartIdx = getRuntimeVF(Builder, Step->getType(), State.VF * Part);
2564 
2565       Value *EntryPart =
2566           getStepVector(Broadcasted, StartIdx, Step, ID.getInductionOpcode(),
2567                         State.VF, State.Builder);
2568       State.set(Def, EntryPart, Part);
2569       if (Trunc)
2570         addMetadata(EntryPart, Trunc);
2571     }
2572   };
2573 
2574   // Fast-math-flags propagate from the original induction instruction.
2575   IRBuilder<>::FastMathFlagGuard FMFG(Builder);
2576   if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp()))
2577     Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags());
2578 
2579   // Now do the actual transformations, and start with creating the step value.
2580   Value *Step = CreateStepValue(ID.getStep());
2581   if (State.VF.isScalar()) {
2582     Value *ScalarIV = CreateScalarIV(Step);
2583     Type *ScalarTy = IntegerType::get(ScalarIV->getContext(),
2584                                       Step->getType()->getScalarSizeInBits());
2585     for (unsigned Part = 0; Part < UF; ++Part) {
2586       Value *StartIdx = ConstantInt::get(ScalarTy, Part);
2587       Instruction::BinaryOps MulOp = Instruction::Mul;
2588       if (Step->getType()->isFloatingPointTy()) {
2589         StartIdx = Builder.CreateUIToFP(StartIdx, Step->getType());
2590         MulOp = Instruction::FMul;
2591       }
2592 
2593       Value *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
2594       Value *EntryPart = Builder.CreateBinOp(ID.getInductionOpcode(), ScalarIV,
2595                                              Mul, "induction");
2596       State.set(Def, EntryPart, Part);
2597       if (Trunc) {
2598         assert(!Step->getType()->isFloatingPointTy() &&
2599                "fp inductions shouldn't be truncated");
2600         addMetadata(EntryPart, Trunc);
2601       }
2602     }
2603     return;
2604   }
2605 
2606   // Determine if we want a scalar version of the induction variable. This is
2607   // true if the induction variable itself is not widened, or if it has at
2608   // least one user in the loop that is not widened.
2609   auto NeedsScalarIV = needsScalarInduction(EntryVal);
2610   if (!NeedsScalarIV) {
2611     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, State);
2612     return;
2613   }
2614 
2615   // Try to create a new independent vector induction variable. If we can't
2616   // create the phi node, we will splat the scalar induction variable in each
2617   // loop iteration.
2618   if (!shouldScalarizeInstruction(EntryVal)) {
2619     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, State);
2620     Value *ScalarIV = CreateScalarIV(Step);
2621     // Create scalar steps that can be used by instructions we will later
2622     // scalarize. Note that the addition of the scalar steps will not increase
2623     // the number of instructions in the loop in the common case prior to
2624     // InstCombine. We will be trading one vector extract for each scalar step.
2625     buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, State);
2626     return;
2627   }
2628 
  // All IV users are scalar instructions, so only emit a scalar IV, not a
  // vectorized IV, except when we tail-fold: in that case the splat IV feeds
  // the predicate used by the masked loads/stores.
2632   Value *ScalarIV = CreateScalarIV(Step);
2633   if (!Cost->isScalarEpilogueAllowed())
2634     CreateSplatIV(ScalarIV, Step);
2635   buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, State);
2636 }
2637 
2638 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
2639                                            Instruction *EntryVal,
2640                                            const InductionDescriptor &ID,
2641                                            VPValue *Def,
2642                                            VPTransformState &State) {
2643   IRBuilder<> &Builder = State.Builder;
2644   // We shouldn't have to build scalar steps if we aren't vectorizing.
2645   assert(State.VF.isVector() && "VF should be greater than one");
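  // Conceptually, this materializes, for each unroll part and lane, the scalar
  // value ScalarIV + (Part * VF + Lane) * Step, using the FP opcodes from the
  // induction descriptor for floating-point IVs. For example, with VF = 4,
  // UF = 2, ScalarIV = %i and Step = 2, lane 1 of part 1 receives
  // %i + (1 * 4 + 1) * 2 = %i + 10.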
  // Get the value type and ensure it and the step have the same type.
2647   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
2648   assert(ScalarIVTy == Step->getType() &&
2649          "Val and Step should have the same type");
2650 
2651   // We build scalar steps for both integer and floating-point induction
2652   // variables. Here, we determine the kind of arithmetic we will perform.
2653   Instruction::BinaryOps AddOp;
2654   Instruction::BinaryOps MulOp;
2655   if (ScalarIVTy->isIntegerTy()) {
2656     AddOp = Instruction::Add;
2657     MulOp = Instruction::Mul;
2658   } else {
2659     AddOp = ID.getInductionOpcode();
2660     MulOp = Instruction::FMul;
2661   }
2662 
2663   // Determine the number of scalars we need to generate for each unroll
2664   // iteration. If EntryVal is uniform, we only need to generate the first
2665   // lane. Otherwise, we generate all VF values.
2666   bool IsUniform =
2667       Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), State.VF);
2668   unsigned Lanes = IsUniform ? 1 : State.VF.getKnownMinValue();
2669   // Compute the scalar steps and save the results in State.
2670   Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
2671                                      ScalarIVTy->getScalarSizeInBits());
2672   Type *VecIVTy = nullptr;
2673   Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr;
2674   if (!IsUniform && State.VF.isScalable()) {
2675     VecIVTy = VectorType::get(ScalarIVTy, State.VF);
2676     UnitStepVec =
2677         Builder.CreateStepVector(VectorType::get(IntStepTy, State.VF));
2678     SplatStep = Builder.CreateVectorSplat(State.VF, Step);
2679     SplatIV = Builder.CreateVectorSplat(State.VF, ScalarIV);
2680   }
2681 
2682   for (unsigned Part = 0; Part < State.UF; ++Part) {
2683     Value *StartIdx0 = createStepForVF(Builder, IntStepTy, State.VF, Part);
2684 
2685     if (!IsUniform && State.VF.isScalable()) {
2686       auto *SplatStartIdx = Builder.CreateVectorSplat(State.VF, StartIdx0);
2687       auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec);
2688       if (ScalarIVTy->isFloatingPointTy())
2689         InitVec = Builder.CreateSIToFP(InitVec, VecIVTy);
2690       auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep);
2691       auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul);
2692       State.set(Def, Add, Part);
2693       // It's useful to record the lane values too for the known minimum number
2694       // of elements so we do those below. This improves the code quality when
2695       // trying to extract the first element, for example.
2696     }
2697 
2698     if (ScalarIVTy->isFloatingPointTy())
2699       StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy);
2700 
2701     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2702       Value *StartIdx = Builder.CreateBinOp(
2703           AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane));
2704       // The step returned by `createStepForVF` is a runtime-evaluated value
2705       // when VF is scalable. Otherwise, it should be folded into a Constant.
2706       assert((State.VF.isScalable() || isa<Constant>(StartIdx)) &&
2707              "Expected StartIdx to be folded to a constant when VF is not "
2708              "scalable");
2709       auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
2710       auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul);
2711       State.set(Def, Add, VPIteration(Part, Lane));
2712     }
2713   }
2714 }
2715 
2716 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def,
2717                                                     const VPIteration &Instance,
2718                                                     VPTransformState &State) {
2719   Value *ScalarInst = State.get(Def, Instance);
2720   Value *VectorValue = State.get(Def, Instance.Part);
2721   VectorValue = Builder.CreateInsertElement(
2722       VectorValue, ScalarInst,
2723       Instance.Lane.getAsRuntimeExpr(State.Builder, VF));
2724   State.set(Def, VectorValue, Instance.Part);
2725 }
2726 
2727 // Return whether we allow using masked interleave-groups (for dealing with
2728 // strided loads/stores that reside in predicated blocks, or for dealing
2729 // with gaps).
2730 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2731   // If an override option has been passed in for interleaved accesses, use it.
2732   if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2733     return EnableMaskedInterleavedMemAccesses;
2734 
2735   return TTI.enableMaskedInterleavedAccessVectorization();
2736 }
2737 
2738 // Try to vectorize the interleave group that \p Instr belongs to.
2739 //
2740 // E.g. Translate following interleaved load group (factor = 3):
2741 //   for (i = 0; i < N; i+=3) {
2742 //     R = Pic[i];             // Member of index 0
2743 //     G = Pic[i+1];           // Member of index 1
2744 //     B = Pic[i+2];           // Member of index 2
2745 //     ... // do something to R, G, B
2746 //   }
2747 // To:
2748 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2749 //   %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9>   ; R elements
2750 //   %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10>  ; G elements
2751 //   %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11>  ; B elements
2752 //
2753 // Or translate following interleaved store group (factor = 3):
2754 //   for (i = 0; i < N; i+=3) {
2755 //     ... do something to R, G, B
2756 //     Pic[i]   = R;           // Member of index 0
2757 //     Pic[i+1] = G;           // Member of index 1
2758 //     Pic[i+2] = B;           // Member of index 2
2759 //   }
2760 // To:
2761 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2762 //   %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
2763 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2764 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2765 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
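// When the group executes under a mask (e.g. tail folding or predicated
// blocks), the per-iteration block mask is replicated once per group member
// before it is applied, e.g. for a factor-3 group with VF = 4 a block mask
// <m0,m1,m2,m3> becomes <m0,m0,m0, m1,m1,m1, m2,m2,m2, m3,m3,m3>.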
2766 void InnerLoopVectorizer::vectorizeInterleaveGroup(
2767     const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
2768     VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
2769     VPValue *BlockInMask) {
2770   Instruction *Instr = Group->getInsertPos();
2771   const DataLayout &DL = Instr->getModule()->getDataLayout();
2772 
2773   // Prepare for the vector type of the interleaved load/store.
2774   Type *ScalarTy = getLoadStoreType(Instr);
2775   unsigned InterleaveFactor = Group->getFactor();
2776   assert(!VF.isScalable() && "scalable vectors not yet supported.");
2777   auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);
2778 
2779   // Prepare for the new pointers.
2780   SmallVector<Value *, 2> AddrParts;
2781   unsigned Index = Group->getIndex(Instr);
2782 
2783   // TODO: extend the masked interleaved-group support to reversed access.
2784   assert((!BlockInMask || !Group->isReverse()) &&
2785          "Reversed masked interleave-group not supported.");
2786 
2787   // If the group is reverse, adjust the index to refer to the last vector lane
2788   // instead of the first. We adjust the index from the first vector lane,
2789   // rather than directly getting the pointer for lane VF - 1, because the
2790   // pointer operand of the interleaved access is supposed to be uniform. For
2791   // uniform instructions, we're only required to generate a value for the
2792   // first vector lane in each unroll iteration.
2793   if (Group->isReverse())
2794     Index += (VF.getKnownMinValue() - 1) * Group->getFactor();
2795 
2796   for (unsigned Part = 0; Part < UF; Part++) {
2797     Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
2798     setDebugLocFromInst(AddrPart);
2799 
    // Note that the current instruction may be a member of any index in the
    // group, so the address needs to be adjusted to point to the member of
    // index 0.
    //
    // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
    //       b = A[i];       // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
    //
    // E.g.  A[i+1] = a;     // Member of index 1
    //       A[i]   = b;     // Member of index 0
    //       A[i+2] = c;     // Member of index 2 (Current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].
2811 
2812     bool InBounds = false;
2813     if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
2814       InBounds = gep->isInBounds();
2815     AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2816     cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2817 
2818     // Cast to the vector pointer type.
2819     unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
2820     Type *PtrTy = VecTy->getPointerTo(AddressSpace);
2821     AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
2822   }
2823 
2824   setDebugLocFromInst(Instr);
2825   Value *PoisonVec = PoisonValue::get(VecTy);
2826 
2827   Value *MaskForGaps = nullptr;
2828   if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
2829     MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2830     assert(MaskForGaps && "Mask for Gaps is required but it is null");
2831   }
2832 
2833   // Vectorize the interleaved load group.
2834   if (isa<LoadInst>(Instr)) {
2835     // For each unroll part, create a wide load for the group.
2836     SmallVector<Value *, 2> NewLoads;
2837     for (unsigned Part = 0; Part < UF; Part++) {
2838       Instruction *NewLoad;
2839       if (BlockInMask || MaskForGaps) {
2840         assert(useMaskedInterleavedAccesses(*TTI) &&
2841                "masked interleaved groups are not allowed.");
2842         Value *GroupMask = MaskForGaps;
2843         if (BlockInMask) {
2844           Value *BlockInMaskPart = State.get(BlockInMask, Part);
2845           Value *ShuffledMask = Builder.CreateShuffleVector(
2846               BlockInMaskPart,
2847               createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2848               "interleaved.mask");
2849           GroupMask = MaskForGaps
2850                           ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2851                                                 MaskForGaps)
2852                           : ShuffledMask;
2853         }
2854         NewLoad =
2855             Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(),
2856                                      GroupMask, PoisonVec, "wide.masked.vec");
2857       }
2858       else
2859         NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
2860                                             Group->getAlign(), "wide.vec");
2861       Group->addMetadata(NewLoad);
2862       NewLoads.push_back(NewLoad);
2863     }
2864 
2865     // For each member in the group, shuffle out the appropriate data from the
2866     // wide loads.
2867     unsigned J = 0;
2868     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2869       Instruction *Member = Group->getMember(I);
2870 
2871       // Skip the gaps in the group.
2872       if (!Member)
2873         continue;
2874 
2875       auto StrideMask =
2876           createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
2877       for (unsigned Part = 0; Part < UF; Part++) {
2878         Value *StridedVec = Builder.CreateShuffleVector(
2879             NewLoads[Part], StrideMask, "strided.vec");
2880 
        // If this member has a different type, cast the result to that type.
2882         if (Member->getType() != ScalarTy) {
2883           assert(!VF.isScalable() && "VF is assumed to be non scalable.");
2884           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2885           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2886         }
2887 
2888         if (Group->isReverse())
2889           StridedVec = Builder.CreateVectorReverse(StridedVec, "reverse");
2890 
2891         State.set(VPDefs[J], StridedVec, Part);
2892       }
2893       ++J;
2894     }
2895     return;
2896   }
2897 
  // The subvector type for the current instruction.
2899   auto *SubVT = VectorType::get(ScalarTy, VF);
2900 
2901   // Vectorize the interleaved store group.
2902   MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2903   assert((!MaskForGaps || useMaskedInterleavedAccesses(*TTI)) &&
2904          "masked interleaved groups are not allowed.");
2905   assert((!MaskForGaps || !VF.isScalable()) &&
2906          "masking gaps for scalable vectors is not yet supported.");
2907   for (unsigned Part = 0; Part < UF; Part++) {
2908     // Collect the stored vector from each member.
2909     SmallVector<Value *, 4> StoredVecs;
2910     for (unsigned i = 0; i < InterleaveFactor; i++) {
2911       assert((Group->getMember(i) || MaskForGaps) &&
2912              "Fail to get a member from an interleaved store group");
2913       Instruction *Member = Group->getMember(i);
2914 
2915       // Skip the gaps in the group.
2916       if (!Member) {
        Value *Poison = PoisonValue::get(SubVT);
        StoredVecs.push_back(Poison);
2919         continue;
2920       }
2921 
2922       Value *StoredVec = State.get(StoredValues[i], Part);
2923 
2924       if (Group->isReverse())
2925         StoredVec = Builder.CreateVectorReverse(StoredVec, "reverse");
2926 
      // If this member has a different type, cast it to a unified type.
2929       if (StoredVec->getType() != SubVT)
2930         StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2931 
2932       StoredVecs.push_back(StoredVec);
2933     }
2934 
2935     // Concatenate all vectors into a wide vector.
2936     Value *WideVec = concatenateVectors(Builder, StoredVecs);
2937 
2938     // Interleave the elements in the wide vector.
2939     Value *IVec = Builder.CreateShuffleVector(
2940         WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor),
2941         "interleaved.vec");
2942 
2943     Instruction *NewStoreInstr;
2944     if (BlockInMask || MaskForGaps) {
2945       Value *GroupMask = MaskForGaps;
2946       if (BlockInMask) {
2947         Value *BlockInMaskPart = State.get(BlockInMask, Part);
2948         Value *ShuffledMask = Builder.CreateShuffleVector(
2949             BlockInMaskPart,
2950             createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2951             "interleaved.mask");
2952         GroupMask = MaskForGaps ? Builder.CreateBinOp(Instruction::And,
2953                                                       ShuffledMask, MaskForGaps)
2954                                 : ShuffledMask;
2955       }
2956       NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part],
2957                                                 Group->getAlign(), GroupMask);
2958     } else
2959       NewStoreInstr =
2960           Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());
2961 
2962     Group->addMetadata(NewStoreInstr);
2963   }
2964 }
2965 
2966 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
2967                                                VPReplicateRecipe *RepRecipe,
2968                                                const VPIteration &Instance,
2969                                                bool IfPredicateInstr,
2970                                                VPTransformState &State) {
2971   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
2972 
  // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated
  // for the first lane and part.
2975   if (isa<NoAliasScopeDeclInst>(Instr))
2976     if (!Instance.isFirstIteration())
2977       return;
2978 
2979   setDebugLocFromInst(Instr);
2980 
  // Does this instruction return a value?
2982   bool IsVoidRetTy = Instr->getType()->isVoidTy();
2983 
2984   Instruction *Cloned = Instr->clone();
2985   if (!IsVoidRetTy)
2986     Cloned->setName(Instr->getName() + ".cloned");
2987 
  // If the scalarized instruction contributes to the address computation of a
  // widened masked load/store which was in a basic block that needed
  // predication and is not predicated after vectorization, we can't propagate
  // poison-generating flags (nuw/nsw, exact, inbounds, etc.). The scalarized
  // instruction could feed a poison value to the base address of the widened
  // load/store.
2994   if (State.MayGeneratePoisonRecipes.contains(RepRecipe))
2995     Cloned->dropPoisonGeneratingFlags();
2996 
2997   State.Builder.SetInsertPoint(Builder.GetInsertBlock(),
2998                                Builder.GetInsertPoint());
2999   // Replace the operands of the cloned instructions with their scalar
3000   // equivalents in the new loop.
3001   for (auto &I : enumerate(RepRecipe->operands())) {
3002     auto InputInstance = Instance;
3003     VPValue *Operand = I.value();
3004     if (State.Plan->isUniformAfterVectorization(Operand))
3005       InputInstance.Lane = VPLane::getFirstLane();
3006     Cloned->setOperand(I.index(), State.get(Operand, InputInstance));
3007   }
3008   addNewMetadata(Cloned, Instr);
3009 
3010   // Place the cloned scalar in the new loop.
3011   Builder.Insert(Cloned);
3012 
3013   State.set(RepRecipe, Cloned, Instance);
3014 
  // If we just cloned a new assumption, add it to the assumption cache.
3016   if (auto *II = dyn_cast<AssumeInst>(Cloned))
3017     AC->registerAssumption(II);
3018 
3019   // End if-block.
3020   if (IfPredicateInstr)
3021     PredicatedInstructions.push_back(Cloned);
3022 }
3023 
3024 void InnerLoopVectorizer::createHeaderBranch(Loop *L) {
3025   BasicBlock *Header = L->getHeader();
3026   assert(!L->getLoopLatch() && "loop should not have a latch at this point");
3027 
3028   IRBuilder<> B(Header->getTerminator());
3029   Instruction *OldInst =
3030       getDebugLocFromInstOrOperands(Legal->getPrimaryInduction());
3031   setDebugLocFromInst(OldInst, &B);
3032 
  // Create a conditional branch from the header to the exit block and back to
  // the header, replacing the old terminator.
3035   B.CreateCondBr(B.getTrue(), L->getUniqueExitBlock(), Header);
3036 
3037   // Now we have two terminators. Remove the old one from the block.
3038   Header->getTerminator()->eraseFromParent();
3039 }
3040 
3041 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
3042   if (TripCount)
3043     return TripCount;
3044 
3045   assert(L && "Create Trip Count for null loop.");
3046   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3047   // Find the loop boundaries.
3048   ScalarEvolution *SE = PSE.getSE();
3049   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
3050   assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
3051          "Invalid loop count");
3052 
3053   Type *IdxTy = Legal->getWidestInductionType();
3054   assert(IdxTy && "No type for induction");
3055 
  // The exit count might have the type of i64 while the phi is i32. This can
  // happen if we have an induction variable that is sign extended before the
  // compare. The only way that we can get a backedge-taken count here is if
  // the induction variable was signed and, as such, will not overflow. In that
  // case the truncation is legal.
3061   if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
3062       IdxTy->getPrimitiveSizeInBits())
3063     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
3064   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
3065 
3066   // Get the total trip count from the count by adding 1.
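  // E.g. for a loop that runs its body n times, the backedge-taken count is
  // n - 1 and the total trip count is n.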
3067   const SCEV *ExitCount = SE->getAddExpr(
3068       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
3069 
3070   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
3071 
3072   // Expand the trip count and place the new instructions in the preheader.
3073   // Notice that the pre-header does not change, only the loop body.
3074   SCEVExpander Exp(*SE, DL, "induction");
3075 
3076   // Count holds the overall loop count (N).
3077   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
3078                                 L->getLoopPreheader()->getTerminator());
3079 
3080   if (TripCount->getType()->isPointerTy())
3081     TripCount =
3082         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
3083                                     L->getLoopPreheader()->getTerminator());
3084 
3085   return TripCount;
3086 }
3087 
3088 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
3089   if (VectorTripCount)
3090     return VectorTripCount;
3091 
3092   Value *TC = getOrCreateTripCount(L);
3093   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3094 
3095   Type *Ty = TC->getType();
3096   // This is where we can make the step a runtime constant.
3097   Value *Step = createStepForVF(Builder, Ty, VF, UF);
3098 
3099   // If the tail is to be folded by masking, round the number of iterations N
3100   // up to a multiple of Step instead of rounding down. This is done by first
3101   // adding Step-1 and then rounding down. Note that it's ok if this addition
3102   // overflows: the vector induction variable will eventually wrap to zero given
3103   // that it starts at zero and its Step is a power of two; the loop will then
3104   // exit, with the last early-exit vector comparison also producing all-true.
3105   if (Cost->foldTailByMasking()) {
3106     assert(isPowerOf2_32(VF.getKnownMinValue() * UF) &&
3107            "VF*UF must be a power of 2 when folding tail by masking");
3108     Value *NumLanes = getRuntimeVF(Builder, Ty, VF * UF);
3109     TC = Builder.CreateAdd(
3110         TC, Builder.CreateSub(NumLanes, ConstantInt::get(Ty, 1)), "n.rnd.up");
3111   }
3112 
3113   // Now we need to generate the expression for the part of the loop that the
3114   // vectorized body will execute. This is equal to N - (N % Step) if scalar
3115   // iterations are not required for correctness, or N - Step, otherwise. Step
3116   // is equal to the vectorization factor (number of SIMD elements) times the
3117   // unroll factor (number of SIMD instructions).
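  // For example (with no tail folding), a trip count of 17 with VF = 4 and
  // UF = 2 gives Step = 8, N % Step = 1 and a vector trip count of 16; the
  // remaining iteration runs in the scalar epilogue.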
3118   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
3119 
3120   // There are cases where we *must* run at least one iteration in the remainder
3121   // loop.  See the cost model for when this can happen.  If the step evenly
3122   // divides the trip count, we set the remainder to be equal to the step. If
3123   // the step does not evenly divide the trip count, no adjustment is necessary
3124   // since there will already be scalar iterations. Note that the minimum
3125   // iterations check ensures that N >= Step.
3126   if (Cost->requiresScalarEpilogue(VF)) {
3127     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
3128     R = Builder.CreateSelect(IsZero, Step, R);
3129   }
3130 
3131   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
3132 
3133   return VectorTripCount;
3134 }
3135 
3136 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
3137                                                    const DataLayout &DL) {
3138   // Verify that V is a vector type with same number of elements as DstVTy.
3139   auto *DstFVTy = cast<FixedVectorType>(DstVTy);
3140   unsigned VF = DstFVTy->getNumElements();
3141   auto *SrcVecTy = cast<FixedVectorType>(V->getType());
  assert((VF == SrcVecTy->getNumElements()) &&
         "Vector dimensions do not match");
3143   Type *SrcElemTy = SrcVecTy->getElementType();
3144   Type *DstElemTy = DstFVTy->getElementType();
3145   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
3146          "Vector elements must have same size");
3147 
3148   // Do a direct cast if element types are castable.
3149   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
3150     return Builder.CreateBitOrPointerCast(V, DstFVTy);
3151   }
  // V cannot be directly cast to the desired vector type.
3153   // May happen when V is a floating point vector but DstVTy is a vector of
3154   // pointers or vice-versa. Handle this using a two-step bitcast using an
3155   // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float.
3156   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
3157          "Only one type should be a pointer type");
3158   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
3159          "Only one type should be a floating point type");
3160   Type *IntTy =
3161       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
3162   auto *VecIntTy = FixedVectorType::get(IntTy, VF);
3163   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
3164   return Builder.CreateBitOrPointerCast(CastVal, DstFVTy);
3165 }
3166 
3167 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
3168                                                          BasicBlock *Bypass) {
3169   Value *Count = getOrCreateTripCount(L);
3170   // Reuse existing vector loop preheader for TC checks.
3171   // Note that new preheader block is generated for vector loop.
3172   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
3173   IRBuilder<> Builder(TCCheckBlock->getTerminator());
3174 
3175   // Generate code to check if the loop's trip count is less than VF * UF, or
3176   // equal to it in case a scalar epilogue is required; this implies that the
3177   // vector trip count is zero. This check also covers the case where adding one
3178   // to the backedge-taken count overflowed leading to an incorrect trip count
3179   // of zero. In this case we will also jump to the scalar loop.
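  // For example, with VF = 4 and UF = 2, the vector loop is bypassed when the
  // trip count is less than 8 (or less than or equal to 8 when a scalar
  // epilogue is required).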
3180   auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE
3181                                             : ICmpInst::ICMP_ULT;
3182 
3183   // If tail is to be folded, vector loop takes care of all iterations.
3184   Value *CheckMinIters = Builder.getFalse();
3185   if (!Cost->foldTailByMasking()) {
3186     Value *Step = createStepForVF(Builder, Count->getType(), VF, UF);
3187     CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
3188   }
3189   // Create new preheader for vector loop.
3190   LoopVectorPreHeader =
3191       SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
3192                  "vector.ph");
3193 
3194   assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
3195                                DT->getNode(Bypass)->getIDom()) &&
3196          "TC check is expected to dominate Bypass");
3197 
3198   // Update dominator for Bypass & LoopExit (if needed).
3199   DT->changeImmediateDominator(Bypass, TCCheckBlock);
3200   if (!Cost->requiresScalarEpilogue(VF))
3201     // If there is an epilogue which must run, there's no edge from the
    // middle block to exit blocks and thus no need to update the immediate
3203     // dominator of the exit blocks.
3204     DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
3205 
3206   ReplaceInstWithInst(
3207       TCCheckBlock->getTerminator(),
3208       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
3209   LoopBypassBlocks.push_back(TCCheckBlock);
3210 }
3211 
BasicBlock *InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
3214   BasicBlock *const SCEVCheckBlock =
3215       RTChecks.emitSCEVChecks(L, Bypass, LoopVectorPreHeader, LoopExitBlock);
3216   if (!SCEVCheckBlock)
3217     return nullptr;
3218 
3219   assert(!(SCEVCheckBlock->getParent()->hasOptSize() ||
3220            (OptForSizeBasedOnProfile &&
            Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) &&
         "Cannot SCEV check stride or overflow when optimizing for size");
3224 
3225   // Update dominator only if this is first RT check.
3226   if (LoopBypassBlocks.empty()) {
3227     DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
3228     if (!Cost->requiresScalarEpilogue(VF))
3229       // If there is an epilogue which must run, there's no edge from the
      // middle block to exit blocks and thus no need to update the immediate
3231       // dominator of the exit blocks.
3232       DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
3233   }
3234 
3235   LoopBypassBlocks.push_back(SCEVCheckBlock);
3236   AddedSafetyChecks = true;
3237   return SCEVCheckBlock;
3238 }
3239 
3240 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L,
3241                                                       BasicBlock *Bypass) {
3242   // VPlan-native path does not do any analysis for runtime checks currently.
3243   if (EnableVPlanNativePath)
3244     return nullptr;
3245 
3246   BasicBlock *const MemCheckBlock =
3247       RTChecks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader);
3248 
  // Check if we generated code that checks at runtime whether arrays overlap.
  // We put the checks into a separate block to make the more common case of
  // few elements faster.
3252   if (!MemCheckBlock)
3253     return nullptr;
3254 
3255   if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
3256     assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
3257            "Cannot emit memory checks when optimizing for size, unless forced "
3258            "to vectorize.");
3259     ORE->emit([&]() {
3260       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
3261                                         L->getStartLoc(), L->getHeader())
3262              << "Code-size may be reduced by not forcing "
3263                 "vectorization, or by source-code modifications "
3264                 "eliminating the need for runtime checks "
3265                 "(e.g., adding 'restrict').";
3266     });
3267   }
3268 
3269   LoopBypassBlocks.push_back(MemCheckBlock);
3270 
3271   AddedSafetyChecks = true;
3272 
3273   // We currently don't use LoopVersioning for the actual loop cloning but we
3274   // still use it to add the noalias metadata.
3275   LVer = std::make_unique<LoopVersioning>(
3276       *Legal->getLAI(),
3277       Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
3278       DT, PSE.getSE());
3279   LVer->prepareNoAliasMetadata();
3280   return MemCheckBlock;
3281 }
3282 
3283 Value *InnerLoopVectorizer::emitTransformedIndex(
3284     IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
3285     const InductionDescriptor &ID, BasicBlock *VectorHeader) const {
3286 
3287   SCEVExpander Exp(*SE, DL, "induction");
3288   auto Step = ID.getStep();
3289   auto StartValue = ID.getStartValue();
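  // Conceptually this computes the value the original induction variable would
  // have after Index steps: StartValue + Index * Step for integer inductions,
  // a GEP of StartValue by Index * Step elements for pointer inductions, and
  // StartValue fadd/fsub (Index * Step) for floating-point inductions (see the
  // switch over the induction kind below).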
3290   assert(Index->getType()->getScalarType() == Step->getType() &&
3291          "Index scalar type does not match StepValue type");
3292 
  // Note: the IR at this point is broken. We cannot use SE to create any new
  // SCEV and then expand it, hoping that SCEV's simplification will give us
  // more optimal code. Unfortunately, attempting to do so on invalid IR may
  // lead to various SCEV crashes. So all we can do is use the builder and rely
  // on InstCombine for future simplifications. Here we handle only some
  // trivial cases.
3299   auto CreateAdd = [&B](Value *X, Value *Y) {
3300     assert(X->getType() == Y->getType() && "Types don't match!");
3301     if (auto *CX = dyn_cast<ConstantInt>(X))
3302       if (CX->isZero())
3303         return Y;
3304     if (auto *CY = dyn_cast<ConstantInt>(Y))
3305       if (CY->isZero())
3306         return X;
3307     return B.CreateAdd(X, Y);
3308   };
3309 
3310   // We allow X to be a vector type, in which case Y will potentially be
3311   // splatted into a vector with the same element count.
3312   auto CreateMul = [&B](Value *X, Value *Y) {
3313     assert(X->getType()->getScalarType() == Y->getType() &&
3314            "Types don't match!");
3315     if (auto *CX = dyn_cast<ConstantInt>(X))
3316       if (CX->isOne())
3317         return Y;
3318     if (auto *CY = dyn_cast<ConstantInt>(Y))
3319       if (CY->isOne())
3320         return X;
3321     VectorType *XVTy = dyn_cast<VectorType>(X->getType());
3322     if (XVTy && !isa<VectorType>(Y->getType()))
3323       Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
3324     return B.CreateMul(X, Y);
3325   };
3326 
3327   // Get a suitable insert point for SCEV expansion. For blocks in the vector
3328   // loop, choose the end of the vector loop header (=VectorHeader), because
3329   // the DomTree is not kept up-to-date for additional blocks generated in the
3330   // vector loop. By using the header as insertion point, we guarantee that the
3331   // expanded instructions dominate all their uses.
3332   auto GetInsertPoint = [this, &B, VectorHeader]() {
3333     BasicBlock *InsertBB = B.GetInsertPoint()->getParent();
3334     if (InsertBB != LoopVectorBody &&
3335         LI->getLoopFor(VectorHeader) == LI->getLoopFor(InsertBB))
3336       return VectorHeader->getTerminator();
3337     return &*B.GetInsertPoint();
3338   };
3339 
3340   switch (ID.getKind()) {
3341   case InductionDescriptor::IK_IntInduction: {
3342     assert(!isa<VectorType>(Index->getType()) &&
3343            "Vector indices not supported for integer inductions yet");
3344     assert(Index->getType() == StartValue->getType() &&
3345            "Index type does not match StartValue type");
3346     if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
3347       return B.CreateSub(StartValue, Index);
3348     auto *Offset = CreateMul(
3349         Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()));
3350     return CreateAdd(StartValue, Offset);
3351   }
3352   case InductionDescriptor::IK_PtrInduction: {
3353     assert(isa<SCEVConstant>(Step) &&
3354            "Expected constant step for pointer induction");
3355     return B.CreateGEP(
3356         ID.getElementType(), StartValue,
3357         CreateMul(Index,
3358                   Exp.expandCodeFor(Step, Index->getType()->getScalarType(),
3359                                     GetInsertPoint())));
3360   }
3361   case InductionDescriptor::IK_FpInduction: {
3362     assert(!isa<VectorType>(Index->getType()) &&
3363            "Vector indices not supported for FP inductions yet");
3364     assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
3365     auto InductionBinOp = ID.getInductionBinOp();
3366     assert(InductionBinOp &&
3367            (InductionBinOp->getOpcode() == Instruction::FAdd ||
3368             InductionBinOp->getOpcode() == Instruction::FSub) &&
3369            "Original bin op should be defined for FP induction");
3370 
3371     Value *StepValue = cast<SCEVUnknown>(Step)->getValue();
3372     Value *MulExp = B.CreateFMul(StepValue, Index);
3373     return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
3374                          "induction");
3375   }
3376   case InductionDescriptor::IK_NoInduction:
3377     return nullptr;
3378   }
3379   llvm_unreachable("invalid enum");
3380 }
3381 
3382 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
3383   LoopScalarBody = OrigLoop->getHeader();
3384   LoopVectorPreHeader = OrigLoop->getLoopPreheader();
3385   assert(LoopVectorPreHeader && "Invalid loop structure");
3386   LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr
3387   assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) &&
3388          "multiple exit loop without required epilogue?");
3389 
3390   LoopMiddleBlock =
3391       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3392                  LI, nullptr, Twine(Prefix) + "middle.block");
3393   LoopScalarPreHeader =
3394       SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
3395                  nullptr, Twine(Prefix) + "scalar.ph");
3396 
3397   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3398 
3399   // Set up the middle block terminator.  Two cases:
3400   // 1) If we know that we must execute the scalar epilogue, emit an
3401   //    unconditional branch.
3402   // 2) Otherwise, we must have a single unique exit block (due to how we
  //    implement the multiple exit case).  In this case, set up a conditional
3404   //    branch from the middle block to the loop scalar preheader, and the
3405   //    exit block.  completeLoopSkeleton will update the condition to use an
3406   //    iteration check, if required to decide whether to execute the remainder.
3407   BranchInst *BrInst = Cost->requiresScalarEpilogue(VF) ?
3408     BranchInst::Create(LoopScalarPreHeader) :
3409     BranchInst::Create(LoopExitBlock, LoopScalarPreHeader,
3410                        Builder.getTrue());
3411   BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3412   ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
3413 
  // We intentionally don't let SplitBlock update LoopInfo since
  // LoopVectorBody should belong to a different loop than LoopVectorPreHeader.
  // LoopVectorBody is explicitly added to the correct place a few lines later.
3417   LoopVectorBody =
3418       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3419                  nullptr, nullptr, Twine(Prefix) + "vector.body");
3420 
3421   // Update dominator for loop exit.
3422   if (!Cost->requiresScalarEpilogue(VF))
3423     // If there is an epilogue which must run, there's no edge from the
    // middle block to exit blocks and thus no need to update the immediate
3425     // dominator of the exit blocks.
3426     DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
3427 
3428   // Create and register the new vector loop.
3429   Loop *Lp = LI->AllocateLoop();
3430   Loop *ParentLoop = OrigLoop->getParentLoop();
3431 
3432   // Insert the new loop into the loop nest and register the new basic blocks
3433   // before calling any utilities such as SCEV that require valid LoopInfo.
3434   if (ParentLoop) {
3435     ParentLoop->addChildLoop(Lp);
3436   } else {
3437     LI->addTopLevelLoop(Lp);
3438   }
3439   Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
3440   return Lp;
3441 }
3442 
3443 void InnerLoopVectorizer::createInductionResumeValues(
3444     Loop *L, std::pair<BasicBlock *, Value *> AdditionalBypass) {
3445   assert(((AdditionalBypass.first && AdditionalBypass.second) ||
3446           (!AdditionalBypass.first && !AdditionalBypass.second)) &&
3447          "Inconsistent information about additional bypass.");
3448 
3449   Value *VectorTripCount = getOrCreateVectorTripCount(L);
3450   assert(VectorTripCount && L && "Expected valid arguments");
3451   // We are going to resume the execution of the scalar loop.
3452   // Go over all of the induction variables that we found and fix the
3453   // PHIs that are left in the scalar version of the loop.
3454   // The starting values of PHI nodes depend on the counter of the last
3455   // iteration in the vectorized loop.
3456   // If we come from a bypass edge then we need to start from the original
3457   // start value.
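  // For example, for a non-primary integer induction with start value S and
  // step C, the resume value coming from the middle block is conceptually
  // S + VectorTripCount * C, while on each bypass edge it is simply S.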
3458   Instruction *OldInduction = Legal->getPrimaryInduction();
3459   for (auto &InductionEntry : Legal->getInductionVars()) {
3460     PHINode *OrigPhi = InductionEntry.first;
3461     InductionDescriptor II = InductionEntry.second;
3462 
    // Create phi nodes to merge from the backedge-taken check block.
3464     PHINode *BCResumeVal =
3465         PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
3466                         LoopScalarPreHeader->getTerminator());
3467     // Copy original phi DL over to the new one.
3468     BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
3469     Value *&EndValue = IVEndValues[OrigPhi];
3470     Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
3471     if (OrigPhi == OldInduction) {
3472       // We know what the end value is.
3473       EndValue = VectorTripCount;
3474     } else {
3475       IRBuilder<> B(L->getLoopPreheader()->getTerminator());
3476 
3477       // Fast-math-flags propagate from the original induction instruction.
3478       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3479         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3480 
3481       Type *StepType = II.getStep()->getType();
3482       Instruction::CastOps CastOp =
3483           CastInst::getCastOpcode(VectorTripCount, true, StepType, true);
3484       Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd");
3485       const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout();
3486       EndValue =
3487           emitTransformedIndex(B, CRD, PSE.getSE(), DL, II, LoopVectorBody);
3488       EndValue->setName("ind.end");
3489 
3490       // Compute the end value for the additional bypass (if applicable).
3491       if (AdditionalBypass.first) {
3492         B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt()));
3493         CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true,
3494                                          StepType, true);
3495         CRD =
3496             B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd");
3497         EndValueFromAdditionalBypass =
3498             emitTransformedIndex(B, CRD, PSE.getSE(), DL, II, LoopVectorBody);
3499         EndValueFromAdditionalBypass->setName("ind.end");
3500       }
3501     }
3502     // The new PHI merges the original incoming value, in case of a bypass,
3503     // or the value at the end of the vectorized loop.
3504     BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);
3505 
3506     // Fix the scalar body counter (PHI node).
3507     // The old induction's phi node in the scalar body needs the truncated
3508     // value.
3509     for (BasicBlock *BB : LoopBypassBlocks)
3510       BCResumeVal->addIncoming(II.getStartValue(), BB);
3511 
3512     if (AdditionalBypass.first)
3513       BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first,
3514                                             EndValueFromAdditionalBypass);
3515 
3516     OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
3517   }
3518 }
3519 
3520 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L,
3521                                                       MDNode *OrigLoopID) {
3522   assert(L && "Expected valid loop.");
3523 
3524   // The trip counts should be cached by now.
3525   Value *Count = getOrCreateTripCount(L);
3526   Value *VectorTripCount = getOrCreateVectorTripCount(L);
3527 
3528   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3529 
3530   // Add a check in the middle block to see if we have completed
3531   // all of the iterations in the first vector loop.  Three cases:
3532   // 1) If we require a scalar epilogue, there is no conditional branch as
3533   //    we unconditionally branch to the scalar preheader.  Do nothing.
3534   // 2) If (N - N%VF) == N, then we *don't* need to run the remainder.
3535   //    Thus if tail is to be folded, we know we don't need to run the
3536   //    remainder and we can use the previous value for the condition (true).
3537   // 3) Otherwise, construct a runtime check.
3538   if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) {
3539     Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
3540                                         Count, VectorTripCount, "cmp.n",
3541                                         LoopMiddleBlock->getTerminator());
3542 
3543     // Here we use the same DebugLoc as the scalar loop latch terminator instead
3544     // of the corresponding compare because they may have ended up with
3545     // different line numbers and we want to avoid awkward line stepping while
    // debugging, e.g. if the compare has a line number inside the loop.
3547     CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3548     cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN);
3549   }
3550 
3551   // Get ready to start creating new instructions into the vectorized body.
3552   assert(LoopVectorPreHeader == L->getLoopPreheader() &&
3553          "Inconsistent vector loop preheader");
3554   Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
3555 
3556 #ifdef EXPENSIVE_CHECKS
3557   assert(DT->verify(DominatorTree::VerificationLevel::Fast));
3558   LI->verify(*DT);
3559 #endif
3560 
3561   return LoopVectorPreHeader;
3562 }
3563 
3564 std::pair<BasicBlock *, Value *>
3565 InnerLoopVectorizer::createVectorizedLoopSkeleton() {
3566   /*
3567    In this function we generate a new loop. The new loop will contain
3568    the vectorized instructions while the old loop will continue to run the
3569    scalar remainder.
3570 
3571        [ ] <-- loop iteration number check.
3572     /   |
3573    /    v
3574   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
3575   |  /  |
3576   | /   v
3577   ||   [ ]     <-- vector pre header.
3578   |/    |
3579   |     v
3580   |    [  ] \
3581   |    [  ]_|   <-- vector loop.
3582   |     |
3583   |     v
3584   \   -[ ]   <--- middle-block.
3585    \/   |
3586    /\   v
3587    | ->[ ]     <--- new preheader.
3588    |    |
3589  (opt)  v      <-- edge from middle to exit iff epilogue is not required.
3590    |   [ ] \
3591    |   [ ]_|   <-- old scalar loop to handle remainder (scalar epilogue).
3592     \   |
3593      \  v
3594       >[ ]     <-- exit block(s).
3595    ...
3596    */
3597 
3598   // Get the metadata of the original loop before it gets modified.
3599   MDNode *OrigLoopID = OrigLoop->getLoopID();
3600 
3601   // Workaround!  Compute the trip count of the original loop and cache it
3602   // before we start modifying the CFG.  This code has a systemic problem
3603   // wherein it tries to run analysis over partially constructed IR; this is
3604   // wrong, and not simply for SCEV.  The trip count of the original loop
3605   // simply happens to be prone to hitting this in practice.  In theory, we
3606   // can hit the same issue for any SCEV, or ValueTracking query done during
3607   // mutation.  See PR49900.
3608   getOrCreateTripCount(OrigLoop);
3609 
3610   // Create an empty vector loop, and prepare basic blocks for the runtime
3611   // checks.
3612   Loop *Lp = createVectorLoopSkeleton("");
3613 
3614   // Now, compare the new count to zero. If it is zero skip the vector loop and
3615   // jump to the scalar loop. This check also covers the case where the
3616   // backedge-taken count is uint##_max: adding one to it will overflow leading
3617   // to an incorrect trip count of zero. In this (rare) case we will also jump
3618   // to the scalar loop.
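  // For example (illustrative), with an i8 trip count a backedge-taken count
  // of 255 wraps to a trip count of 0, so the vector loop must be skipped in
  // that case as well.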
3619   emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader);
3620 
3621   // Generate the code to check any assumptions that we've made for SCEV
3622   // expressions.
3623   emitSCEVChecks(Lp, LoopScalarPreHeader);
3624 
3625   // Generate the code that checks in runtime if arrays overlap. We put the
3626   // checks into a separate block to make the more common case of few elements
3627   // faster.
3628   emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
3629 
3630   createHeaderBranch(Lp);
3631 
3632   // Emit phis for the new starting index of the scalar loop.
3633   createInductionResumeValues(Lp);
3634 
3635   return {completeLoopSkeleton(Lp, OrigLoopID), nullptr};
3636 }
3637 
3638 // Fix up external users of the induction variable. At this point, we are
3639 // in LCSSA form, with all external PHIs that use the IV having one input value,
3640 // coming from the remainder loop. We need those PHIs to also have a correct
3641 // value for the IV when arriving directly from the middle block.
3642 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3643                                        const InductionDescriptor &II,
3644                                        Value *CountRoundDown, Value *EndValue,
3645                                        BasicBlock *MiddleBlock) {
3646   // There are two kinds of external IV usages - those that use the value
3647   // computed in the last iteration (the PHI) and those that use the penultimate
3648   // value (the value that feeds into the phi from the loop latch).
3649   // We allow both, but they, obviously, have different values.
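  // For example (illustrative LCSSA phis in the exit block):
  //   %lcssa.last = phi i64 [ %iv.next, %latch ]  ; uses the last value
  //   %lcssa.prev = phi i64 [ %iv,      %latch ]  ; uses the penultimate value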
3650 
3651   assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");
3652 
3653   DenseMap<Value *, Value *> MissingVals;
3654 
3655   // An external user of the last iteration's value should see the value that
3656   // the remainder loop uses to initialize its own IV.
3657   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3658   for (User *U : PostInc->users()) {
3659     Instruction *UI = cast<Instruction>(U);
3660     if (!OrigLoop->contains(UI)) {
3661       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3662       MissingVals[UI] = EndValue;
3663     }
3664   }
3665 
  // An external user of the penultimate value needs to see EndValue - Step.
3667   // The simplest way to get this is to recompute it from the constituent SCEVs,
3668   // that is Start + (Step * (CRD - 1)).
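  // For example (illustrative), for an induction starting at 0 with step 2
  // and CRD = 8, the penultimate value is 0 + 2 * (8 - 1) = 14.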
3669   for (User *U : OrigPhi->users()) {
3670     auto *UI = cast<Instruction>(U);
3671     if (!OrigLoop->contains(UI)) {
3672       const DataLayout &DL =
3673           OrigLoop->getHeader()->getModule()->getDataLayout();
3674       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3675 
3676       IRBuilder<> B(MiddleBlock->getTerminator());
3677 
3678       // Fast-math-flags propagate from the original induction instruction.
3679       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3680         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3681 
3682       Value *CountMinusOne = B.CreateSub(
3683           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3684       Value *CMO =
3685           !II.getStep()->getType()->isIntegerTy()
3686               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3687                              II.getStep()->getType())
3688               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3689       CMO->setName("cast.cmo");
3690       Value *Escape =
3691           emitTransformedIndex(B, CMO, PSE.getSE(), DL, II, LoopVectorBody);
3692       Escape->setName("ind.escape");
3693       MissingVals[UI] = Escape;
3694     }
3695   }
3696 
3697   for (auto &I : MissingVals) {
3698     PHINode *PHI = cast<PHINode>(I.first);
3699     // One corner case we have to handle is two IVs "chasing" each-other,
3700     // that is %IV2 = phi [...], [ %IV1, %latch ]
3701     // In this case, if IV1 has an external use, we need to avoid adding both
3702     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3703     // don't already have an incoming value for the middle block.
3704     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3705       PHI->addIncoming(I.second, MiddleBlock);
3706   }
3707 }
3708 
3709 namespace {
3710 
3711 struct CSEDenseMapInfo {
3712   static bool canHandle(const Instruction *I) {
3713     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3714            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3715   }
3716 
3717   static inline Instruction *getEmptyKey() {
3718     return DenseMapInfo<Instruction *>::getEmptyKey();
3719   }
3720 
3721   static inline Instruction *getTombstoneKey() {
3722     return DenseMapInfo<Instruction *>::getTombstoneKey();
3723   }
3724 
3725   static unsigned getHashValue(const Instruction *I) {
3726     assert(canHandle(I) && "Unknown instruction!");
3727     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3728                                                            I->value_op_end()));
3729   }
3730 
3731   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3732     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3733         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3734       return LHS == RHS;
3735     return LHS->isIdenticalTo(RHS);
3736   }
3737 };
3738 
3739 } // end anonymous namespace
3740 
/// Perform common subexpression elimination (CSE) of induction variable
/// instructions.
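/// For example (illustrative), identical 'extractelement' or 'getelementptr'
/// instructions emitted for different unroll parts collapse into one.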
3742 static void cse(BasicBlock *BB) {
3743   // Perform simple cse.
3744   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3745   for (Instruction &In : llvm::make_early_inc_range(*BB)) {
3746     if (!CSEDenseMapInfo::canHandle(&In))
3747       continue;
3748 
3749     // Check if we can replace this instruction with any of the
3750     // visited instructions.
3751     if (Instruction *V = CSEMap.lookup(&In)) {
3752       In.replaceAllUsesWith(V);
3753       In.eraseFromParent();
3754       continue;
3755     }
3756 
3757     CSEMap[&In] = &In;
3758   }
3759 }
3760 
3761 InstructionCost
3762 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF,
3763                                               bool &NeedToScalarize) const {
3764   Function *F = CI->getCalledFunction();
3765   Type *ScalarRetTy = CI->getType();
3766   SmallVector<Type *, 4> Tys, ScalarTys;
3767   for (auto &ArgOp : CI->args())
3768     ScalarTys.push_back(ArgOp->getType());
3769 
3770   // Estimate cost of scalarized vector call. The source operands are assumed
3771   // to be vectors, so we need to extract individual elements from there,
3772   // execute VF scalar calls, and then gather the result into the vector return
3773   // value.
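  // Illustratively, the scalarized estimate computed below is
  //   Cost = ScalarCallCost * VF + ScalarizationCost
  // and is later compared against the cost of an actual vector call, if one
  // is available.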
3774   InstructionCost ScalarCallCost =
3775       TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput);
3776   if (VF.isScalar())
3777     return ScalarCallCost;
3778 
3779   // Compute corresponding vector type for return value and arguments.
3780   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3781   for (Type *ScalarTy : ScalarTys)
3782     Tys.push_back(ToVectorTy(ScalarTy, VF));
3783 
3784   // Compute costs of unpacking argument values for the scalar calls and
3785   // packing the return values to a vector.
3786   InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
3787 
3788   InstructionCost Cost =
3789       ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
3790 
3791   // If we can't emit a vector call for this function, then the currently found
3792   // cost is the cost we need to return.
3793   NeedToScalarize = true;
3794   VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
3795   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3796 
3797   if (!TLI || CI->isNoBuiltin() || !VecFunc)
3798     return Cost;
3799 
3800   // If the corresponding vector cost is cheaper, return its cost.
3801   InstructionCost VectorCallCost =
3802       TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput);
3803   if (VectorCallCost < Cost) {
3804     NeedToScalarize = false;
3805     Cost = VectorCallCost;
3806   }
3807   return Cost;
3808 }
3809 
3810 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) {
3811   if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy()))
3812     return Elt;
3813   return VectorType::get(Elt, VF);
3814 }
3815 
3816 InstructionCost
3817 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
3818                                                    ElementCount VF) const {
3819   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3820   assert(ID && "Expected intrinsic call!");
3821   Type *RetTy = MaybeVectorizeType(CI->getType(), VF);
3822   FastMathFlags FMF;
3823   if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
3824     FMF = FPMO->getFastMathFlags();
3825 
3826   SmallVector<const Value *> Arguments(CI->args());
3827   FunctionType *FTy = CI->getCalledFunction()->getFunctionType();
3828   SmallVector<Type *> ParamTys;
3829   std::transform(FTy->param_begin(), FTy->param_end(),
3830                  std::back_inserter(ParamTys),
3831                  [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); });
3832 
3833   IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
3834                                     dyn_cast<IntrinsicInst>(CI));
3835   return TTI.getIntrinsicInstrCost(CostAttrs,
3836                                    TargetTransformInfo::TCK_RecipThroughput);
3837 }
3838 
3839 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3840   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3841   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3842   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3843 }
3844 
3845 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3846   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3847   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3848   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3849 }
3850 
3851 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) {
3852   // For every instruction `I` in MinBWs, truncate the operands, create a
3853   // truncated version of `I` and reextend its result. InstCombine runs
3854   // later and will remove any ext/trunc pairs.
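  // For example (illustrative IR), if MinBWs records that an i32 add only
  // needs 8 bits, then for each part
  //   %a = add <4 x i32> %x, %y
  // is rewritten as
  //   %x.tr = trunc <4 x i32> %x to <4 x i8>
  //   %y.tr = trunc <4 x i32> %y to <4 x i8>
  //   %a.tr = add <4 x i8> %x.tr, %y.tr
  //   %a.ext = zext <4 x i8> %a.tr to <4 x i32>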
3855   SmallPtrSet<Value *, 4> Erased;
3856   for (const auto &KV : Cost->getMinimalBitwidths()) {
3857     // If the value wasn't vectorized, we must maintain the original scalar
3858     // type. The absence of the value from State indicates that it
3859     // wasn't vectorized.
3860     // FIXME: Should not rely on getVPValue at this point.
3861     VPValue *Def = State.Plan->getVPValue(KV.first, true);
3862     if (!State.hasAnyVectorValue(Def))
3863       continue;
3864     for (unsigned Part = 0; Part < UF; ++Part) {
3865       Value *I = State.get(Def, Part);
3866       if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
3867         continue;
3868       Type *OriginalTy = I->getType();
3869       Type *ScalarTruncatedTy =
3870           IntegerType::get(OriginalTy->getContext(), KV.second);
3871       auto *TruncatedTy = VectorType::get(
3872           ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount());
3873       if (TruncatedTy == OriginalTy)
3874         continue;
3875 
3876       IRBuilder<> B(cast<Instruction>(I));
3877       auto ShrinkOperand = [&](Value *V) -> Value * {
3878         if (auto *ZI = dyn_cast<ZExtInst>(V))
3879           if (ZI->getSrcTy() == TruncatedTy)
3880             return ZI->getOperand(0);
3881         return B.CreateZExtOrTrunc(V, TruncatedTy);
3882       };
3883 
3884       // The actual instruction modification depends on the instruction type,
3885       // unfortunately.
3886       Value *NewI = nullptr;
3887       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3888         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3889                              ShrinkOperand(BO->getOperand(1)));
3890 
3891         // Any wrapping introduced by shrinking this operation shouldn't be
3892         // considered undefined behavior. So, we can't unconditionally copy
3893         // arithmetic wrapping flags to NewI.
3894         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3895       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3896         NewI =
3897             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3898                          ShrinkOperand(CI->getOperand(1)));
3899       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3900         NewI = B.CreateSelect(SI->getCondition(),
3901                               ShrinkOperand(SI->getTrueValue()),
3902                               ShrinkOperand(SI->getFalseValue()));
3903       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3904         switch (CI->getOpcode()) {
3905         default:
3906           llvm_unreachable("Unhandled cast!");
3907         case Instruction::Trunc:
3908           NewI = ShrinkOperand(CI->getOperand(0));
3909           break;
3910         case Instruction::SExt:
3911           NewI = B.CreateSExtOrTrunc(
3912               CI->getOperand(0),
3913               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3914           break;
3915         case Instruction::ZExt:
3916           NewI = B.CreateZExtOrTrunc(
3917               CI->getOperand(0),
3918               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3919           break;
3920         }
3921       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
3922         auto Elements0 =
3923             cast<VectorType>(SI->getOperand(0)->getType())->getElementCount();
3924         auto *O0 = B.CreateZExtOrTrunc(
3925             SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
3926         auto Elements1 =
3927             cast<VectorType>(SI->getOperand(1)->getType())->getElementCount();
3928         auto *O1 = B.CreateZExtOrTrunc(
3929             SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
3930 
3931         NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
3932       } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
3933         // Don't do anything with the operands, just extend the result.
3934         continue;
3935       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3936         auto Elements =
3937             cast<VectorType>(IE->getOperand(0)->getType())->getElementCount();
3938         auto *O0 = B.CreateZExtOrTrunc(
3939             IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3940         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3941         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3942       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3943         auto Elements =
3944             cast<VectorType>(EE->getOperand(0)->getType())->getElementCount();
3945         auto *O0 = B.CreateZExtOrTrunc(
3946             EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3947         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3948       } else {
3949         // If we don't know what to do, be conservative and don't do anything.
3950         continue;
3951       }
3952 
3953       // Lastly, extend the result.
3954       NewI->takeName(cast<Instruction>(I));
3955       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3956       I->replaceAllUsesWith(Res);
3957       cast<Instruction>(I)->eraseFromParent();
3958       Erased.insert(I);
3959       State.reset(Def, Res, Part);
3960     }
3961   }
3962 
  // We'll have created a bunch of ZExts that are now unused. Clean them up.
3964   for (const auto &KV : Cost->getMinimalBitwidths()) {
3965     // If the value wasn't vectorized, we must maintain the original scalar
3966     // type. The absence of the value from State indicates that it
3967     // wasn't vectorized.
3968     // FIXME: Should not rely on getVPValue at this point.
3969     VPValue *Def = State.Plan->getVPValue(KV.first, true);
3970     if (!State.hasAnyVectorValue(Def))
3971       continue;
3972     for (unsigned Part = 0; Part < UF; ++Part) {
3973       Value *I = State.get(Def, Part);
3974       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3975       if (Inst && Inst->use_empty()) {
3976         Value *NewI = Inst->getOperand(0);
3977         Inst->eraseFromParent();
3978         State.reset(Def, NewI, Part);
3979       }
3980     }
3981   }
3982 }
3983 
3984 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
3985   // Insert truncates and extends for any truncated instructions as hints to
3986   // InstCombine.
3987   if (VF.isVector())
3988     truncateToMinimalBitwidths(State);
3989 
3990   // Fix widened non-induction PHIs by setting up the PHI operands.
3991   if (OrigPHIsToFix.size()) {
3992     assert(EnableVPlanNativePath &&
3993            "Unexpected non-induction PHIs for fixup in non VPlan-native path");
3994     fixNonInductionPHIs(State);
3995   }
3996 
3997   // At this point every instruction in the original loop is widened to a
3998   // vector form. Now we need to fix the recurrences in the loop. These PHI
3999   // nodes are currently empty because we did not want to introduce cycles.
4000   // This is the second stage of vectorizing recurrences.
4001   fixCrossIterationPHIs(State);
4002 
4003   // Forget the original basic block.
4004   PSE.getSE()->forgetLoop(OrigLoop);
4005 
4006   // If we inserted an edge from the middle block to the unique exit block,
4007   // update uses outside the loop (phis) to account for the newly inserted
4008   // edge.
4009   if (!Cost->requiresScalarEpilogue(VF)) {
4010     // Fix-up external users of the induction variables.
4011     for (auto &Entry : Legal->getInductionVars())
4012       fixupIVUsers(Entry.first, Entry.second,
4013                    getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
4014                    IVEndValues[Entry.first], LoopMiddleBlock);
4015 
4016     fixLCSSAPHIs(State);
4017   }
4018 
4019   for (Instruction *PI : PredicatedInstructions)
4020     sinkScalarOperands(&*PI);
4021 
4022   // Remove redundant induction instructions.
4023   cse(LoopVectorBody);
4024 
4025   // Set/update profile weights for the vector and remainder loops as original
4026   // loop iterations are now distributed among them. Note that original loop
4027   // represented by LoopScalarBody becomes remainder loop after vectorization.
4028   //
  // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
  // end up with a slightly less accurate result, but that should be OK since
  // the profile is not inherently precise anyway. Note also that any possible
  // bypass of the vector code caused by the runtime legality checks is
  // ignored, optimistically assigning all the weight to the vector loop.
4034   //
  // For scalable vectorization we can't know at compile time how many
  // iterations of the loop are handled in one vector iteration, so instead
  // assume a pessimistic vscale of '1'.
4038   setProfileInfoAfterUnrolling(
4039       LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody),
4040       LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF);
4041 }
4042 
4043 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
4044   // In order to support recurrences we need to be able to vectorize Phi nodes.
4045   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4046   // stage #2: We now need to fix the recurrences by adding incoming edges to
4047   // the currently empty PHI nodes. At this point every instruction in the
4048   // original loop is widened to a vector form so we can use them to construct
4049   // the incoming edges.
4050   VPBasicBlock *Header = State.Plan->getEntry()->getEntryBasicBlock();
4051   for (VPRecipeBase &R : Header->phis()) {
4052     if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R))
4053       fixReduction(ReductionPhi, State);
4054     else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))
4055       fixFirstOrderRecurrence(FOR, State);
4056   }
4057 }
4058 
4059 void InnerLoopVectorizer::fixFirstOrderRecurrence(
4060     VPFirstOrderRecurrencePHIRecipe *PhiR, VPTransformState &State) {
4061   // This is the second phase of vectorizing first-order recurrences. An
4062   // overview of the transformation is described below. Suppose we have the
4063   // following loop.
4064   //
4065   //   for (int i = 0; i < n; ++i)
4066   //     b[i] = a[i] - a[i - 1];
4067   //
4068   // There is a first-order recurrence on "a". For this loop, the shorthand
4069   // scalar IR looks like:
4070   //
4071   //   scalar.ph:
4072   //     s_init = a[-1]
4073   //     br scalar.body
4074   //
4075   //   scalar.body:
4076   //     i = phi [0, scalar.ph], [i+1, scalar.body]
4077   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
4078   //     s2 = a[i]
4079   //     b[i] = s2 - s1
4080   //     br cond, scalar.body, ...
4081   //
  // In this example, s1 is a recurrence because its value depends on the
4083   // previous iteration. In the first phase of vectorization, we created a
4084   // vector phi v1 for s1. We now complete the vectorization and produce the
4085   // shorthand vector IR shown below (for VF = 4, UF = 1).
4086   //
4087   //   vector.ph:
4088   //     v_init = vector(..., ..., ..., a[-1])
4089   //     br vector.body
4090   //
4091   //   vector.body
4092   //     i = phi [0, vector.ph], [i+4, vector.body]
4093   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
4094   //     v2 = a[i, i+1, i+2, i+3];
4095   //     v3 = vector(v1(3), v2(0, 1, 2))
4096   //     b[i, i+1, i+2, i+3] = v2 - v3
4097   //     br cond, vector.body, middle.block
4098   //
4099   //   middle.block:
4100   //     x = v2(3)
4101   //     br scalar.ph
4102   //
4103   //   scalar.ph:
4104   //     s_init = phi [x, middle.block], [a[-1], otherwise]
4105   //     br scalar.body
4106   //
  // After the vector loop completes execution, we extract the next value of
4108   // the recurrence (x) to use as the initial value in the scalar loop.
4109 
4110   // Extract the last vector element in the middle block. This will be the
4111   // initial value for the recurrence when jumping to the scalar loop.
4112   VPValue *PreviousDef = PhiR->getBackedgeValue();
4113   Value *Incoming = State.get(PreviousDef, UF - 1);
4114   auto *ExtractForScalar = Incoming;
4115   auto *IdxTy = Builder.getInt32Ty();
4116   if (VF.isVector()) {
4117     auto *One = ConstantInt::get(IdxTy, 1);
4118     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4119     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4120     auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
4121     ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx,
4122                                                     "vector.recur.extract");
4123   }
  // Extract the second-to-last element in the middle block if the
4125   // Phi is used outside the loop. We need to extract the phi itself
4126   // and not the last element (the phi update in the current iteration). This
4127   // will be the value when jumping to the exit block from the LoopMiddleBlock,
4128   // when the scalar loop is not run at all.
4129   Value *ExtractForPhiUsedOutsideLoop = nullptr;
4130   if (VF.isVector()) {
4131     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4132     auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2));
4133     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
4134         Incoming, Idx, "vector.recur.extract.for.phi");
4135   } else if (UF > 1)
    // When the loop is unrolled without vectorizing, initialize
    // ExtractForPhiUsedOutsideLoop with the unrolled value just prior to the
    // final value of `Incoming`. This is analogous to the vectorized case
    // above: extracting the second-to-last element when VF > 1.
4140     ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);
4141 
4142   // Fix the initial value of the original recurrence in the scalar loop.
4143   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
4144   PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue());
4145   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
4146   auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue();
4147   for (auto *BB : predecessors(LoopScalarPreHeader)) {
4148     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
4149     Start->addIncoming(Incoming, BB);
4150   }
4151 
4152   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
4153   Phi->setName("scalar.recur");
4154 
4155   // Finally, fix users of the recurrence outside the loop. The users will need
4156   // either the last value of the scalar recurrence or the last value of the
4157   // vector recurrence we extracted in the middle block. Since the loop is in
4158   // LCSSA form, we just need to find all the phi nodes for the original scalar
4159   // recurrence in the exit block, and then add an edge for the middle block.
4160   // Note that LCSSA does not imply single entry when the original scalar loop
4161   // had multiple exiting edges (as we always run the last iteration in the
  // scalar epilogue); in that case, there is no edge from the middle block to
  // the exit block, and thus no phis that need updating.
4164   if (!Cost->requiresScalarEpilogue(VF))
4165     for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4166       if (llvm::is_contained(LCSSAPhi.incoming_values(), Phi))
4167         LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
4168 }
4169 
4170 void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR,
4171                                        VPTransformState &State) {
4172   PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
  // Get its reduction variable descriptor.
4174   assert(Legal->isReductionVariable(OrigPhi) &&
4175          "Unable to find the reduction variable");
4176   const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
4177 
4178   RecurKind RK = RdxDesc.getRecurrenceKind();
4179   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
4180   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
4181   setDebugLocFromInst(ReductionStartValue);
4182 
4183   VPValue *LoopExitInstDef = PhiR->getBackedgeValue();
4184   // This is the vector-clone of the value that leaves the loop.
4185   Type *VecTy = State.get(LoopExitInstDef, 0)->getType();
4186 
4187   // Wrap flags are in general invalid after vectorization, clear them.
4188   clearReductionWrapFlags(RdxDesc, State);
4189 
4190   // Before each round, move the insertion point right between
4191   // the PHIs and the values we are going to write.
4192   // This allows us to write both PHINodes and the extractelement
4193   // instructions.
4194   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4195 
4196   setDebugLocFromInst(LoopExitInst);
4197 
4198   Type *PhiTy = OrigPhi->getType();
4199   // If tail is folded by masking, the vector value to leave the loop should be
4200   // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
4201   // instead of the former. For an inloop reduction the reduction will already
4202   // be predicated, and does not need to be handled here.
4203   if (Cost->foldTailByMasking() && !PhiR->isInLoop()) {
4204     for (unsigned Part = 0; Part < UF; ++Part) {
4205       Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
4206       Value *Sel = nullptr;
4207       for (User *U : VecLoopExitInst->users()) {
4208         if (isa<SelectInst>(U)) {
4209           assert(!Sel && "Reduction exit feeding two selects");
4210           Sel = U;
4211         } else
4212           assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
4213       }
4214       assert(Sel && "Reduction exit feeds no select");
4215       State.reset(LoopExitInstDef, Sel, Part);
4216 
4217       // If the target can create a predicated operator for the reduction at no
4218       // extra cost in the loop (for example a predicated vadd), it can be
4219       // cheaper for the select to remain in the loop than be sunk out of it,
4220       // and so use the select value for the phi instead of the old
4221       // LoopExitValue.
4222       if (PreferPredicatedReductionSelect ||
4223           TTI->preferPredicatedReductionSelect(
4224               RdxDesc.getOpcode(), PhiTy,
4225               TargetTransformInfo::ReductionFlags())) {
        auto *VecRdxPhi = cast<PHINode>(State.get(PhiR, Part));
4228         VecRdxPhi->setIncomingValueForBlock(
4229             LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel);
4230       }
4231     }
4232   }
4233 
4234   // If the vector reduction can be performed in a smaller type, we truncate
4235   // then extend the loop exit value to enable InstCombine to evaluate the
4236   // entire expression in the smaller type.
4237   if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) {
4238     assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!");
4239     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
4240     Builder.SetInsertPoint(
4241         LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
4242     VectorParts RdxParts(UF);
4243     for (unsigned Part = 0; Part < UF; ++Part) {
4244       RdxParts[Part] = State.get(LoopExitInstDef, Part);
4245       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4246       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
4247                                         : Builder.CreateZExt(Trunc, VecTy);
4248       for (User *U : llvm::make_early_inc_range(RdxParts[Part]->users()))
4249         if (U != Trunc) {
4250           U->replaceUsesOfWith(RdxParts[Part], Extnd);
4251           RdxParts[Part] = Extnd;
4252         }
4253     }
4254     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4255     for (unsigned Part = 0; Part < UF; ++Part) {
4256       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4257       State.reset(LoopExitInstDef, RdxParts[Part], Part);
4258     }
4259   }
4260 
4261   // Reduce all of the unrolled parts into a single vector.
4262   Value *ReducedPartRdx = State.get(LoopExitInstDef, 0);
4263   unsigned Op = RecurrenceDescriptor::getOpcode(RK);
4264 
4265   // The middle block terminator has already been assigned a DebugLoc here (the
4266   // OrigLoop's single latch terminator). We want the whole middle block to
4267   // appear to execute on this line because: (a) it is all compiler generated,
4268   // (b) these instructions are always executed after evaluating the latch
4269   // conditional branch, and (c) other passes may add new predecessors which
4270   // terminate on this line. This is the easiest way to ensure we don't
4271   // accidentally cause an extra step back into the loop while debugging.
4272   setDebugLocFromInst(LoopMiddleBlock->getTerminator());
4273   if (PhiR->isOrdered())
4274     ReducedPartRdx = State.get(LoopExitInstDef, UF - 1);
4275   else {
4276     // Floating-point operations should have some FMF to enable the reduction.
4277     IRBuilderBase::FastMathFlagGuard FMFG(Builder);
4278     Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
4279     for (unsigned Part = 1; Part < UF; ++Part) {
4280       Value *RdxPart = State.get(LoopExitInstDef, Part);
4281       if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
4282         ReducedPartRdx = Builder.CreateBinOp(
4283             (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
4284       } else if (RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK))
4285         ReducedPartRdx = createSelectCmpOp(Builder, ReductionStartValue, RK,
4286                                            ReducedPartRdx, RdxPart);
4287       else
4288         ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
4289     }
4290   }
4291 
4292   // Create the reduction after the loop. Note that inloop reductions create the
4293   // target reduction in the loop using a Reduction recipe.
4294   if (VF.isVector() && !PhiR->isInLoop()) {
4295     ReducedPartRdx =
4296         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, OrigPhi);
4297     // If the reduction can be performed in a smaller type, we need to extend
4298     // the reduction to the wider type before we branch to the original loop.
4299     if (PhiTy != RdxDesc.getRecurrenceType())
4300       ReducedPartRdx = RdxDesc.isSigned()
4301                            ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
4302                            : Builder.CreateZExt(ReducedPartRdx, PhiTy);
4303   }
4304 
4305   // Create a phi node that merges control-flow from the backedge-taken check
4306   // block and the middle block.
4307   PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx",
4308                                         LoopScalarPreHeader->getTerminator());
4309   for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
4310     BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
4311   BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
4312 
4313   // Now, we need to fix the users of the reduction variable
4314   // inside and outside of the scalar remainder loop.
4315 
4316   // We know that the loop is in LCSSA form. We need to update the PHI nodes
4317   // in the exit blocks.  See comment on analogous loop in
  // fixFirstOrderRecurrence for a more complete explanation of the logic.
4319   if (!Cost->requiresScalarEpilogue(VF))
4320     for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4321       if (llvm::is_contained(LCSSAPhi.incoming_values(), LoopExitInst))
4322         LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
4323 
4324   // Fix the scalar loop reduction variable with the incoming reduction sum
4325   // from the vector body and from the backedge value.
4326   int IncomingEdgeBlockIdx =
4327       OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4328   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4329   // Pick the other block.
4330   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4331   OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4332   OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4333 }
4334 
4335 void InnerLoopVectorizer::clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
4336                                                   VPTransformState &State) {
4337   RecurKind RK = RdxDesc.getRecurrenceKind();
4338   if (RK != RecurKind::Add && RK != RecurKind::Mul)
4339     return;
4340 
4341   Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
4342   assert(LoopExitInstr && "null loop exit instruction");
4343   SmallVector<Instruction *, 8> Worklist;
4344   SmallPtrSet<Instruction *, 8> Visited;
4345   Worklist.push_back(LoopExitInstr);
4346   Visited.insert(LoopExitInstr);
4347 
4348   while (!Worklist.empty()) {
4349     Instruction *Cur = Worklist.pop_back_val();
4350     if (isa<OverflowingBinaryOperator>(Cur))
4351       for (unsigned Part = 0; Part < UF; ++Part) {
4352         // FIXME: Should not rely on getVPValue at this point.
4353         Value *V = State.get(State.Plan->getVPValue(Cur, true), Part);
4354         cast<Instruction>(V)->dropPoisonGeneratingFlags();
4355       }
4356 
4357     for (User *U : Cur->users()) {
4358       Instruction *UI = cast<Instruction>(U);
4359       if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
4360           Visited.insert(UI).second)
4361         Worklist.push_back(UI);
4362     }
4363   }
4364 }
4365 
4366 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) {
4367   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4368     if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1)
4369       // Some phis were already hand updated by the reduction and recurrence
4370       // code above, leave them alone.
4371       continue;
4372 
4373     auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
4374     // Non-instruction incoming values will have only one value.
4375 
4376     VPLane Lane = VPLane::getFirstLane();
4377     if (isa<Instruction>(IncomingValue) &&
4378         !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue),
4379                                            VF))
4380       Lane = VPLane::getLastLaneForVF(VF);
4381 
4382     // Can be a loop invariant incoming value or the last scalar value to be
4383     // extracted from the vectorized loop.
4384     // FIXME: Should not rely on getVPValue at this point.
4385     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4386     Value *lastIncomingValue =
4387         OrigLoop->isLoopInvariant(IncomingValue)
4388             ? IncomingValue
4389             : State.get(State.Plan->getVPValue(IncomingValue, true),
4390                         VPIteration(UF - 1, Lane));
4391     LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
4392   }
4393 }
4394 
4395 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4396   // The basic block and loop containing the predicated instruction.
4397   auto *PredBB = PredInst->getParent();
4398   auto *VectorLoop = LI->getLoopFor(PredBB);
4399 
4400   // Initialize a worklist with the operands of the predicated instruction.
4401   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4402 
4403   // Holds instructions that we need to analyze again. An instruction may be
4404   // reanalyzed if we don't yet know if we can sink it or not.
4405   SmallVector<Instruction *, 8> InstsToReanalyze;
4406 
4407   // Returns true if a given use occurs in the predicated block. Phi nodes use
4408   // their operands in their corresponding predecessor blocks.
4409   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4410     auto *I = cast<Instruction>(U.getUser());
4411     BasicBlock *BB = I->getParent();
4412     if (auto *Phi = dyn_cast<PHINode>(I))
4413       BB = Phi->getIncomingBlock(
4414           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4415     return BB == PredBB;
4416   };
4417 
4418   // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
  // operands are then added to the worklist. The algorithm ends after one pass
4421   // through the worklist doesn't sink a single instruction.
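  // For example (illustrative), sinking a scalarized GEP into PredBB may, on
  // a later pass over the worklist, enable sinking the address computation
  // that feeds it.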
4422   bool Changed;
4423   do {
4424     // Add the instructions that need to be reanalyzed to the worklist, and
4425     // reset the changed indicator.
4426     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4427     InstsToReanalyze.clear();
4428     Changed = false;
4429 
4430     while (!Worklist.empty()) {
4431       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4432 
4433       // We can't sink an instruction if it is a phi node, is not in the loop,
4434       // or may have side effects.
4435       if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) ||
4436           I->mayHaveSideEffects())
4437         continue;
4438 
4439       // If the instruction is already in PredBB, check if we can sink its
4440       // operands. In that case, VPlan's sinkScalarOperands() succeeded in
4441       // sinking the scalar instruction I, hence it appears in PredBB; but it
4442       // may have failed to sink I's operands (recursively), which we try
4443       // (again) here.
4444       if (I->getParent() == PredBB) {
4445         Worklist.insert(I->op_begin(), I->op_end());
4446         continue;
4447       }
4448 
4449       // It's legal to sink the instruction if all its uses occur in the
4450       // predicated block. Otherwise, there's nothing to do yet, and we may
4451       // need to reanalyze the instruction.
4452       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4453         InstsToReanalyze.push_back(I);
4454         continue;
4455       }
4456 
4457       // Move the instruction to the beginning of the predicated block, and add
      // its operands to the worklist.
4459       I->moveBefore(&*PredBB->getFirstInsertionPt());
4460       Worklist.insert(I->op_begin(), I->op_end());
4461 
4462       // The sinking may have enabled other instructions to be sunk, so we will
4463       // need to iterate.
4464       Changed = true;
4465     }
4466   } while (Changed);
4467 }
4468 
4469 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
4470   for (PHINode *OrigPhi : OrigPHIsToFix) {
4471     VPWidenPHIRecipe *VPPhi =
4472         cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi));
4473     PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
4474     // Make sure the builder has a valid insert point.
4475     Builder.SetInsertPoint(NewPhi);
4476     for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) {
4477       VPValue *Inc = VPPhi->getIncomingValue(i);
4478       VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i);
4479       NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]);
4480     }
4481   }
4482 }
4483 
4484 bool InnerLoopVectorizer::useOrderedReductions(
4485     const RecurrenceDescriptor &RdxDesc) {
4486   return Cost->useOrderedReductions(RdxDesc);
4487 }
4488 
4489 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
4490                                               VPWidenPHIRecipe *PhiR,
4491                                               VPTransformState &State) {
4492   PHINode *P = cast<PHINode>(PN);
4493   if (EnableVPlanNativePath) {
4494     // Currently we enter here in the VPlan-native path for non-induction
4495     // PHIs where all control flow is uniform. We simply widen these PHIs.
4496     // Create a vector phi with no operands - the vector phi operands will be
4497     // set at the end of vector code generation.
4498     Type *VecTy = (State.VF.isScalar())
4499                       ? PN->getType()
4500                       : VectorType::get(PN->getType(), State.VF);
4501     Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4502     State.set(PhiR, VecPhi, 0);
4503     OrigPHIsToFix.push_back(P);
4504 
4505     return;
4506   }
4507 
4508   assert(PN->getParent() == OrigLoop->getHeader() &&
4509          "Non-header phis should have been handled elsewhere");
4510 
4511   // In order to support recurrences we need to be able to vectorize Phi nodes.
4512   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4513   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4514   // this value when we vectorize all of the instructions that use the PHI.
4515 
4516   assert(!Legal->isReductionVariable(P) &&
4517          "reductions should be handled elsewhere");
4518 
4519   setDebugLocFromInst(P);
4520 
4521   // This PHINode must be an induction variable.
4522   // Make sure that we know about it.
4523   assert(Legal->getInductionVars().count(P) && "Not an induction variable");
4524 
4525   InductionDescriptor II = Legal->getInductionVars().lookup(P);
4526   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4527 
4528   auto *IVR = PhiR->getParent()->getPlan()->getCanonicalIV();
4529   PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, 0));
4530 
4531   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4532   // which can be found from the original scalar operations.
4533   switch (II.getKind()) {
4534   case InductionDescriptor::IK_NoInduction:
4535     llvm_unreachable("Unknown induction");
4536   case InductionDescriptor::IK_IntInduction:
4537   case InductionDescriptor::IK_FpInduction:
4538     llvm_unreachable("Integer/fp induction is handled elsewhere.");
4539   case InductionDescriptor::IK_PtrInduction: {
4540     // Handle the pointer induction variable case.
4541     assert(P->getType()->isPointerTy() && "Unexpected type.");
4542 
4543     if (Cost->isScalarAfterVectorization(P, State.VF)) {
4544       // This is the normalized GEP that starts counting at zero.
4545       Value *PtrInd =
4546           Builder.CreateSExtOrTrunc(CanonicalIV, II.getStep()->getType());
4547       // Determine the number of scalars we need to generate for each unroll
4548       // iteration. If the instruction is uniform, we only need to generate the
4549       // first lane. Otherwise, we generate all VF values.
4550       bool IsUniform = Cost->isUniformAfterVectorization(P, State.VF);
4551       assert((IsUniform || !State.VF.isScalable()) &&
4552              "Cannot scalarize a scalable VF");
4553       unsigned Lanes = IsUniform ? 1 : State.VF.getFixedValue();
4554 
4555       for (unsigned Part = 0; Part < UF; ++Part) {
4556         Value *PartStart =
4557             createStepForVF(Builder, PtrInd->getType(), VF, Part);
4558 
4559         for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4560           Value *Idx = Builder.CreateAdd(
4561               PartStart, ConstantInt::get(PtrInd->getType(), Lane));
4562           Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4563           Value *SclrGep = emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(),
4564                                                 DL, II, State.CFG.PrevBB);
4565           SclrGep->setName("next.gep");
4566           State.set(PhiR, SclrGep, VPIteration(Part, Lane));
4567         }
4568       }
4569       return;
4570     }
4571     assert(isa<SCEVConstant>(II.getStep()) &&
4572            "Induction step not a SCEV constant!");
4573     Type *PhiType = II.getStep()->getType();
4574 
4575     // Build a pointer phi
4576     Value *ScalarStartValue = PhiR->getStartValue()->getLiveInIRValue();
4577     Type *ScStValueType = ScalarStartValue->getType();
4578     PHINode *NewPointerPhi =
4579         PHINode::Create(ScStValueType, 2, "pointer.phi", CanonicalIV);
4580     NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader);
4581 
4582     // A pointer induction, performed by using a gep
4583     BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
4584     Instruction *InductionLoc = LoopLatch->getTerminator();
4585     const SCEV *ScalarStep = II.getStep();
4586     SCEVExpander Exp(*PSE.getSE(), DL, "induction");
4587     Value *ScalarStepValue =
4588         Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc);
4589     Value *RuntimeVF = getRuntimeVF(Builder, PhiType, VF);
4590     Value *NumUnrolledElems =
4591         Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF));
4592     Value *InductionGEP = GetElementPtrInst::Create(
4593         II.getElementType(), NewPointerPhi,
4594         Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind",
4595         InductionLoc);
4596     NewPointerPhi->addIncoming(InductionGEP, LoopLatch);
4597 
4598     // Create UF many actual address geps that use the pointer
4599     // phi as base and a vectorized version of the step value
4600     // (<step*0, ..., step*N>) as offset.
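    // For example (illustrative, fixed VF = 4, Part = 1): StartOffset is
    // <4, 5, 6, 7> and the GEP offsets are <4*step, 5*step, 6*step, 7*step>.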
4601     for (unsigned Part = 0; Part < State.UF; ++Part) {
4602       Type *VecPhiType = VectorType::get(PhiType, State.VF);
4603       Value *StartOffsetScalar =
4604           Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part));
4605       Value *StartOffset =
4606           Builder.CreateVectorSplat(State.VF, StartOffsetScalar);
      // Create a vector of consecutive numbers from zero to VF - 1.
4608       StartOffset =
4609           Builder.CreateAdd(StartOffset, Builder.CreateStepVector(VecPhiType));
4610 
4611       Value *GEP = Builder.CreateGEP(
4612           II.getElementType(), NewPointerPhi,
4613           Builder.CreateMul(
4614               StartOffset, Builder.CreateVectorSplat(State.VF, ScalarStepValue),
4615               "vector.gep"));
4616       State.set(PhiR, GEP, Part);
4617     }
4618   }
4619   }
4620 }
4621 
4622 /// A helper function for checking whether an integer division-related
4623 /// instruction may divide by zero (in which case it must be predicated if
4624 /// executed conditionally in the scalar code).
4625 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
/// Non-zero divisors that are not compile-time constants will not be
4627 /// converted into multiplication, so we will still end up scalarizing
4628 /// the division, but can do so w/o predication.
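/// For example (illustrative), 'udiv i32 %a, %b' with a non-constant %b may
/// divide by zero and must be predicated, whereas 'udiv i32 %a, 7' need not.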
4629 static bool mayDivideByZero(Instruction &I) {
4630   assert((I.getOpcode() == Instruction::UDiv ||
4631           I.getOpcode() == Instruction::SDiv ||
4632           I.getOpcode() == Instruction::URem ||
4633           I.getOpcode() == Instruction::SRem) &&
4634          "Unexpected instruction");
4635   Value *Divisor = I.getOperand(1);
4636   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4637   return !CInt || CInt->isZero();
4638 }
4639 
4640 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def,
4641                                                VPUser &ArgOperands,
4642                                                VPTransformState &State) {
4643   assert(!isa<DbgInfoIntrinsic>(I) &&
4644          "DbgInfoIntrinsic should have been dropped during VPlan construction");
4645   setDebugLocFromInst(&I);
4646 
4647   Module *M = I.getParent()->getParent()->getParent();
4648   auto *CI = cast<CallInst>(&I);
4649 
4650   SmallVector<Type *, 4> Tys;
4651   for (Value *ArgOperand : CI->args())
4652     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue()));
4653 
4654   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4655 
  // The flag indicates whether we use an intrinsic or a plain call for the
  // vectorized version of the instruction, i.e. whether it is more beneficial
  // to perform an intrinsic call than a library call.
4659   bool NeedToScalarize = false;
4660   InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
4661   InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0;
4662   bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
4663   assert((UseVectorIntrinsic || !NeedToScalarize) &&
4664          "Instruction should be scalarized elsewhere.");
4665   assert((IntrinsicCost.isValid() || CallCost.isValid()) &&
4666          "Either the intrinsic cost or vector call cost must be valid");
4667 
4668   for (unsigned Part = 0; Part < UF; ++Part) {
4669     SmallVector<Type *, 2> TysForDecl = {CI->getType()};
4670     SmallVector<Value *, 4> Args;
4671     for (auto &I : enumerate(ArgOperands.operands())) {
4672       // Some intrinsics have a scalar argument - don't replace it with a
4673       // vector.
4674       Value *Arg;
4675       if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
4676         Arg = State.get(I.value(), Part);
4677       else {
4678         Arg = State.get(I.value(), VPIteration(0, 0));
4679         if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index()))
4680           TysForDecl.push_back(Arg->getType());
4681       }
4682       Args.push_back(Arg);
4683     }
4684 
4685     Function *VectorF;
4686     if (UseVectorIntrinsic) {
4687       // Use vector version of the intrinsic.
4688       if (VF.isVector())
4689         TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
4690       VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4691       assert(VectorF && "Can't retrieve vector intrinsic.");
4692     } else {
4693       // Use vector version of the function call.
4694       const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
4695 #ifndef NDEBUG
4696       assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
4697              "Can't create vector function.");
4698 #endif
      VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
    }
    SmallVector<OperandBundleDef, 1> OpBundles;
    CI->getOperandBundlesAsDefs(OpBundles);
    CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);

    if (isa<FPMathOperator>(V))
      V->copyFastMathFlags(CI);

    State.set(Def, V, Part);
    addMetadata(V, &I);
4710   }
4711 }
4712 
4713 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
4714   // We should not collect Scalars more than once per VF. Right now, this
4715   // function is called from collectUniformsAndScalars(), which already does
4716   // this check. Collecting Scalars for VF=1 does not make any sense.
4717   assert(VF.isVector() && Scalars.find(VF) == Scalars.end() &&
4718          "This function should not be visited twice for the same VF");
4719 
4720   SmallSetVector<Instruction *, 8> Worklist;
4721 
4722   // These sets are used to seed the analysis with pointers used by memory
4723   // accesses that will remain scalar.
4724   SmallSetVector<Instruction *, 8> ScalarPtrs;
4725   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
4726   auto *Latch = TheLoop->getLoopLatch();
4727 
4728   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
4729   // The pointer operands of loads and stores will be scalar as long as the
4730   // memory access is not a gather or scatter operation. The value operand of a
4731   // store will remain scalar if the store is scalarized.
4732   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
4733     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
4734     assert(WideningDecision != CM_Unknown &&
4735            "Widening decision should be ready at this moment");
4736     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
4737       if (Ptr == Store->getValueOperand())
4738         return WideningDecision == CM_Scalarize;
4739     assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
           "Ptr is neither a value nor a pointer operand");
4741     return WideningDecision != CM_GatherScatter;
4742   };
4743 
4744   // A helper that returns true if the given value is a bitcast or
4745   // getelementptr instruction contained in the loop.
4746   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
4747     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
4748             isa<GetElementPtrInst>(V)) &&
4749            !TheLoop->isLoopInvariant(V);
4750   };
4751 
4752   // A helper that evaluates a memory access's use of a pointer. If the use will
4753   // be a scalar use and the pointer is only used by memory accesses, we place
4754   // the pointer in ScalarPtrs. Otherwise, the pointer is placed in
4755   // PossibleNonScalarPtrs.
4756   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
4757     // We only care about bitcast and getelementptr instructions contained in
4758     // the loop.
4759     if (!isLoopVaryingBitCastOrGEP(Ptr))
4760       return;
4761 
4762     // If the pointer has already been identified as scalar (e.g., if it was
4763     // also identified as uniform), there's nothing to do.
4764     auto *I = cast<Instruction>(Ptr);
4765     if (Worklist.count(I))
4766       return;
4767 
4768     // If the use of the pointer will be a scalar use, and all users of the
4769     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
4770     // place the pointer in PossibleNonScalarPtrs.
4771     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
4772           return isa<LoadInst>(U) || isa<StoreInst>(U);
4773         }))
4774       ScalarPtrs.insert(I);
4775     else
4776       PossibleNonScalarPtrs.insert(I);
4777   };
4778 
  // We seed the scalars analysis with two classes of instructions: (1)
  // instructions marked uniform-after-vectorization and (2) bitcast,
  // getelementptr and (pointer) phi instructions used by memory accesses
  // requiring a scalar use.
4783   //
4784   // (1) Add to the worklist all instructions that have been identified as
4785   // uniform-after-vectorization.
4786   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
4787 
4788   // (2) Add to the worklist all bitcast and getelementptr instructions used by
4789   // memory accesses requiring a scalar use. The pointer operands of loads and
  // stores will be scalar as long as the memory access is not a gather or
4791   // scatter operation. The value operand of a store will remain scalar if the
4792   // store is scalarized.
4793   for (auto *BB : TheLoop->blocks())
4794     for (auto &I : *BB) {
4795       if (auto *Load = dyn_cast<LoadInst>(&I)) {
4796         evaluatePtrUse(Load, Load->getPointerOperand());
4797       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
4798         evaluatePtrUse(Store, Store->getPointerOperand());
4799         evaluatePtrUse(Store, Store->getValueOperand());
4800       }
4801     }
4802   for (auto *I : ScalarPtrs)
4803     if (!PossibleNonScalarPtrs.count(I)) {
4804       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
4805       Worklist.insert(I);
4806     }
4807 
4808   // Insert the forced scalars.
4809   // FIXME: Currently widenPHIInstruction() often creates a dead vector
4810   // induction variable when the PHI user is scalarized.
4811   auto ForcedScalar = ForcedScalars.find(VF);
4812   if (ForcedScalar != ForcedScalars.end())
4813     for (auto *I : ForcedScalar->second)
4814       Worklist.insert(I);
4815 
4816   // Expand the worklist by looking through any bitcasts and getelementptr
4817   // instructions we've already identified as scalar. This is similar to the
4818   // expansion step in collectLoopUniforms(); however, here we're only
4819   // expanding to include additional bitcasts and getelementptr instructions.
4820   unsigned Idx = 0;
4821   while (Idx != Worklist.size()) {
4822     Instruction *Dst = Worklist[Idx++];
4823     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
4824       continue;
4825     auto *Src = cast<Instruction>(Dst->getOperand(0));
4826     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
4827           auto *J = cast<Instruction>(U);
4828           return !TheLoop->contains(J) || Worklist.count(J) ||
4829                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
4830                   isScalarUse(J, Src));
4831         })) {
4832       Worklist.insert(Src);
4833       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
4834     }
4835   }
4836 
4837   // An induction variable will remain scalar if all users of the induction
4838   // variable and induction variable update remain scalar.
4839   for (auto &Induction : Legal->getInductionVars()) {
4840     auto *Ind = Induction.first;
4841     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4842 
4843     // If tail-folding is applied, the primary induction variable will be used
4844     // to feed a vector compare.
4845     if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
4846       continue;
4847 
4848     // Returns true if \p Indvar is a pointer induction that is used directly by
4849     // load/store instruction \p I.
4850     auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar,
4851                                               Instruction *I) {
4852       return Induction.second.getKind() ==
4853                  InductionDescriptor::IK_PtrInduction &&
4854              (isa<LoadInst>(I) || isa<StoreInst>(I)) &&
4855              Indvar == getLoadStorePointerOperand(I) && isScalarUse(I, Indvar);
4856     };
4857 
4858     // Determine if all users of the induction variable are scalar after
4859     // vectorization.
4860     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4861       auto *I = cast<Instruction>(U);
4862       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
4863              IsDirectLoadStoreFromPtrIndvar(Ind, I);
4864     });
4865     if (!ScalarInd)
4866       continue;
4867 
4868     // Determine if all users of the induction variable update instruction are
4869     // scalar after vectorization.
4870     auto ScalarIndUpdate =
4871         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4872           auto *I = cast<Instruction>(U);
4873           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
4874                  IsDirectLoadStoreFromPtrIndvar(IndUpdate, I);
4875         });
4876     if (!ScalarIndUpdate)
4877       continue;
4878 
4879     // The induction variable and its update instruction will remain scalar.
4880     Worklist.insert(Ind);
4881     Worklist.insert(IndUpdate);
4882     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
4883     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
4884                       << "\n");
4885   }
4886 
4887   Scalars[VF].insert(Worklist.begin(), Worklist.end());
4888 }
4889 
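// An instruction in a block that needs predication must be scalarized (and
// predicated) if the target cannot execute it as a masked vector operation:
// loads and stores without a legal masked (or gather/scatter) form, and
// integer divisions or remainders that might divide by zero in a masked-off
// lane.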
4890 bool LoopVectorizationCostModel::isScalarWithPredication(
4891     Instruction *I, ElementCount VF) const {
4892   if (!blockNeedsPredicationForAnyReason(I->getParent()))
4893     return false;
  switch (I->getOpcode()) {
4895   default:
4896     break;
4897   case Instruction::Load:
4898   case Instruction::Store: {
4899     if (!Legal->isMaskRequired(I))
4900       return false;
4901     auto *Ptr = getLoadStorePointerOperand(I);
4902     auto *Ty = getLoadStoreType(I);
4903     Type *VTy = Ty;
4904     if (VF.isVector())
4905       VTy = VectorType::get(Ty, VF);
4906     const Align Alignment = getLoadStoreAlignment(I);
4907     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
4908                                 TTI.isLegalMaskedGather(VTy, Alignment))
4909                             : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
4910                                 TTI.isLegalMaskedScatter(VTy, Alignment));
4911   }
4912   case Instruction::UDiv:
4913   case Instruction::SDiv:
4914   case Instruction::SRem:
4915   case Instruction::URem:
4916     return mayDivideByZero(*I);
4917   }
4918   return false;
4919 }
4920 
4921 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
4922     Instruction *I, ElementCount VF) {
4923   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
4924   assert(getWideningDecision(I, VF) == CM_Unknown &&
4925          "Decision should not be set yet.");
4926   auto *Group = getInterleavedAccessGroup(I);
4927   assert(Group && "Must have a group.");
4928 
  // If the instruction's allocated size doesn't equal its type size, it
4930   // requires padding and will be scalarized.
4931   auto &DL = I->getModule()->getDataLayout();
4932   auto *ScalarTy = getLoadStoreType(I);
4933   if (hasIrregularType(ScalarTy, DL))
4934     return false;
4935 
4936   // Check if masking is required.
4937   // A Group may need masking for one of two reasons: it resides in a block that
4938   // needs predication, or it was decided to use masking to deal with gaps
4939   // (either a gap at the end of a load-access that may result in a speculative
4940   // load, or any gaps in a store-access).
4941   bool PredicatedAccessRequiresMasking =
4942       blockNeedsPredicationForAnyReason(I->getParent()) &&
4943       Legal->isMaskRequired(I);
4944   bool LoadAccessWithGapsRequiresEpilogMasking =
4945       isa<LoadInst>(I) && Group->requiresScalarEpilogue() &&
4946       !isScalarEpilogueAllowed();
4947   bool StoreAccessWithGapsRequiresMasking =
4948       isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor());
4949   if (!PredicatedAccessRequiresMasking &&
4950       !LoadAccessWithGapsRequiresEpilogMasking &&
4951       !StoreAccessWithGapsRequiresMasking)
4952     return true;
4953 
  // If masked interleaving is required, we expect that the user/target has
  // enabled it, because otherwise the group either wouldn't have been created
  // or it should have been invalidated by the CostModel.
4957   assert(useMaskedInterleavedAccesses(TTI) &&
4958          "Masked interleave-groups for predicated accesses are not enabled.");
4959 
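  // Masked interleave-groups in reverse order are not currently supported,
  // so such groups cannot be widened.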
4960   if (Group->isReverse())
4961     return false;
4962 
4963   auto *Ty = getLoadStoreType(I);
4964   const Align Alignment = getLoadStoreAlignment(I);
4965   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
4966                           : TTI.isLegalMaskedStore(Ty, Alignment);
4967 }
4968 
4969 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
4970     Instruction *I, ElementCount VF) {
4971   // Get and ensure we have a valid memory instruction.
4972   assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction");
4973 
4974   auto *Ptr = getLoadStorePointerOperand(I);
4975   auto *ScalarTy = getLoadStoreType(I);
4976 
4977   // In order to be widened, the pointer should be consecutive, first of all.
4978   if (!Legal->isConsecutivePtr(ScalarTy, Ptr))
4979     return false;
4980 
4981   // If the instruction is a store located in a predicated block, it will be
4982   // scalarized.
4983   if (isScalarWithPredication(I, VF))
4984     return false;
4985 
  // If the instruction's allocated size doesn't equal its type size, it
4987   // requires padding and will be scalarized.
4988   auto &DL = I->getModule()->getDataLayout();
4989   if (hasIrregularType(ScalarTy, DL))
4990     return false;
4991 
4992   return true;
4993 }
4994 
4995 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
4996   // We should not collect Uniforms more than once per VF. Right now,
4997   // this function is called from collectUniformsAndScalars(), which
4998   // already does this check. Collecting Uniforms for VF=1 does not make any
4999   // sense.
5000 
5001   assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
5002          "This function should not be visited twice for the same VF");
5003 
  // Create the entry for this VF up front so that, even if no uniform value
  // is found, we do not analyze the same VF again; Uniforms.count(VF) will
  // return 1.
5006   Uniforms[VF].clear();
5007 
5008   // We now know that the loop is vectorizable!
5009   // Collect instructions inside the loop that will remain uniform after
5010   // vectorization.
5011 
  // Global values, params and instructions outside of the current loop are
  // out of scope.
5014   auto isOutOfScope = [&](Value *V) -> bool {
5015     Instruction *I = dyn_cast<Instruction>(V);
5016     return (!I || !TheLoop->contains(I));
5017   };
5018 
5019   // Worklist containing uniform instructions demanding lane 0.
5020   SetVector<Instruction *> Worklist;
5021   BasicBlock *Latch = TheLoop->getLoopLatch();
5022 
5023   // Add uniform instructions demanding lane 0 to the worklist. Instructions
5024   // that are scalar with predication must not be considered uniform after
5025   // vectorization, because that would create an erroneous replicating region
5026   // where only a single instance out of VF should be formed.
  // TODO: optimize such rare cases if they prove important, see PR40816.
5028   auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
5029     if (isOutOfScope(I)) {
5030       LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
5031                         << *I << "\n");
5032       return;
5033     }
5034     if (isScalarWithPredication(I, VF)) {
5035       LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
5036                         << *I << "\n");
5037       return;
5038     }
5039     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
5040     Worklist.insert(I);
5041   };
5042 
5043   // Start with the conditional branch. If the branch condition is an
5044   // instruction contained in the loop that is only used by the branch, it is
5045   // uniform.
5046   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
5047   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
5048     addToWorklistIfAllowed(Cmp);
5049 
5050   auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
5051     InstWidening WideningDecision = getWideningDecision(I, VF);
5052     assert(WideningDecision != CM_Unknown &&
5053            "Widening decision should be ready at this moment");
5054 
5055     // A uniform memory op is itself uniform.  We exclude uniform stores
5056     // here as they demand the last lane, not the first one.
5057     if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) {
5058       assert(WideningDecision == CM_Scalarize);
5059       return true;
5060     }
5061 
5062     return (WideningDecision == CM_Widen ||
5063             WideningDecision == CM_Widen_Reverse ||
5064             WideningDecision == CM_Interleave);
5065   };
5066 
5067 
5068   // Returns true if Ptr is the pointer operand of a memory access instruction
5069   // I, and I is known to not require scalarization.
5070   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
5071     return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
5072   };
5073 
5074   // Holds a list of values which are known to have at least one uniform use.
5075   // Note that there may be other uses which aren't uniform.  A "uniform use"
5076   // here is something which only demands lane 0 of the unrolled iterations;
5077   // it does not imply that all lanes produce the same value (e.g. this is not
5078   // the usual meaning of uniform)
5079   SetVector<Value *> HasUniformUse;
5080 
5081   // Scan the loop for instructions which are either a) known to have only
5082   // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
5083   for (auto *BB : TheLoop->blocks())
5084     for (auto &I : *BB) {
5085       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) {
5086         switch (II->getIntrinsicID()) {
5087         case Intrinsic::sideeffect:
5088         case Intrinsic::experimental_noalias_scope_decl:
5089         case Intrinsic::assume:
5090         case Intrinsic::lifetime_start:
5091         case Intrinsic::lifetime_end:
5092           if (TheLoop->hasLoopInvariantOperands(&I))
5093             addToWorklistIfAllowed(&I);
5094           break;
5095         default:
5096           break;
5097         }
5098       }
5099 
5100       // ExtractValue instructions must be uniform, because the operands are
5101       // known to be loop-invariant.
5102       if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) {
5103         assert(isOutOfScope(EVI->getAggregateOperand()) &&
5104                "Expected aggregate value to be loop invariant");
5105         addToWorklistIfAllowed(EVI);
5106         continue;
5107       }
5108 
5109       // If there's no pointer operand, there's nothing to do.
5110       auto *Ptr = getLoadStorePointerOperand(&I);
5111       if (!Ptr)
5112         continue;
5113 
5114       // A uniform memory op is itself uniform.  We exclude uniform stores
5115       // here as they demand the last lane, not the first one.
5116       if (isa<LoadInst>(I) && Legal->isUniformMemOp(I))
5117         addToWorklistIfAllowed(&I);
5118 
5119       if (isUniformDecision(&I, VF)) {
5120         assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check");
5121         HasUniformUse.insert(Ptr);
5122       }
5123     }
5124 
5125   // Add to the worklist any operands which have *only* uniform (e.g. lane 0
5126   // demanding) users.  Since loops are assumed to be in LCSSA form, this
5127   // disallows uses outside the loop as well.
5128   for (auto *V : HasUniformUse) {
5129     if (isOutOfScope(V))
5130       continue;
5131     auto *I = cast<Instruction>(V);
5132     auto UsersAreMemAccesses =
5133       llvm::all_of(I->users(), [&](User *U) -> bool {
5134         return isVectorizedMemAccessUse(cast<Instruction>(U), V);
5135       });
5136     if (UsersAreMemAccesses)
5137       addToWorklistIfAllowed(I);
5138   }
5139 
  // Expand Worklist in topological order: whenever a new instruction
  // is added, its users should already be inside Worklist. This ensures
  // that a uniform instruction will only be used by uniform instructions.
5143   unsigned idx = 0;
5144   while (idx != Worklist.size()) {
5145     Instruction *I = Worklist[idx++];
5146 
5147     for (auto OV : I->operand_values()) {
5148       // isOutOfScope operands cannot be uniform instructions.
5149       if (isOutOfScope(OV))
5150         continue;
      // First-order recurrence phis should typically be considered
      // non-uniform.
5153       auto *OP = dyn_cast<PHINode>(OV);
5154       if (OP && Legal->isFirstOrderRecurrence(OP))
5155         continue;
5156       // If all the users of the operand are uniform, then add the
5157       // operand into the uniform worklist.
5158       auto *OI = cast<Instruction>(OV);
5159       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
5160             auto *J = cast<Instruction>(U);
5161             return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
5162           }))
5163         addToWorklistIfAllowed(OI);
5164     }
5165   }
5166 
5167   // For an instruction to be added into Worklist above, all its users inside
5168   // the loop should also be in Worklist. However, this condition cannot be
5169   // true for phi nodes that form a cyclic dependence. We must process phi
5170   // nodes separately. An induction variable will remain uniform if all users
5171   // of the induction variable and induction variable update remain uniform.
5172   // The code below handles both pointer and non-pointer induction variables.
5173   for (auto &Induction : Legal->getInductionVars()) {
5174     auto *Ind = Induction.first;
5175     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5176 
5177     // Determine if all users of the induction variable are uniform after
5178     // vectorization.
5179     auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5180       auto *I = cast<Instruction>(U);
5181       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
5182              isVectorizedMemAccessUse(I, Ind);
5183     });
5184     if (!UniformInd)
5185       continue;
5186 
5187     // Determine if all users of the induction variable update instruction are
5188     // uniform after vectorization.
5189     auto UniformIndUpdate =
5190         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5191           auto *I = cast<Instruction>(U);
5192           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
5193                  isVectorizedMemAccessUse(I, IndUpdate);
5194         });
5195     if (!UniformIndUpdate)
5196       continue;
5197 
5198     // The induction variable and its update instruction will remain uniform.
5199     addToWorklistIfAllowed(Ind);
5200     addToWorklistIfAllowed(IndUpdate);
5201   }
5202 
5203   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
5204 }
5205 
5206 bool LoopVectorizationCostModel::runtimeChecksRequired() {
5207   LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
5208 
5209   if (Legal->getRuntimePointerChecking()->Need) {
5210     reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
5211         "runtime pointer checks needed. Enable vectorization of this "
5212         "loop with '#pragma clang loop vectorize(enable)' when "
5213         "compiling with -Os/-Oz",
5214         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5215     return true;
5216   }
5217 
5218   if (!PSE.getUnionPredicate().getPredicates().empty()) {
5219     reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
5220         "runtime SCEV checks needed. Enable vectorization of this "
5221         "loop with '#pragma clang loop vectorize(enable)' when "
5222         "compiling with -Os/-Oz",
5223         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5224     return true;
5225   }
5226 
5227   // FIXME: Avoid specializing for stride==1 instead of bailing out.
5228   if (!Legal->getLAI()->getSymbolicStrides().empty()) {
5229     reportVectorizationFailure("Runtime stride check for small trip count",
5230         "runtime stride == 1 checks needed. Enable vectorization of "
5231         "this loop without such check by compiling with -Os/-Oz",
5232         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5233     return true;
5234   }
5235 
5236   return false;
5237 }
5238 
5239 ElementCount
5240 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
5241   if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors)
5242     return ElementCount::getScalable(0);
5243 
5244   if (Hints->isScalableVectorizationDisabled()) {
5245     reportVectorizationInfo("Scalable vectorization is explicitly disabled",
5246                             "ScalableVectorizationDisabled", ORE, TheLoop);
5247     return ElementCount::getScalable(0);
5248   }
5249 
5250   LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n");
5251 
5252   auto MaxScalableVF = ElementCount::getScalable(
5253       std::numeric_limits<ElementCount::ScalarTy>::max());
5254 
5255   // Test that the loop-vectorizer can legalize all operations for this MaxVF.
5256   // FIXME: While for scalable vectors this is currently sufficient, this should
5257   // be replaced by a more detailed mechanism that filters out specific VFs,
5258   // instead of invalidating vectorization for a whole set of VFs based on the
5259   // MaxVF.
5260 
5261   // Disable scalable vectorization if the loop contains unsupported reductions.
5262   if (!canVectorizeReductions(MaxScalableVF)) {
5263     reportVectorizationInfo(
5264         "Scalable vectorization not supported for the reduction "
5265         "operations found in this loop.",
5266         "ScalableVFUnfeasible", ORE, TheLoop);
5267     return ElementCount::getScalable(0);
5268   }
5269 
5270   // Disable scalable vectorization if the loop contains any instructions
5271   // with element types not supported for scalable vectors.
5272   if (any_of(ElementTypesInLoop, [&](Type *Ty) {
5273         return !Ty->isVoidTy() &&
5274                !this->TTI.isElementTypeLegalForScalableVector(Ty);
5275       })) {
5276     reportVectorizationInfo("Scalable vectorization is not supported "
5277                             "for all element types found in this loop.",
5278                             "ScalableVFUnfeasible", ORE, TheLoop);
5279     return ElementCount::getScalable(0);
5280   }
5281 
5282   if (Legal->isSafeForAnyVectorWidth())
5283     return MaxScalableVF;
5284 
5285   // Limit MaxScalableVF by the maximum safe dependence distance.
5286   Optional<unsigned> MaxVScale = TTI.getMaxVScale();
5287   if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange))
5288     MaxVScale =
5289         TheFunction->getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax();
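  // Clamp the scalable VF so that the effective element count (VF * vscale)
  // never exceeds the maximum safe number of elements. E.g. MaxSafeElements
  // of 32 with a maximum vscale of 4 limits the VF to vscale x 8.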
5290   MaxScalableVF = ElementCount::getScalable(
5291       MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);
5292   if (!MaxScalableVF)
5293     reportVectorizationInfo(
5294         "Max legal vector width too small, scalable vectorization "
5295         "unfeasible.",
5296         "ScalableVFUnfeasible", ORE, TheLoop);
5297 
5298   return MaxScalableVF;
5299 }
5300 
5301 FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF(
5302     unsigned ConstTripCount, ElementCount UserVF, bool FoldTailByMasking) {
5303   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
5304   unsigned SmallestType, WidestType;
5305   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
5306 
5307   // Get the maximum safe dependence distance in bits computed by LAA.
  // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
  // the memory access that is most restrictive (involved in the smallest
  // dependence distance).
5311   unsigned MaxSafeElements =
5312       PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType);
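  // E.g. a maximum safe width of 256 bits with a widest type of 32 bits
  // allows at most 8 elements per vectorized iteration.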
5313 
5314   auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements);
5315   auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements);
5316 
5317   LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF
5318                     << ".\n");
5319   LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF
5320                     << ".\n");
5321 
  // First analyze the UserVF; fall back if the UserVF should be ignored.
5323   if (UserVF) {
5324     auto MaxSafeUserVF =
5325         UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;
5326 
5327     if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) {
5328       // If `VF=vscale x N` is safe, then so is `VF=N`
5329       if (UserVF.isScalable())
5330         return FixedScalableVFPair(
5331             ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF);
5332       else
5333         return UserVF;
5334     }
5335 
5336     assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));
5337 
5338     // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it
5339     // is better to ignore the hint and let the compiler choose a suitable VF.
5340     if (!UserVF.isScalable()) {
5341       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5342                         << " is unsafe, clamping to max safe VF="
5343                         << MaxSafeFixedVF << ".\n");
5344       ORE->emit([&]() {
5345         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5346                                           TheLoop->getStartLoc(),
5347                                           TheLoop->getHeader())
5348                << "User-specified vectorization factor "
5349                << ore::NV("UserVectorizationFactor", UserVF)
5350                << " is unsafe, clamping to maximum safe vectorization factor "
5351                << ore::NV("VectorizationFactor", MaxSafeFixedVF);
5352       });
5353       return MaxSafeFixedVF;
5354     }
5355 
5356     if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) {
5357       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5358                         << " is ignored because scalable vectors are not "
5359                            "available.\n");
5360       ORE->emit([&]() {
5361         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5362                                           TheLoop->getStartLoc(),
5363                                           TheLoop->getHeader())
5364                << "User-specified vectorization factor "
5365                << ore::NV("UserVectorizationFactor", UserVF)
5366                << " is ignored because the target does not support scalable "
5367                   "vectors. The compiler will pick a more suitable value.";
5368       });
5369     } else {
5370       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5371                         << " is unsafe. Ignoring scalable UserVF.\n");
5372       ORE->emit([&]() {
5373         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5374                                           TheLoop->getStartLoc(),
5375                                           TheLoop->getHeader())
5376                << "User-specified vectorization factor "
5377                << ore::NV("UserVectorizationFactor", UserVF)
5378                << " is unsafe. Ignoring the hint to let the compiler pick a "
5379                   "more suitable value.";
5380       });
5381     }
5382   }
5383 
5384   LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
5385                     << " / " << WidestType << " bits.\n");
5386 
5387   FixedScalableVFPair Result(ElementCount::getFixed(1),
5388                              ElementCount::getScalable(0));
5389   if (auto MaxVF =
5390           getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
5391                                   MaxSafeFixedVF, FoldTailByMasking))
5392     Result.FixedVF = MaxVF;
5393 
5394   if (auto MaxVF =
5395           getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
5396                                   MaxSafeScalableVF, FoldTailByMasking))
5397     if (MaxVF.isScalable()) {
5398       Result.ScalableVF = MaxVF;
5399       LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
5400                         << "\n");
5401     }
5402 
5403   return Result;
5404 }
5405 
5406 FixedScalableVFPair
5407 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
5408   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to do this, since the check is still likely to
    // be dynamically uniform if the target can skip it.
5411     reportVectorizationFailure(
5412         "Not inserting runtime ptr check for divergent target",
5413         "runtime pointer checks needed. Not enabled for divergent target",
5414         "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
5415     return FixedScalableVFPair::getNone();
5416   }
5417 
5418   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5419   LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
5420   if (TC == 1) {
5421     reportVectorizationFailure("Single iteration (non) loop",
5422         "loop trip count is one, irrelevant for vectorization",
5423         "SingleIterationLoop", ORE, TheLoop);
5424     return FixedScalableVFPair::getNone();
5425   }
5426 
5427   switch (ScalarEpilogueStatus) {
5428   case CM_ScalarEpilogueAllowed:
5429     return computeFeasibleMaxVF(TC, UserVF, false);
5430   case CM_ScalarEpilogueNotAllowedUsePredicate:
5431     LLVM_FALLTHROUGH;
5432   case CM_ScalarEpilogueNotNeededUsePredicate:
5433     LLVM_DEBUG(
5434         dbgs() << "LV: vector predicate hint/switch found.\n"
5435                << "LV: Not allowing scalar epilogue, creating predicated "
5436                << "vector loop.\n");
5437     break;
5438   case CM_ScalarEpilogueNotAllowedLowTripLoop:
5439     // fallthrough as a special case of OptForSize
5440   case CM_ScalarEpilogueNotAllowedOptSize:
5441     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
5442       LLVM_DEBUG(
5443           dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5444     else
5445       LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5446                         << "count.\n");
5447 
5448     // Bail if runtime checks are required, which are not good when optimising
5449     // for size.
5450     if (runtimeChecksRequired())
5451       return FixedScalableVFPair::getNone();
5452 
5453     break;
5454   }
5455 
  // The only loops we can vectorize without a scalar epilogue are loops with
5457   // a bottom-test and a single exiting block. We'd have to handle the fact
5458   // that not every instruction executes on the last iteration.  This will
5459   // require a lane mask which varies through the vector loop body.  (TODO)
5460   if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5461     // If there was a tail-folding hint/switch, but we can't fold the tail by
5462     // masking, fallback to a vectorization with a scalar epilogue.
5463     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5464       LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5465                            "scalar epilogue instead.\n");
5466       ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5467       return computeFeasibleMaxVF(TC, UserVF, false);
5468     }
5469     return FixedScalableVFPair::getNone();
5470   }
5471 
  // Now try tail folding.
5473 
5474   // Invalidate interleave groups that require an epilogue if we can't mask
5475   // the interleave-group.
5476   if (!useMaskedInterleavedAccesses(TTI)) {
5477     assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5478            "No decisions should have been taken at this point");
    // Note: There is no need to invalidate any cost modeling decisions here,
    // as none were taken so far.
5481     InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5482   }
5483 
5484   FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF, true);
5485   // Avoid tail folding if the trip count is known to be a multiple of any VF
5486   // we chose.
5487   // FIXME: The condition below pessimises the case for fixed-width vectors,
5488   // when scalable VFs are also candidates for vectorization.
5489   if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) {
5490     ElementCount MaxFixedVF = MaxFactors.FixedVF;
5491     assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) &&
5492            "MaxFixedVF must be a power of 2");
5493     unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC
5494                                    : MaxFixedVF.getFixedValue();
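    // E.g. MaxFixedVF = 8 with UserIC = 4 gives MaxVFtimesIC = 32; a trip
    // count known to be a multiple of 32 leaves no tail to fold.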
5495     ScalarEvolution *SE = PSE.getSE();
5496     const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
5497     const SCEV *ExitCount = SE->getAddExpr(
5498         BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
5499     const SCEV *Rem = SE->getURemExpr(
5500         SE->applyLoopGuards(ExitCount, TheLoop),
5501         SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
5502     if (Rem->isZero()) {
5503       // Accept MaxFixedVF if we do not have a tail.
5504       LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
5505       return MaxFactors;
5506     }
5507   }
5508 
  // For scalable vectors, don't use tail folding for low trip counts or when
  // optimizing for code size. We only permit this if the user has explicitly
  // requested it.
5512   if (ScalarEpilogueStatus != CM_ScalarEpilogueNotNeededUsePredicate &&
5513       ScalarEpilogueStatus != CM_ScalarEpilogueNotAllowedUsePredicate &&
5514       MaxFactors.ScalableVF.isVector())
5515     MaxFactors.ScalableVF = ElementCount::getScalable(0);
5516 
5517   // If we don't know the precise trip count, or if the trip count that we
5518   // found modulo the vectorization factor is not zero, try to fold the tail
5519   // by masking.
5520   // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
5521   if (Legal->prepareToFoldTailByMasking()) {
5522     FoldTailByMasking = true;
5523     return MaxFactors;
5524   }
5525 
5526   // If there was a tail-folding hint/switch, but we can't fold the tail by
5527   // masking, fallback to a vectorization with a scalar epilogue.
5528   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5529     LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5530                          "scalar epilogue instead.\n");
5531     ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5532     return MaxFactors;
5533   }
5534 
5535   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
5536     LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
5537     return FixedScalableVFPair::getNone();
5538   }
5539 
5540   if (TC == 0) {
5541     reportVectorizationFailure(
5542         "Unable to calculate the loop count due to complex control flow",
5543         "unable to calculate the loop count due to complex control flow",
5544         "UnknownLoopCountComplexCFG", ORE, TheLoop);
5545     return FixedScalableVFPair::getNone();
5546   }
5547 
5548   reportVectorizationFailure(
5549       "Cannot optimize for size and vectorize at the same time.",
5550       "cannot optimize for size and vectorize at the same time. "
5551       "Enable vectorization of this loop with '#pragma clang loop "
5552       "vectorize(enable)' when compiling with -Os/-Oz",
5553       "NoTailLoopWithOptForSize", ORE, TheLoop);
5554   return FixedScalableVFPair::getNone();
5555 }
5556 
5557 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
5558     unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType,
5559     const ElementCount &MaxSafeVF, bool FoldTailByMasking) {
5560   bool ComputeScalableMaxVF = MaxSafeVF.isScalable();
5561   TypeSize WidestRegister = TTI.getRegisterBitWidth(
5562       ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
5563                            : TargetTransformInfo::RGK_FixedWidthVector);
5564 
5565   // Convenience function to return the minimum of two ElementCounts.
5566   auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) {
5567     assert((LHS.isScalable() == RHS.isScalable()) &&
5568            "Scalable flags must match");
5569     return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS;
5570   };
5571 
5572   // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
  // Note that neither WidestRegister nor WidestType need be a power of 2.
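  // E.g. a 256-bit widest register with a 32-bit widest element type yields
  // a MaxVectorElementCount of 8 (or vscale x 8 when computing a scalable
  // maximum).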
5574   auto MaxVectorElementCount = ElementCount::get(
5575       PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType),
5576       ComputeScalableMaxVF);
5577   MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
5578   LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
5579                     << (MaxVectorElementCount * WidestType) << " bits.\n");
5580 
5581   if (!MaxVectorElementCount) {
5582     LLVM_DEBUG(dbgs() << "LV: The target has no "
5583                       << (ComputeScalableMaxVF ? "scalable" : "fixed")
5584                       << " vector registers.\n");
5585     return ElementCount::getFixed(1);
5586   }
5587 
5588   const auto TripCountEC = ElementCount::getFixed(ConstTripCount);
5589   if (ConstTripCount &&
5590       ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) &&
5591       (!FoldTailByMasking || isPowerOf2_32(ConstTripCount))) {
5592     // If loop trip count (TC) is known at compile time there is no point in
5593     // choosing VF greater than TC (as done in the loop below). Select maximum
5594     // power of two which doesn't exceed TC.
5595     // If MaxVectorElementCount is scalable, we only fall back on a fixed VF
5596     // when the TC is less than or equal to the known number of lanes.
5597     auto ClampedConstTripCount = PowerOf2Floor(ConstTripCount);
5598     LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to maximum power of two not "
5599                          "exceeding the constant trip count: "
5600                       << ClampedConstTripCount << "\n");
5601     return ElementCount::getFixed(ClampedConstTripCount);
5602   }
5603 
5604   ElementCount MaxVF = MaxVectorElementCount;
5605   if (TTI.shouldMaximizeVectorBandwidth() ||
5606       (MaximizeBandwidth && isScalarEpilogueAllowed())) {
5607     auto MaxVectorElementCountMaxBW = ElementCount::get(
5608         PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType),
5609         ComputeScalableMaxVF);
5610     MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
5611 
5612     // Collect all viable vectorization factors larger than the default MaxVF
5613     // (i.e. MaxVectorElementCount).
5614     SmallVector<ElementCount, 8> VFs;
5615     for (ElementCount VS = MaxVectorElementCount * 2;
5616          ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2)
5617       VFs.push_back(VS);
5618 
5619     // For each VF calculate its register usage.
5620     auto RUs = calculateRegisterUsage(VFs);
5621 
5622     // Select the largest VF which doesn't require more registers than existing
5623     // ones.
5624     for (int i = RUs.size() - 1; i >= 0; --i) {
5625       bool Selected = true;
5626       for (auto &pair : RUs[i].MaxLocalUsers) {
5627         unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5628         if (pair.second > TargetNumRegisters)
5629           Selected = false;
5630       }
5631       if (Selected) {
5632         MaxVF = VFs[i];
5633         break;
5634       }
5635     }
5636     if (ElementCount MinVF =
5637             TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) {
5638       if (ElementCount::isKnownLT(MaxVF, MinVF)) {
5639         LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
5640                           << ") with target's minimum: " << MinVF << '\n');
5641         MaxVF = MinVF;
5642       }
5643     }
5644   }
5645   return MaxVF;
5646 }
5647 
5648 bool LoopVectorizationCostModel::isMoreProfitable(
5649     const VectorizationFactor &A, const VectorizationFactor &B) const {
5650   InstructionCost CostA = A.Cost;
5651   InstructionCost CostB = B.Cost;
5652 
5653   unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop);
5654 
5655   if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking &&
5656       MaxTripCount) {
5657     // If we are folding the tail and the trip count is a known (possibly small)
5658     // constant, the trip count will be rounded up to an integer number of
5659     // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF),
5660     // which we compare directly. When not folding the tail, the total cost will
5661     // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is
5662     // approximated with the per-lane cost below instead of using the tripcount
5663     // as here.
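    // E.g. with MaxTripCount = 10, a width-4 factor runs ceil(10/4) = 3
    // vector iterations while a width-8 factor runs ceil(10/8) = 2, so the
    // total costs compared are CostA * 3 and CostB * 2 respectively.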
5664     auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue());
5665     auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue());
5666     return RTCostA < RTCostB;
5667   }
5668 
5669   // Improve estimate for the vector width if it is scalable.
5670   unsigned EstimatedWidthA = A.Width.getKnownMinValue();
5671   unsigned EstimatedWidthB = B.Width.getKnownMinValue();
5672   if (Optional<unsigned> VScale = TTI.getVScaleForTuning()) {
5673     if (A.Width.isScalable())
5674       EstimatedWidthA *= VScale.getValue();
5675     if (B.Width.isScalable())
5676       EstimatedWidthB *= VScale.getValue();
5677   }
5678 
5679   // Assume vscale may be larger than 1 (or the value being tuned for),
5680   // so that scalable vectorization is slightly favorable over fixed-width
5681   // vectorization.
5682   if (A.Width.isScalable() && !B.Width.isScalable())
5683     return (CostA * B.Width.getFixedValue()) <= (CostB * EstimatedWidthA);
5684 
5685   // To avoid the need for FP division:
5686   //      (CostA / A.Width) < (CostB / B.Width)
5687   // <=>  (CostA * B.Width) < (CostB * A.Width)
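  // E.g. CostA = 8 at width 4 vs. CostB = 6 at width 2: 8 * 2 = 16 is less
  // than 6 * 4 = 24, so A (2 per lane) is preferred over B (3 per lane).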
5688   return (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA);
5689 }
5690 
5691 VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor(
5692     const ElementCountSet &VFCandidates) {
5693   InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first;
5694   LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
5695   assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
5696   assert(VFCandidates.count(ElementCount::getFixed(1)) &&
5697          "Expected Scalar VF to be a candidate");
5698 
5699   const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost);
5700   VectorizationFactor ChosenFactor = ScalarCost;
5701 
5702   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
5703   if (ForceVectorization && VFCandidates.size() > 1) {
5704     // Ignore scalar width, because the user explicitly wants vectorization.
5705     // Initialize cost to max so that VF = 2 is, at least, chosen during cost
5706     // evaluation.
5707     ChosenFactor.Cost = InstructionCost::getMax();
5708   }
5709 
5710   SmallVector<InstructionVFPair> InvalidCosts;
5711   for (const auto &i : VFCandidates) {
5712     // The cost for scalar VF=1 is already calculated, so ignore it.
5713     if (i.isScalar())
5714       continue;
5715 
5716     VectorizationCostTy C = expectedCost(i, &InvalidCosts);
5717     VectorizationFactor Candidate(i, C.first);
5718 
5719 #ifndef NDEBUG
5720     unsigned AssumedMinimumVscale = 1;
5721     if (Optional<unsigned> VScale = TTI.getVScaleForTuning())
5722       AssumedMinimumVscale = VScale.getValue();
5723     unsigned Width =
5724         Candidate.Width.isScalable()
5725             ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale
5726             : Candidate.Width.getFixedValue();
5727     LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
5728                       << " costs: " << (Candidate.Cost / Width));
5729     if (i.isScalable())
5730       LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of "
5731                         << AssumedMinimumVscale << ")");
5732     LLVM_DEBUG(dbgs() << ".\n");
5733 #endif
5734 
5735     if (!C.second && !ForceVectorization) {
5736       LLVM_DEBUG(
5737           dbgs() << "LV: Not considering vector loop of width " << i
5738                  << " because it will not generate any vector instructions.\n");
5739       continue;
5740     }
5741 
5742     // If profitable add it to ProfitableVF list.
5743     if (isMoreProfitable(Candidate, ScalarCost))
5744       ProfitableVFs.push_back(Candidate);
5745 
5746     if (isMoreProfitable(Candidate, ChosenFactor))
5747       ChosenFactor = Candidate;
5748   }
5749 
5750   // Emit a report of VFs with invalid costs in the loop.
5751   if (!InvalidCosts.empty()) {
5752     // Group the remarks per instruction, keeping the instruction order from
5753     // InvalidCosts.
5754     std::map<Instruction *, unsigned> Numbering;
5755     unsigned I = 0;
5756     for (auto &Pair : InvalidCosts)
5757       if (!Numbering.count(Pair.first))
5758         Numbering[Pair.first] = I++;
5759 
5760     // Sort the list, first on instruction(number) then on VF.
5761     llvm::sort(InvalidCosts,
5762                [&Numbering](InstructionVFPair &A, InstructionVFPair &B) {
5763                  if (Numbering[A.first] != Numbering[B.first])
5764                    return Numbering[A.first] < Numbering[B.first];
5765                  ElementCountComparator ECC;
5766                  return ECC(A.second, B.second);
5767                });
5768 
5769     // For a list of ordered instruction-vf pairs:
5770     //   [(load, vf1), (load, vf2), (store, vf1)]
5771     // Group the instructions together to emit separate remarks for:
5772     //   load  (vf1, vf2)
5773     //   store (vf1)
5774     auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts);
5775     auto Subset = ArrayRef<InstructionVFPair>();
5776     do {
5777       if (Subset.empty())
5778         Subset = Tail.take_front(1);
5779 
5780       Instruction *I = Subset.front().first;
5781 
5782       // If the next instruction is different, or if there are no other pairs,
5783       // emit a remark for the collated subset. e.g.
      //   [(load, vf1), (load, vf2)]
      // to emit:
      //  remark: invalid costs for 'load' at VF=(vf1, vf2)
5787       if (Subset == Tail || Tail[Subset.size()].first != I) {
5788         std::string OutString;
5789         raw_string_ostream OS(OutString);
5790         assert(!Subset.empty() && "Unexpected empty range");
5791         OS << "Instruction with invalid costs prevented vectorization at VF=(";
5792         for (auto &Pair : Subset)
5793           OS << (Pair.second == Subset.front().second ? "" : ", ")
5794              << Pair.second;
5795         OS << "):";
5796         if (auto *CI = dyn_cast<CallInst>(I))
5797           OS << " call to " << CI->getCalledFunction()->getName();
5798         else
5799           OS << " " << I->getOpcodeName();
5800         OS.flush();
5801         reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I);
5802         Tail = Tail.drop_front(Subset.size());
5803         Subset = {};
5804       } else
5805         // Grow the subset by one element
5806         Subset = Tail.take_front(Subset.size() + 1);
5807     } while (!Tail.empty());
5808   }
5809 
5810   if (!EnableCondStoresVectorization && NumPredStores) {
5811     reportVectorizationFailure("There are conditional stores.",
5812         "store that is conditionally executed prevents vectorization",
5813         "ConditionalStore", ORE, TheLoop);
5814     ChosenFactor = ScalarCost;
5815   }
5816 
5817   LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() &&
5818                  ChosenFactor.Cost >= ScalarCost.Cost) dbgs()
5819              << "LV: Vectorization seems to be not beneficial, "
5820              << "but was forced by a user.\n");
5821   LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n");
5822   return ChosenFactor;
5823 }
5824 
5825 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization(
5826     const Loop &L, ElementCount VF) const {
5827   // Cross iteration phis such as reductions need special handling and are
5828   // currently unsupported.
5829   if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) {
5830         return Legal->isFirstOrderRecurrence(&Phi) ||
5831                Legal->isReductionVariable(&Phi);
5832       }))
5833     return false;
5834 
5835   // Phis with uses outside of the loop require special handling and are
5836   // currently unsupported.
5837   for (auto &Entry : Legal->getInductionVars()) {
5838     // Look for uses of the value of the induction at the last iteration.
5839     Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch());
5840     for (User *U : PostInc->users())
5841       if (!L.contains(cast<Instruction>(U)))
5842         return false;
5843     // Look for uses of penultimate value of the induction.
5844     for (User *U : Entry.first->users())
5845       if (!L.contains(cast<Instruction>(U)))
5846         return false;
5847   }
5848 
5849   // Induction variables that are widened require special handling that is
5850   // currently not supported.
5851   if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
5852         return !(this->isScalarAfterVectorization(Entry.first, VF) ||
5853                  this->isProfitableToScalarize(Entry.first, VF));
5854       }))
5855     return false;
5856 
  // Epilogue vectorization code has not been audited to ensure it handles
  // non-latch exits properly. It may be fine, but it needs to be audited and
  // tested.
5860   if (L.getExitingBlock() != L.getLoopLatch())
5861     return false;
5862 
5863   return true;
5864 }
5865 
5866 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
5867     const ElementCount VF) const {
5868   // FIXME: We need a much better cost-model to take different parameters such
5869   // as register pressure, code size increase and cost of extra branches into
5870   // account. For now we apply a very crude heuristic and only consider loops
5871   // with vectorization factors larger than a certain value.
  // We also consider epilogue vectorization unprofitable for targets that
  // don't consider interleaving beneficial (e.g. MVE).
5874   if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
5875     return false;
5876   if (VF.getFixedValue() >= EpilogueVectorizationMinVF)
5877     return true;
5878   return false;
5879 }
5880 
5881 VectorizationFactor
5882 LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
5883     const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
5884   VectorizationFactor Result = VectorizationFactor::Disabled();
5885   if (!EnableEpilogueVectorization) {
5886     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
5887     return Result;
5888   }
5889 
5890   if (!isScalarEpilogueAllowed()) {
5891     LLVM_DEBUG(
5892         dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
5893                   "allowed.\n";);
5894     return Result;
5895   }
5896 
5897   // Not really a cost consideration, but check for unsupported cases here to
5898   // simplify the logic.
5899   if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
5900     LLVM_DEBUG(
5901         dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
5902                   "not a supported candidate.\n";);
5903     return Result;
5904   }
5905 
5906   if (EpilogueVectorizationForceVF > 1) {
5907     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
    ElementCount ForcedEC =
        ElementCount::getFixed(EpilogueVectorizationForceVF);
5909     if (LVP.hasPlanWithVF(ForcedEC))
5910       return {ForcedEC, 0};
5911     else {
5912       LLVM_DEBUG(
5913           dbgs()
5914               << "LEV: Epilogue vectorization forced factor is not viable.\n";);
5915       return Result;
5916     }
5917   }
5918 
5919   if (TheLoop->getHeader()->getParent()->hasOptSize() ||
5920       TheLoop->getHeader()->getParent()->hasMinSize()) {
5921     LLVM_DEBUG(
5922         dbgs()
5923             << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
5924     return Result;
5925   }
5926 
5927   auto FixedMainLoopVF = ElementCount::getFixed(MainLoopVF.getKnownMinValue());
5928   if (MainLoopVF.isScalable())
5929     LLVM_DEBUG(
5930         dbgs() << "LEV: Epilogue vectorization using scalable vectors not "
5931                   "yet supported. Converting to fixed-width (VF="
5932                << FixedMainLoopVF << ") instead\n");
5933 
5934   if (!isEpilogueVectorizationProfitable(FixedMainLoopVF)) {
5935     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for "
5936                          "this loop\n");
5937     return Result;
5938   }
5939 
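  // Pick the most profitable previously-computed VF that is strictly smaller
  // than the (fixed) main loop VF and for which a VPlan exists.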
5940   for (auto &NextVF : ProfitableVFs)
5941     if (ElementCount::isKnownLT(NextVF.Width, FixedMainLoopVF) &&
5942         (Result.Width.getFixedValue() == 1 ||
5943          isMoreProfitable(NextVF, Result)) &&
5944         LVP.hasPlanWithVF(NextVF.Width))
5945       Result = NextVF;
5946 
5947   if (Result != VectorizationFactor::Disabled())
5948     LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
5949                       << Result.Width.getFixedValue() << "\n";);
5950   return Result;
5951 }
5952 
5953 std::pair<unsigned, unsigned>
5954 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
5955   unsigned MinWidth = -1U;
5956   unsigned MaxWidth = 8;
5957   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5958   // For in-loop reductions, no element types are added to ElementTypesInLoop
5959   // if there are no loads/stores in the loop. In this case, check through the
5960   // reduction variables to determine the maximum width.
5961   if (ElementTypesInLoop.empty() && !Legal->getReductionVars().empty()) {
5962     // Reset MaxWidth so that we can find the smallest type used by recurrences
5963     // in the loop.
5964     MaxWidth = -1U;
5965     for (auto &PhiDescriptorPair : Legal->getReductionVars()) {
5966       const RecurrenceDescriptor &RdxDesc = PhiDescriptorPair.second;
5967       // When finding the min width used by the recurrence we need to account
5968       // for casts on the input operands of the recurrence.
5969       MaxWidth = std::min<unsigned>(
5970           MaxWidth, std::min<unsigned>(
5971                         RdxDesc.getMinWidthCastToRecurrenceTypeInBits(),
5972                         RdxDesc.getRecurrenceType()->getScalarSizeInBits()));
5973     }
5974   } else {
5975     for (Type *T : ElementTypesInLoop) {
5976       MinWidth = std::min<unsigned>(
5977           MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
5978       MaxWidth = std::max<unsigned>(
5979           MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
5980     }
5981   }
5982   return {MinWidth, MaxWidth};
5983 }
5984 
5985 void LoopVectorizationCostModel::collectElementTypesForWidening() {
5986   ElementTypesInLoop.clear();
5987   // For each block.
5988   for (BasicBlock *BB : TheLoop->blocks()) {
5989     // For each instruction in the loop.
5990     for (Instruction &I : BB->instructionsWithoutDebug()) {
5991       Type *T = I.getType();
5992 
5993       // Skip ignored values.
5994       if (ValuesToIgnore.count(&I))
5995         continue;
5996 
5997       // Only examine Loads, Stores and PHINodes.
5998       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
5999         continue;
6000 
6001       // Examine PHI nodes that are reduction variables. Update the type to
6002       // account for the recurrence type.
6003       if (auto *PN = dyn_cast<PHINode>(&I)) {
6004         if (!Legal->isReductionVariable(PN))
6005           continue;
6006         const RecurrenceDescriptor &RdxDesc =
6007             Legal->getReductionVars().find(PN)->second;
6008         if (PreferInLoopReductions || useOrderedReductions(RdxDesc) ||
6009             TTI.preferInLoopReduction(RdxDesc.getOpcode(),
6010                                       RdxDesc.getRecurrenceType(),
6011                                       TargetTransformInfo::ReductionFlags()))
6012           continue;
6013         T = RdxDesc.getRecurrenceType();
6014       }
6015 
6016       // Examine the stored values.
6017       if (auto *ST = dyn_cast<StoreInst>(&I))
6018         T = ST->getValueOperand()->getType();
6019 
6020       assert(T->isSized() &&
6021              "Expected the load/store/recurrence type to be sized");
6022 
6023       ElementTypesInLoop.insert(T);
6024     }
6025   }
6026 }
6027 
6028 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
6029                                                            unsigned LoopCost) {
6030   // -- The interleave heuristics --
6031   // We interleave the loop in order to expose ILP and reduce the loop overhead.
6032   // There are many micro-architectural considerations that we can't predict
6033   // at this level. For example, frontend pressure (on decode or fetch) due to
6034   // code size, or the number and capabilities of the execution ports.
6035   //
6036   // We use the following heuristics to select the interleave count:
6037   // 1. If the code has reductions, then we interleave to break the cross
6038   // iteration dependency.
6039   // 2. If the loop is really small, then we interleave to reduce the loop
6040   // overhead.
6041   // 3. We don't interleave if we think that we will spill registers to memory
6042   // due to the increased register pressure.
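  //
  // For illustration only (not cost-model output): a vectorized loop with a
  // single add reduction typically gets IC > 1 under (1), while a loop whose
  // per-iteration register usage already fills the register file gets IC = 1
  // under (3).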
6043 
6044   if (!isScalarEpilogueAllowed())
6045     return 1;
6046 
6047   // The max safe dep distance already limits the width; do not interleave.
6048   if (Legal->getMaxSafeDepDistBytes() != -1U)
6049     return 1;
6050 
6051   auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
6052   const bool HasReductions = !Legal->getReductionVars().empty();
6053   // Do not interleave loops with a relatively small known or estimated trip
6054   // count. But we will interleave when InterleaveSmallLoopScalarReduction is
6055   // enabled, and the code has scalar reductions (HasReductions && VF == 1),
6056   // because under those conditions interleaving can expose ILP and break
6057   // cross-iteration dependences for reductions.
6058   if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
6059       !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
6060     return 1;
6061 
6062   RegisterUsage R = calculateRegisterUsage({VF})[0];
6063   // We divide by these counts below, so clamp each to at least one (i.e.
6064   // assume at least one instruction uses at least one register).
6065   for (auto& pair : R.MaxLocalUsers) {
6066     pair.second = std::max(pair.second, 1U);
6067   }
6068 
6069   // We calculate the interleave count using the following formula.
6070   // Subtract the number of loop invariants from the number of available
6071   // registers. These registers are used by all of the interleaved instances.
6072   // Next, divide the remaining registers by the number of registers that is
6073   // required by the loop, in order to estimate how many parallel instances
6074   // fit without causing spills. All of this is rounded down if necessary to be
6075   // a power of two. We want a power-of-two interleave count to simplify any
6076   // addressing operations and alignment considerations.
6077   // A power-of-two interleave count also ensures that the induction variable
6078   // of the vector loop wraps to zero when the tail is folded by masking;
6079   // this currently happens when OptForSize, in which case IC is set to 1 above.
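  //
  // Worked example (illustrative register counts, not from TTI): with 32
  // registers in a class, 2 loop-invariant values and a maximum of 5 live
  // values, the estimate is PowerOf2Floor((32 - 2) / 5) = PowerOf2Floor(6),
  // i.e. 4 interleaved instances.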
6080   unsigned IC = UINT_MAX;
6081 
6082   for (auto& pair : R.MaxLocalUsers) {
6083     unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
6084     LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
6085                       << " registers of "
6086                       << TTI.getRegisterClassName(pair.first) << " register class\n");
6087     if (VF.isScalar()) {
6088       if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
6089         TargetNumRegisters = ForceTargetNumScalarRegs;
6090     } else {
6091       if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
6092         TargetNumRegisters = ForceTargetNumVectorRegs;
6093     }
6094     unsigned MaxLocalUsers = pair.second;
6095     unsigned LoopInvariantRegs = 0;
6096     if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
6097       LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
6098 
6099     unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers);
6100     // Don't count the induction variable as interleaved.
6101     if (EnableIndVarRegisterHeur) {
6102       TmpIC =
6103           PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
6104                         std::max(1U, (MaxLocalUsers - 1)));
6105     }
6106 
6107     IC = std::min(IC, TmpIC);
6108   }
6109 
6110   // Clamp the interleave ranges to reasonable counts.
6111   unsigned MaxInterleaveCount =
6112       TTI.getMaxInterleaveFactor(VF.getKnownMinValue());
6113 
6114   // Check if the user has overridden the max.
6115   if (VF.isScalar()) {
6116     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
6117       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
6118   } else {
6119     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
6120       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
6121   }
6122 
6123   // If the trip count is a known or estimated compile-time constant, limit
6124   // the interleave count to at most the trip count divided by VF, ensuring
6125   // the result is at least 1.
6126   //
6127   // For scalable vectors we can't know if interleaving is beneficial. It may
6128   // not be beneficial for small loops if none of the lanes in the second vector
6129   // iteration is enabled. However, for larger loops, there is likely to be a
6130   // similar benefit as for fixed-width vectors. For now, we choose to leave
6131   // the InterleaveCount as if vscale is '1', although if some information about
6132   // the vector is known (e.g. min vector size), we can make a better decision.
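  //
  // For example (illustrative): with a trip count estimate of 24 and VF = 8,
  // MaxInterleaveCount is clamped to at most 24 / 8 = 3.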
6133   if (BestKnownTC) {
6134     MaxInterleaveCount =
6135         std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
6136     // Make sure MaxInterleaveCount is greater than 0.
6137     MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
6138   }
6139 
6140   assert(MaxInterleaveCount > 0 &&
6141          "Maximum interleave count must be greater than 0");
6142 
6143   // Clamp the calculated IC to be between 1 and the max interleave count
6144   // that the target and trip count allow.
6145   if (IC > MaxInterleaveCount)
6146     IC = MaxInterleaveCount;
6147   else
6148     // Make sure IC is greater than 0.
6149     IC = std::max(1u, IC);
6150 
6151   assert(IC > 0 && "Interleave count must be greater than 0.");
6152 
6153   // If we did not calculate the cost for VF (because the user selected the VF)
6154   // then we calculate the cost of VF here.
6155   if (LoopCost == 0) {
6156     InstructionCost C = expectedCost(VF).first;
6157     assert(C.isValid() && "Expected to have chosen a VF with valid cost");
6158     LoopCost = *C.getValue();
6159   }
6160 
6161   assert(LoopCost && "Non-zero loop cost expected");
6162 
6163   // Interleave if we vectorized this loop and there is a reduction that could
6164   // benefit from interleaving.
6165   if (VF.isVector() && HasReductions) {
6166     LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
6167     return IC;
6168   }
6169 
6170   // Note that if we've already vectorized the loop we will have done the
6171   // runtime check and so interleaving won't require further checks.
6172   bool InterleavingRequiresRuntimePointerCheck =
6173       (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
6174 
6175   // We want to interleave small loops in order to reduce the loop overhead and
6176   // potentially expose ILP opportunities.
6177   LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
6178                     << "LV: IC is " << IC << '\n'
6179                     << "LV: VF is " << VF << '\n');
6180   const bool AggressivelyInterleaveReductions =
6181       TTI.enableAggressiveInterleaving(HasReductions);
6182   if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
6183     // We assume that the cost overhead is 1 and we use the cost model
6184     // to estimate the cost of the loop and interleave until the cost of the
6185     // loop overhead is about 5% of the cost of the loop.
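    // For example (illustrative, assuming a SmallLoopCost of 20): a loop cost
    // of 3 gives SmallIC = min(IC, PowerOf2Floor(20 / 3)) = min(IC, 4).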
6186     unsigned SmallIC =
6187         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
6188 
6189     // Interleave until store/load ports (estimated by max interleave count) are
6190     // saturated.
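    // E.g. (illustrative) with IC = 8, two stores and one load, StoresIC = 4
    // and LoadsIC = 8.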
6191     unsigned NumStores = Legal->getNumStores();
6192     unsigned NumLoads = Legal->getNumLoads();
6193     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
6194     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
6195 
6196     // There is little point in interleaving for reductions containing selects
6197     // and compares when VF=1 since it may just create more overhead than it's
6198     // worth for loops with small trip counts. This is because we still have to
6199     // do the final reduction after the loop.
6200     bool HasSelectCmpReductions =
6201         HasReductions &&
6202         any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
6203           const RecurrenceDescriptor &RdxDesc = Reduction.second;
6204           return RecurrenceDescriptor::isSelectCmpRecurrenceKind(
6205               RdxDesc.getRecurrenceKind());
6206         });
6207     if (HasSelectCmpReductions) {
6208       LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n");
6209       return 1;
6210     }
6211 
6212     // If we have a scalar reduction (vector reductions are already dealt with
6213     // by this point), interleaving can increase the critical path length if the
6214     // loop we're interleaving is inside another loop. For tree-wise reductions
6215     // set the limit to 2, and for ordered reductions it's best to disable
6216     // interleaving entirely.
6217     if (HasReductions && TheLoop->getLoopDepth() > 1) {
6218       bool HasOrderedReductions =
6219           any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
6220             const RecurrenceDescriptor &RdxDesc = Reduction.second;
6221             return RdxDesc.isOrdered();
6222           });
6223       if (HasOrderedReductions) {
6224         LLVM_DEBUG(
6225             dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
6226         return 1;
6227       }
6228 
6229       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
6230       SmallIC = std::min(SmallIC, F);
6231       StoresIC = std::min(StoresIC, F);
6232       LoadsIC = std::min(LoadsIC, F);
6233     }
6234 
6235     if (EnableLoadStoreRuntimeInterleave &&
6236         std::max(StoresIC, LoadsIC) > SmallIC) {
6237       LLVM_DEBUG(
6238           dbgs() << "LV: Interleaving to saturate store or load ports.\n");
6239       return std::max(StoresIC, LoadsIC);
6240     }
6241 
6242     // If there are scalar reductions and TTI has enabled aggressive
6243     // interleaving for reductions, we will interleave to expose ILP.
6244     if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
6245         AggressivelyInterleaveReductions) {
6246       LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6247       // Interleave no less than SmallIC but not as aggressive as the normal IC
6248       // to satisfy the rare situation when resources are too limited.
6249       return std::max(IC / 2, SmallIC);
6250     } else {
6251       LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
6252       return SmallIC;
6253     }
6254   }
6255 
6256   // Interleave if this is a large loop (small loops are already dealt with by
6257   // this point) that could benefit from interleaving.
6258   if (AggressivelyInterleaveReductions) {
6259     LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6260     return IC;
6261   }
6262 
6263   LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
6264   return 1;
6265 }
6266 
6267 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
6268 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
6269   // This function calculates the register usage by measuring the highest number
6270   // of values that are alive at a single location. Obviously, this is a very
6271   // rough estimation. We scan the loop in topological order and
6272   // assign a number to each instruction. We use RPO to ensure that defs are
6273   // met before their users. We assume that each instruction that has in-loop
6274   // users starts an interval. We record every time that an in-loop value is
6275   // used, so we have a list of the first and last occurrences of each
6276   // instruction. Next, we transpose this data structure into a multi map that
6277   // holds the list of intervals that *end* at a specific location. This multi
6278   // map allows us to perform a linear search. We scan the instructions linearly
6279   // and record each time that a new interval starts, by placing it in a set.
6280   // If we find this value in the multi-map then we remove it from the set.
6281   // The max register usage is the maximum size of the set.
6282   // We also search for instructions that are defined outside the loop, but are
6283   // used inside the loop. We need this number separately from the max-interval
6284   // usage number because when we unroll, loop-invariant values do not take
6285   // more registers.
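  //
  // Schematic example (not real IR from this pass): for a body of the form
  //   %a = load ...
  //   %b = load ...
  //   %c = add %a, %b
  //   store %c, ...
  // both %a and %b are still open when %c is visited, so the maximum local
  // usage recorded for this snippet is 2.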
6286   LoopBlocksDFS DFS(TheLoop);
6287   DFS.perform(LI);
6288 
6289   RegisterUsage RU;
6290 
6291   // Each 'key' in the map opens a new interval. The values
6292   // of the map are the index of the 'last seen' usage of the
6293   // instruction that is the key.
6294   using IntervalMap = DenseMap<Instruction *, unsigned>;
6295 
6296   // Maps instruction to its index.
6297   SmallVector<Instruction *, 64> IdxToInstr;
6298   // Marks the end of each interval.
6299   IntervalMap EndPoint;
6300   // Saves the set of instructions that are used inside the loop.
6301   SmallPtrSet<Instruction *, 8> Ends;
6302   // Saves the list of values that are used in the loop but are
6303   // defined outside the loop, such as arguments and constants.
6304   SmallPtrSet<Value *, 8> LoopInvariants;
6305 
6306   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
6307     for (Instruction &I : BB->instructionsWithoutDebug()) {
6308       IdxToInstr.push_back(&I);
6309 
6310       // Save the end location of each USE.
6311       for (Value *U : I.operands()) {
6312         auto *Instr = dyn_cast<Instruction>(U);
6313 
6314         // Ignore non-instruction values such as arguments, constants, etc.
6315         if (!Instr)
6316           continue;
6317 
6318         // If this instruction is outside the loop then record it and continue.
6319         if (!TheLoop->contains(Instr)) {
6320           LoopInvariants.insert(Instr);
6321           continue;
6322         }
6323 
6324         // Overwrite previous end points.
6325         EndPoint[Instr] = IdxToInstr.size();
6326         Ends.insert(Instr);
6327       }
6328     }
6329   }
6330 
6331   // Saves the list of intervals that end with the index in 'key'.
6332   using InstrList = SmallVector<Instruction *, 2>;
6333   DenseMap<unsigned, InstrList> TransposeEnds;
6334 
6335   // Transpose the EndPoints to a list of values that end at each index.
6336   for (auto &Interval : EndPoint)
6337     TransposeEnds[Interval.second].push_back(Interval.first);
6338 
6339   SmallPtrSet<Instruction *, 8> OpenIntervals;
6340   SmallVector<RegisterUsage, 8> RUs(VFs.size());
6341   SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());
6342 
6343   LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
6344 
6345   // A lambda that gets the register usage for the given type and VF.
6346   const auto &TTICapture = TTI;
6347   auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned {
6348     if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty))
6349       return 0;
6350     InstructionCost::CostType RegUsage =
6351         *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue();
6352     assert(RegUsage >= 0 && RegUsage <= std::numeric_limits<unsigned>::max() &&
6353            "Nonsensical values for register usage.");
6354     return RegUsage;
6355   };
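  // For instance (hypothetical target with 128-bit vector registers), an i32
  // element at VF = 8 forms a <8 x i32>, which such a target would typically
  // report as occupying 2 registers.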
6356 
6357   for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
6358     Instruction *I = IdxToInstr[i];
6359 
6360     // Remove all of the instructions that end at this location.
6361     InstrList &List = TransposeEnds[i];
6362     for (Instruction *ToRemove : List)
6363       OpenIntervals.erase(ToRemove);
6364 
6365     // Ignore instructions that are never used within the loop.
6366     if (!Ends.count(I))
6367       continue;
6368 
6369     // Skip ignored values.
6370     if (ValuesToIgnore.count(I))
6371       continue;
6372 
6373     // For each VF find the maximum usage of registers.
6374     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
6375       // Count the number of live intervals.
6376       SmallMapVector<unsigned, unsigned, 4> RegUsage;
6377 
6378       if (VFs[j].isScalar()) {
6379         for (auto Inst : OpenIntervals) {
6380           unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
6381           if (RegUsage.find(ClassID) == RegUsage.end())
6382             RegUsage[ClassID] = 1;
6383           else
6384             RegUsage[ClassID] += 1;
6385         }
6386       } else {
6387         collectUniformsAndScalars(VFs[j]);
6388         for (auto Inst : OpenIntervals) {
6389           // Skip ignored values for VF > 1.
6390           if (VecValuesToIgnore.count(Inst))
6391             continue;
6392           if (isScalarAfterVectorization(Inst, VFs[j])) {
6393             unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
6394             if (RegUsage.find(ClassID) == RegUsage.end())
6395               RegUsage[ClassID] = 1;
6396             else
6397               RegUsage[ClassID] += 1;
6398           } else {
6399             unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType());
6400             if (RegUsage.find(ClassID) == RegUsage.end())
6401               RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]);
6402             else
6403               RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
6404           }
6405         }
6406       }
6407 
6408       for (auto& pair : RegUsage) {
6409         if (MaxUsages[j].find(pair.first) != MaxUsages[j].end())
6410           MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second);
6411         else
6412           MaxUsages[j][pair.first] = pair.second;
6413       }
6414     }
6415 
6416     LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
6417                       << OpenIntervals.size() << '\n');
6418 
6419     // Add the current instruction to the list of open intervals.
6420     OpenIntervals.insert(I);
6421   }
6422 
6423   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
6424     SmallMapVector<unsigned, unsigned, 4> Invariant;
6425 
6426     for (auto Inst : LoopInvariants) {
6427       unsigned Usage =
6428           VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
6429       unsigned ClassID =
6430           TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
6431       if (Invariant.find(ClassID) == Invariant.end())
6432         Invariant[ClassID] = Usage;
6433       else
6434         Invariant[ClassID] += Usage;
6435     }
6436 
6437     LLVM_DEBUG({
6438       dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
6439       dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
6440              << " item\n";
6441       for (const auto &pair : MaxUsages[i]) {
6442         dbgs() << "LV(REG): RegisterClass: "
6443                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6444                << " registers\n";
6445       }
6446       dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
6447              << " item\n";
6448       for (const auto &pair : Invariant) {
6449         dbgs() << "LV(REG): RegisterClass: "
6450                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6451                << " registers\n";
6452       }
6453     });
6454 
6455     RU.LoopInvariantRegs = Invariant;
6456     RU.MaxLocalUsers = MaxUsages[i];
6457     RUs[i] = RU;
6458   }
6459 
6460   return RUs;
6461 }
6462 
6463 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I,
6464                                                            ElementCount VF) {
6465   // TODO: Cost model for emulated masked load/store is completely
6466   // broken. This hack guides the cost model to use an artificially
6467   // high enough value to practically disable vectorization with such
6468   // operations, except where previously deployed legality hack allowed
6469   // using very low cost values. This is to avoid regressions coming simply
6470   // from moving "masked load/store" check from legality to cost model.
6471   // Masked Load/Gather emulation was previously never allowed.
6472   // Emulation of a limited number of Masked Stores/Scatters was allowed.
6473   assert(isPredicatedInst(I, VF) && "Expecting a scalar emulated instruction");
6474   return isa<LoadInst>(I) ||
6475          (isa<StoreInst>(I) &&
6476           NumPredStores > NumberOfStoresToPredicate);
6477 }
6478 
6479 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
6480   // If we aren't vectorizing the loop, or if we've already collected the
6481   // instructions to scalarize, there's nothing to do. Collection may already
6482   // have occurred if we have a user-selected VF and are now computing the
6483   // expected cost for interleaving.
6484   if (VF.isScalar() || VF.isZero() ||
6485       InstsToScalarize.find(VF) != InstsToScalarize.end())
6486     return;
6487 
6488   // Initialize a mapping for VF in InstsToScalarize. If we find that it's
6489   // not profitable to scalarize any instructions, the presence of VF in the
6490   // map will indicate that we've analyzed it already.
6491   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
6492 
6493   // Find all the instructions that are scalar with predication in the loop and
6494   // determine if it would be better to not if-convert the blocks they are in.
6495   // If so, we also record the instructions to scalarize.
6496   for (BasicBlock *BB : TheLoop->blocks()) {
6497     if (!blockNeedsPredicationForAnyReason(BB))
6498       continue;
6499     for (Instruction &I : *BB)
6500       if (isScalarWithPredication(&I, VF)) {
6501         ScalarCostsTy ScalarCosts;
6502         // Do not apply discount if scalable, because that would lead to
6503         // invalid scalarization costs.
6504         // Do not apply discount logic if hacked cost is needed
6505         // for emulated masked memrefs.
6506         if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I, VF) &&
6507             computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
6508           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
6509         // Remember that BB will remain after vectorization.
6510         PredicatedBBsAfterVectorization.insert(BB);
6511       }
6512   }
6513 }
6514 
6515 int LoopVectorizationCostModel::computePredInstDiscount(
6516     Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
6517   assert(!isUniformAfterVectorization(PredInst, VF) &&
6518          "Instruction marked uniform-after-vectorization will be predicated");
6519 
6520   // Initialize the discount to zero, meaning that the scalar version and the
6521   // vector version cost the same.
6522   InstructionCost Discount = 0;
6523 
6524   // Holds instructions to analyze. The instructions we visit are mapped in
6525   // ScalarCosts. Those instructions are the ones that would be scalarized if
6526   // we find that the scalar version costs less.
6527   SmallVector<Instruction *, 8> Worklist;
6528 
6529   // Returns true if the given instruction can be scalarized.
6530   auto canBeScalarized = [&](Instruction *I) -> bool {
6531     // We only attempt to scalarize instructions forming a single-use chain
6532     // from the original predicated block that would otherwise be vectorized.
6533     // Although not strictly necessary, we give up on instructions we know will
6534     // already be scalar to avoid traversing chains that are unlikely to be
6535     // beneficial.
6536     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
6537         isScalarAfterVectorization(I, VF))
6538       return false;
6539 
6540     // If the instruction is scalar with predication, it will be analyzed
6541     // separately. We ignore it within the context of PredInst.
6542     if (isScalarWithPredication(I, VF))
6543       return false;
6544 
6545     // If any of the instruction's operands are uniform after vectorization,
6546     // the instruction cannot be scalarized. This prevents, for example, a
6547     // masked load from being scalarized.
6548     //
6549     // We assume we will only emit a value for lane zero of an instruction
6550     // marked uniform after vectorization, rather than VF identical values.
6551     // Thus, if we scalarize an instruction that uses a uniform, we would
6552     // create uses of values corresponding to the lanes we aren't emitting code
6553     // for. This behavior can be changed by allowing getScalarValue to clone
6554     // the lane zero values for uniforms rather than asserting.
6555     for (Use &U : I->operands())
6556       if (auto *J = dyn_cast<Instruction>(U.get()))
6557         if (isUniformAfterVectorization(J, VF))
6558           return false;
6559 
6560     // Otherwise, we can scalarize the instruction.
6561     return true;
6562   };
6563 
6564   // Compute the expected cost discount from scalarizing the entire expression
6565   // feeding the predicated instruction. We currently only consider expressions
6566   // that are single-use instruction chains.
6567   Worklist.push_back(PredInst);
6568   while (!Worklist.empty()) {
6569     Instruction *I = Worklist.pop_back_val();
6570 
6571     // If we've already analyzed the instruction, there's nothing to do.
6572     if (ScalarCosts.find(I) != ScalarCosts.end())
6573       continue;
6574 
6575     // Compute the cost of the vector instruction. Note that this cost already
6576     // includes the scalarization overhead of the predicated instruction.
6577     InstructionCost VectorCost = getInstructionCost(I, VF).first;
6578 
6579     // Compute the cost of the scalarized instruction. This cost is the cost of
6580     // the instruction as if it wasn't if-converted and instead remained in the
6581     // predicated block. We will scale this cost by block probability after
6582     // computing the scalarization overhead.
6583     InstructionCost ScalarCost =
6584         VF.getFixedValue() *
6585         getInstructionCost(I, ElementCount::getFixed(1)).first;
6586 
6587     // Compute the scalarization overhead of needed insertelement instructions
6588     // and phi nodes.
6589     if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) {
6590       ScalarCost += TTI.getScalarizationOverhead(
6591           cast<VectorType>(ToVectorTy(I->getType(), VF)),
6592           APInt::getAllOnes(VF.getFixedValue()), true, false);
6593       ScalarCost +=
6594           VF.getFixedValue() *
6595           TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput);
6596     }
6597 
6598     // Compute the scalarization overhead of needed extractelement
6599     // instructions. For each of the instruction's operands, if the operand can
6600     // be scalarized, add it to the worklist; otherwise, account for the
6601     // overhead.
6602     for (Use &U : I->operands())
6603       if (auto *J = dyn_cast<Instruction>(U.get())) {
6604         assert(VectorType::isValidElementType(J->getType()) &&
6605                "Instruction has non-scalar type");
6606         if (canBeScalarized(J))
6607           Worklist.push_back(J);
6608         else if (needsExtract(J, VF)) {
6609           ScalarCost += TTI.getScalarizationOverhead(
6610               cast<VectorType>(ToVectorTy(J->getType(), VF)),
6611               APInt::getAllOnes(VF.getFixedValue()), false, true);
6612         }
6613       }
6614 
6615     // Scale the total scalar cost by block probability.
6616     ScalarCost /= getReciprocalPredBlockProb();
6617 
6618     // Compute the discount. A non-negative discount means the vector version
6619     // of the instruction costs more, and scalarizing would be beneficial.
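    // E.g. (illustrative): a VectorCost of 10 and a probability-scaled
    // ScalarCost of 6 add a discount of 4 in favour of scalarizing the chain.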
6620     Discount += VectorCost - ScalarCost;
6621     ScalarCosts[I] = ScalarCost;
6622   }
6623 
6624   return *Discount.getValue();
6625 }
6626 
6627 LoopVectorizationCostModel::VectorizationCostTy
6628 LoopVectorizationCostModel::expectedCost(
6629     ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) {
6630   VectorizationCostTy Cost;
6631 
6632   // For each block.
6633   for (BasicBlock *BB : TheLoop->blocks()) {
6634     VectorizationCostTy BlockCost;
6635 
6636     // For each instruction in the old loop.
6637     for (Instruction &I : BB->instructionsWithoutDebug()) {
6638       // Skip ignored values.
6639       if (ValuesToIgnore.count(&I) ||
6640           (VF.isVector() && VecValuesToIgnore.count(&I)))
6641         continue;
6642 
6643       VectorizationCostTy C = getInstructionCost(&I, VF);
6644 
6645       // Check if we should override the cost.
6646       if (C.first.isValid() &&
6647           ForceTargetInstructionCost.getNumOccurrences() > 0)
6648         C.first = InstructionCost(ForceTargetInstructionCost);
6649 
6650       // Keep a list of instructions with invalid costs.
6651       if (Invalid && !C.first.isValid())
6652         Invalid->emplace_back(&I, VF);
6653 
6654       BlockCost.first += C.first;
6655       BlockCost.second |= C.second;
6656       LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
6657                         << " for VF " << VF << " For instruction: " << I
6658                         << '\n');
6659     }
6660 
6661     // If we are vectorizing a predicated block, it will have been
6662     // if-converted. This means that the block's instructions (aside from
6663     // stores and instructions that may divide by zero) will now be
6664     // unconditionally executed. For the scalar case, we may not always execute
6665     // the predicated block, if it is an if-else block. Thus, scale the block's
6666     // cost by the probability of executing it. blockNeedsPredication from
6667     // Legal is used so as to not include all blocks in tail folded loops.
6668     if (VF.isScalar() && Legal->blockNeedsPredication(BB))
6669       BlockCost.first /= getReciprocalPredBlockProb();
6670 
6671     Cost.first += BlockCost.first;
6672     Cost.second |= BlockCost.second;
6673   }
6674 
6675   return Cost;
6676 }
6677 
6678 /// Gets Address Access SCEV after verifying that the access pattern
6679 /// is loop invariant except the induction variable dependence.
6680 ///
6681 /// This SCEV can be sent to the Target in order to estimate the address
6682 /// calculation cost.
6683 static const SCEV *getAddressAccessSCEV(
6684               Value *Ptr,
6685               LoopVectorizationLegality *Legal,
6686               PredicatedScalarEvolution &PSE,
6687               const Loop *TheLoop) {
6688 
6689   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
6690   if (!Gep)
6691     return nullptr;
6692 
6693   // We are looking for a gep with all loop invariant indices except for one
6694   // which should be an induction variable.
6695   auto SE = PSE.getSE();
6696   unsigned NumOperands = Gep->getNumOperands();
6697   for (unsigned i = 1; i < NumOperands; ++i) {
6698     Value *Opd = Gep->getOperand(i);
6699     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
6700         !Legal->isInductionVariable(Opd))
6701       return nullptr;
6702   }
6703 
6704   // Now we know we have a GEP ptr, %inv, %ind, %inv. Return the Ptr SCEV.
6705   return PSE.getSCEV(Ptr);
6706 }
6707 
6708 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
6709   return Legal->hasStride(I->getOperand(0)) ||
6710          Legal->hasStride(I->getOperand(1));
6711 }
6712 
6713 InstructionCost
6714 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
6715                                                         ElementCount VF) {
6716   assert(VF.isVector() &&
6717          "Scalarization cost of instruction implies vectorization.");
6718   if (VF.isScalable())
6719     return InstructionCost::getInvalid();
6720 
6721   Type *ValTy = getLoadStoreType(I);
6722   auto SE = PSE.getSE();
6723 
6724   unsigned AS = getLoadStoreAddressSpace(I);
6725   Value *Ptr = getLoadStorePointerOperand(I);
6726   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
6727   // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost`
6728   //       that it is being called from this specific place.
6729 
6730   // Figure out whether the access is strided and get the stride value
6731   // if it's known at compile time.
6732   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
6733 
6734   // Get the cost of the scalar memory instruction and address computation.
6735   InstructionCost Cost =
6736       VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
6737 
6738   // Don't pass *I here, since it is scalar but will actually be part of a
6739   // vectorized loop where the user of it is a vectorized instruction.
6740   const Align Alignment = getLoadStoreAlignment(I);
6741   Cost += VF.getKnownMinValue() *
6742           TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
6743                               AS, TTI::TCK_RecipThroughput);
6744 
6745   // Get the overhead of the extractelement and insertelement instructions
6746   // we might create due to scalarization.
6747   Cost += getScalarizationOverhead(I, VF);
6748 
6749   // If we have a predicated load/store, it will need extra i1 extracts and
6750   // conditional branches, but may not be executed for each vector lane. Scale
6751   // the cost by the probability of executing the predicated block.
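  // Illustrative numbers only: at VF = 4, with a unit address and unit memory
  // cost, the base cost above is 4 * (1 + 1) = 8 before scalarization
  // overhead; a predicated access is then scaled by the block probability and
  // charged the extra i1 extracts and branches below.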
6752   if (isPredicatedInst(I, VF)) {
6753     Cost /= getReciprocalPredBlockProb();
6754 
6755     // Add the cost of an i1 extract and a branch
6756     auto *Vec_i1Ty =
6757         VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF);
6758     Cost += TTI.getScalarizationOverhead(
6759         Vec_i1Ty, APInt::getAllOnes(VF.getKnownMinValue()),
6760         /*Insert=*/false, /*Extract=*/true);
6761     Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput);
6762 
6763     if (useEmulatedMaskMemRefHack(I, VF))
6764       // Artificially setting to a high enough value to practically disable
6765       // vectorization with such operations.
6766       Cost = 3000000;
6767   }
6768 
6769   return Cost;
6770 }
6771 
6772 InstructionCost
6773 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
6774                                                     ElementCount VF) {
6775   Type *ValTy = getLoadStoreType(I);
6776   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6777   Value *Ptr = getLoadStorePointerOperand(I);
6778   unsigned AS = getLoadStoreAddressSpace(I);
6779   int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr);
6780   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6781 
6782   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6783          "Stride should be 1 or -1 for consecutive memory access");
6784   const Align Alignment = getLoadStoreAlignment(I);
6785   InstructionCost Cost = 0;
6786   if (Legal->isMaskRequired(I))
6787     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6788                                       CostKind);
6789   else
6790     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6791                                 CostKind, I);
6792 
6793   bool Reverse = ConsecutiveStride < 0;
6794   if (Reverse)
6795     Cost +=
6796         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
6797   return Cost;
6798 }
6799 
6800 InstructionCost
6801 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
6802                                                 ElementCount VF) {
6803   assert(Legal->isUniformMemOp(*I));
6804 
6805   Type *ValTy = getLoadStoreType(I);
6806   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6807   const Align Alignment = getLoadStoreAlignment(I);
6808   unsigned AS = getLoadStoreAddressSpace(I);
6809   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6810   if (isa<LoadInst>(I)) {
6811     return TTI.getAddressComputationCost(ValTy) +
6812            TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
6813                                CostKind) +
6814            TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
6815   }
6816   StoreInst *SI = cast<StoreInst>(I);
6817 
6818   bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
6819   return TTI.getAddressComputationCost(ValTy) +
6820          TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
6821                              CostKind) +
6822          (isLoopInvariantStoreValue
6823               ? 0
6824               : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
6825                                        VF.getKnownMinValue() - 1));
6826 }
6827 
6828 InstructionCost
6829 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
6830                                                  ElementCount VF) {
6831   Type *ValTy = getLoadStoreType(I);
6832   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6833   const Align Alignment = getLoadStoreAlignment(I);
6834   const Value *Ptr = getLoadStorePointerOperand(I);
6835 
6836   return TTI.getAddressComputationCost(VectorTy) +
6837          TTI.getGatherScatterOpCost(
6838              I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
6839              TargetTransformInfo::TCK_RecipThroughput, I);
6840 }
6841 
6842 InstructionCost
6843 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
6844                                                    ElementCount VF) {
6845   // TODO: Once we have support for interleaving with scalable vectors
6846   // we can calculate the cost properly here.
6847   if (VF.isScalable())
6848     return InstructionCost::getInvalid();
6849 
6850   Type *ValTy = getLoadStoreType(I);
6851   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6852   unsigned AS = getLoadStoreAddressSpace(I);
6853 
6854   auto Group = getInterleavedAccessGroup(I);
6855   assert(Group && "Fail to get an interleaved access group.");
6856 
6857   unsigned InterleaveFactor = Group->getFactor();
6858   auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
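  // For example, a factor-2 group of i32 accesses at VF = 4 is modelled on a
  // wide <8 x i32> vector; the actual cost comes from the TTI hook below.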
6859 
6860   // Holds the indices of existing members in the interleaved group.
6861   SmallVector<unsigned, 4> Indices;
6862   for (unsigned IF = 0; IF < InterleaveFactor; IF++)
6863     if (Group->getMember(IF))
6864       Indices.push_back(IF);
6865 
6866   // Calculate the cost of the whole interleaved group.
6867   bool UseMaskForGaps =
6868       (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) ||
6869       (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()));
6870   InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
6871       I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
6872       AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);
6873 
6874   if (Group->isReverse()) {
6875     // TODO: Add support for reversed masked interleaved access.
6876     assert(!Legal->isMaskRequired(I) &&
6877            "Reverse masked interleaved access not supported.");
6878     Cost +=
6879         Group->getNumMembers() *
6880         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
6881   }
6882   return Cost;
6883 }
6884 
6885 Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost(
6886     Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
6887   using namespace llvm::PatternMatch;
6888   // Early exit for no in-loop reductions.
6889   if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
6890     return None;
6891   auto *VectorTy = cast<VectorType>(Ty);
6892 
6893   // We look for one of these patterns and take the minimal acceptable cost:
6894   //  reduce(mul(ext(A), ext(B))) or
6895   //  reduce(mul(A, B)) or
6896   //  reduce(ext(A)) or
6897   //  reduce(A).
6898   // The basic idea is that we walk down the tree to do that, finding the root
6899   // reduction instruction in InLoopReductionImmediateChains. From there we find
6900   // the pattern of mul/ext and test the cost of the entire pattern vs the cost
6901   // of the components. If the reduction cost is lower then we return it for the
6902   // reduction instruction and 0 for the other instructions in the pattern. If
6903   // it is not, we return an invalid cost specifying the original cost method
6904   // should be used.
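  //
  // As an illustration (assumed source, not from this pass): for a loop doing
  //   sum += (i32)(i16 A[i]) * (i32)(i16 B[i])
  // the extends, the mul and the add may all be folded into a single extended
  // MLA-style reduction cost when the target reports that as cheaper.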
6905   Instruction *RetI = I;
6906   if (match(RetI, m_ZExtOrSExt(m_Value()))) {
6907     if (!RetI->hasOneUser())
6908       return None;
6909     RetI = RetI->user_back();
6910   }
6911   if (match(RetI, m_Mul(m_Value(), m_Value())) &&
6912       RetI->user_back()->getOpcode() == Instruction::Add) {
6913     if (!RetI->hasOneUser())
6914       return None;
6915     RetI = RetI->user_back();
6916   }
6917 
6918   // Test if the found instruction is a reduction and, if not, return an
6919   // invalid cost so the caller falls back to the original cost modelling.
6920   if (!InLoopReductionImmediateChains.count(RetI))
6921     return None;
6922 
6923   // Find the reduction this chain is a part of and calculate the basic cost of
6924   // the reduction on its own.
6925   Instruction *LastChain = InLoopReductionImmediateChains[RetI];
6926   Instruction *ReductionPhi = LastChain;
6927   while (!isa<PHINode>(ReductionPhi))
6928     ReductionPhi = InLoopReductionImmediateChains[ReductionPhi];
6929 
6930   const RecurrenceDescriptor &RdxDesc =
6931       Legal->getReductionVars().find(cast<PHINode>(ReductionPhi))->second;
6932 
6933   InstructionCost BaseCost = TTI.getArithmeticReductionCost(
6934       RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind);
6935 
6936   // For a call to the llvm.fmuladd intrinsic we need to add the cost of a
6937   // normal fmul instruction to the cost of the fadd reduction.
6938   if (RdxDesc.getRecurrenceKind() == RecurKind::FMulAdd)
6939     BaseCost +=
6940         TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind);
6941 
6942   // If we're using ordered reductions then we can just return the base cost
6943   // here, since getArithmeticReductionCost calculates the full ordered
6944   // reduction cost when FP reassociation is not allowed.
6945   if (useOrderedReductions(RdxDesc))
6946     return BaseCost;
6947 
6948   // Get the operand that was not the reduction chain and match it to one of the
6949   // patterns, returning the better cost if it is found.
6950   Instruction *RedOp = RetI->getOperand(1) == LastChain
6951                            ? dyn_cast<Instruction>(RetI->getOperand(0))
6952                            : dyn_cast<Instruction>(RetI->getOperand(1));
6953 
6954   VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
6955 
6956   Instruction *Op0, *Op1;
6957   if (RedOp &&
6958       match(RedOp,
6959             m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) &&
6960       match(Op0, m_ZExtOrSExt(m_Value())) &&
6961       Op0->getOpcode() == Op1->getOpcode() &&
6962       Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
6963       !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) &&
6964       (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) {
6965 
6966     // Matched reduce(ext(mul(ext(A), ext(B))))
6967     // Note that the extend opcodes need to all match, or if A==B they will have
6968     // been converted to zext(mul(sext(A), sext(A))) as it is known positive,
6969     // which is equally fine.
6970     bool IsUnsigned = isa<ZExtInst>(Op0);
6971     auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
6972     auto *MulType = VectorType::get(Op0->getType(), VectorTy);
6973 
6974     InstructionCost ExtCost =
6975         TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType,
6976                              TTI::CastContextHint::None, CostKind, Op0);
6977     InstructionCost MulCost =
6978         TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind);
6979     InstructionCost Ext2Cost =
6980         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType,
6981                              TTI::CastContextHint::None, CostKind, RedOp);
6982 
6983     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6984         /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6985         CostKind);
6986 
6987     if (RedCost.isValid() &&
6988         RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost)
6989       return I == RetI ? RedCost : 0;
6990   } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) &&
6991              !TheLoop->isLoopInvariant(RedOp)) {
6992     // Matched reduce(ext(A))
6993     bool IsUnsigned = isa<ZExtInst>(RedOp);
6994     auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
6995     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6996         /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6997         CostKind);
6998 
6999     InstructionCost ExtCost =
7000         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
7001                              TTI::CastContextHint::None, CostKind, RedOp);
7002     if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
7003       return I == RetI ? RedCost : 0;
7004   } else if (RedOp &&
7005              match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) {
7006     if (match(Op0, m_ZExtOrSExt(m_Value())) &&
7007         Op0->getOpcode() == Op1->getOpcode() &&
7008         !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
7009       bool IsUnsigned = isa<ZExtInst>(Op0);
7010       Type *Op0Ty = Op0->getOperand(0)->getType();
7011       Type *Op1Ty = Op1->getOperand(0)->getType();
7012       Type *LargestOpTy =
7013           Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty
7014                                                                     : Op0Ty;
7015       auto *ExtType = VectorType::get(LargestOpTy, VectorTy);
7016 
7017       // Matched reduce(mul(ext(A), ext(B))), where the two ext may be of
7018       // different sizes. We take the largest type as the ext to reduce, and add
7019       // the remaining cost as, for example reduce(mul(ext(ext(A)), ext(B))).
7020       InstructionCost ExtCost0 = TTI.getCastInstrCost(
7021           Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy),
7022           TTI::CastContextHint::None, CostKind, Op0);
7023       InstructionCost ExtCost1 = TTI.getCastInstrCost(
7024           Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy),
7025           TTI::CastContextHint::None, CostKind, Op1);
7026       InstructionCost MulCost =
7027           TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
7028 
7029       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
7030           /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
7031           CostKind);
7032       InstructionCost ExtraExtCost = 0;
7033       if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) {
7034         Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1;
7035         ExtraExtCost = TTI.getCastInstrCost(
7036             ExtraExtOp->getOpcode(), ExtType,
7037             VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy),
7038             TTI::CastContextHint::None, CostKind, ExtraExtOp);
7039       }
7040 
7041       if (RedCost.isValid() &&
7042           (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost))
7043         return I == RetI ? RedCost : 0;
7044     } else if (!match(I, m_ZExtOrSExt(m_Value()))) {
7045       // Matched reduce(mul())
7046       InstructionCost MulCost =
7047           TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
7048 
7049       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
7050           /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy,
7051           CostKind);
7052 
7053       if (RedCost.isValid() && RedCost < MulCost + BaseCost)
7054         return I == RetI ? RedCost : 0;
7055     }
7056   }
7057 
7058   return I == RetI ? Optional<InstructionCost>(BaseCost) : None;
7059 }
7060 
7061 InstructionCost
7062 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
7063                                                      ElementCount VF) {
7064   // Calculate scalar cost only. Vectorization cost should be ready at this
7065   // moment.
7066   if (VF.isScalar()) {
7067     Type *ValTy = getLoadStoreType(I);
7068     const Align Alignment = getLoadStoreAlignment(I);
7069     unsigned AS = getLoadStoreAddressSpace(I);
7070 
7071     return TTI.getAddressComputationCost(ValTy) +
7072            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
7073                                TTI::TCK_RecipThroughput, I);
7074   }
7075   return getWideningCost(I, VF);
7076 }
7077 
7078 LoopVectorizationCostModel::VectorizationCostTy
7079 LoopVectorizationCostModel::getInstructionCost(Instruction *I,
7080                                                ElementCount VF) {
7081   // If we know that this instruction will remain uniform, check the cost of
7082   // the scalar version.
7083   if (isUniformAfterVectorization(I, VF))
7084     VF = ElementCount::getFixed(1);
7085 
7086   if (VF.isVector() && isProfitableToScalarize(I, VF))
7087     return VectorizationCostTy(InstsToScalarize[VF][I], false);
7088 
7089   // Forced scalars do not have any scalarization overhead.
7090   auto ForcedScalar = ForcedScalars.find(VF);
7091   if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
7092     auto InstSet = ForcedScalar->second;
7093     if (InstSet.count(I))
7094       return VectorizationCostTy(
7095           (getInstructionCost(I, ElementCount::getFixed(1)).first *
7096            VF.getKnownMinValue()),
7097           false);
7098   }
7099 
7100   Type *VectorTy;
7101   InstructionCost C = getInstructionCost(I, VF, VectorTy);
7102 
7103   bool TypeNotScalarized = false;
7104   if (VF.isVector() && VectorTy->isVectorTy()) {
7105     unsigned NumParts = TTI.getNumberOfParts(VectorTy);
7106     if (NumParts)
7107       TypeNotScalarized = NumParts < VF.getKnownMinValue();
7108     else
7109       C = InstructionCost::getInvalid();
7110   }
7111   return VectorizationCostTy(C, TypeNotScalarized);
7112 }
7113 
7114 InstructionCost
7115 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
7116                                                      ElementCount VF) const {
7117 
7118   // There is no mechanism yet to create a scalable scalarization loop,
7119   // so this is currently Invalid.
7120   if (VF.isScalable())
7121     return InstructionCost::getInvalid();
7122 
7123   if (VF.isScalar())
7124     return 0;
7125 
7126   InstructionCost Cost = 0;
7127   Type *RetTy = ToVectorTy(I->getType(), VF);
7128   if (!RetTy->isVoidTy() &&
7129       (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
7130     Cost += TTI.getScalarizationOverhead(
7131         cast<VectorType>(RetTy), APInt::getAllOnes(VF.getKnownMinValue()), true,
7132         false);
7133 
7134   // Some targets keep addresses scalar.
7135   if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
7136     return Cost;
7137 
7138   // Some targets support efficient element stores.
7139   if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
7140     return Cost;
7141 
7142   // Collect operands to consider.
7143   CallInst *CI = dyn_cast<CallInst>(I);
7144   Instruction::op_range Ops = CI ? CI->args() : I->operands();
7145 
7146   // Skip operands that do not require extraction/scalarization and do not incur
7147   // any overhead.
7148   SmallVector<Type *> Tys;
7149   for (auto *V : filterExtractingOperands(Ops, VF))
7150     Tys.push_back(MaybeVectorizeType(V->getType(), VF));
7151   return Cost + TTI.getOperandsScalarizationOverhead(
7152                     filterExtractingOperands(Ops, VF), Tys);
7153 }
7154 
7155 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
7156   if (VF.isScalar())
7157     return;
7158   NumPredStores = 0;
7159   for (BasicBlock *BB : TheLoop->blocks()) {
7160     // For each instruction in the old loop.
7161     for (Instruction &I : *BB) {
7162       Value *Ptr = getLoadStorePointerOperand(&I);
7163       if (!Ptr)
7164         continue;
7165 
7166       // TODO: We should generate better code and update the cost model for
7167       // predicated uniform stores. Today they are treated as any other
7168       // predicated store (see added test cases in
7169       // invariant-store-vectorization.ll).
7170       if (isa<StoreInst>(&I) && isScalarWithPredication(&I, VF))
7171         NumPredStores++;
7172 
7173       if (Legal->isUniformMemOp(I)) {
7174         // TODO: Avoid replicating loads and stores instead of
7175         // relying on instcombine to remove them.
7176         // Load: Scalar load + broadcast
7177         // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
7178         InstructionCost Cost;
7179         if (isa<StoreInst>(&I) && VF.isScalable() &&
7180             isLegalGatherOrScatter(&I, VF)) {
7181           Cost = getGatherScatterCost(&I, VF);
7182           setWideningDecision(&I, VF, CM_GatherScatter, Cost);
7183         } else {
7184           assert((isa<LoadInst>(&I) || !VF.isScalable()) &&
7185                  "Cannot yet scalarize uniform stores");
7186           Cost = getUniformMemOpCost(&I, VF);
7187           setWideningDecision(&I, VF, CM_Scalarize, Cost);
7188         }
7189         continue;
7190       }
7191 
7192       // We assume that widening is the best solution when possible.
7193       if (memoryInstructionCanBeWidened(&I, VF)) {
7194         InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
7195         int ConsecutiveStride = Legal->isConsecutivePtr(
7196             getLoadStoreType(&I), getLoadStorePointerOperand(&I));
7197         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
7198                "Expected consecutive stride.");
7199         InstWidening Decision =
7200             ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
7201         setWideningDecision(&I, VF, Decision, Cost);
7202         continue;
7203       }
7204 
7205       // Choose between Interleaving, Gather/Scatter or Scalarization.
7206       InstructionCost InterleaveCost = InstructionCost::getInvalid();
7207       unsigned NumAccesses = 1;
7208       if (isAccessInterleaved(&I)) {
7209         auto Group = getInterleavedAccessGroup(&I);
        assert(Group && "Failed to get an interleaved access group.");
7211 
7212         // Make one decision for the whole group.
7213         if (getWideningDecision(&I, VF) != CM_Unknown)
7214           continue;
7215 
7216         NumAccesses = Group->getNumMembers();
7217         if (interleavedAccessCanBeWidened(&I, VF))
7218           InterleaveCost = getInterleaveGroupCost(&I, VF);
7219       }
7220 
7221       InstructionCost GatherScatterCost =
7222           isLegalGatherOrScatter(&I, VF)
7223               ? getGatherScatterCost(&I, VF) * NumAccesses
7224               : InstructionCost::getInvalid();
7225 
7226       InstructionCost ScalarizationCost =
7227           getMemInstScalarizationCost(&I, VF) * NumAccesses;
7228 
      // Choose the best option for the current VF, record the decision, and
      // use it during vectorization.
7231       InstructionCost Cost;
7232       InstWidening Decision;
7233       if (InterleaveCost <= GatherScatterCost &&
7234           InterleaveCost < ScalarizationCost) {
7235         Decision = CM_Interleave;
7236         Cost = InterleaveCost;
7237       } else if (GatherScatterCost < ScalarizationCost) {
7238         Decision = CM_GatherScatter;
7239         Cost = GatherScatterCost;
7240       } else {
7241         Decision = CM_Scalarize;
7242         Cost = ScalarizationCost;
7243       }
      // If the instruction belongs to an interleave group, the whole group
      // receives the same decision. The cost is computed for the whole group
      // but will actually be assigned to a single member instruction.
7247       if (auto Group = getInterleavedAccessGroup(&I))
7248         setWideningDecision(Group, VF, Decision, Cost);
7249       else
7250         setWideningDecision(&I, VF, Decision, Cost);
7251     }
7252   }
7253 
  // Make sure that any load of an address and any other address computation
  // remains scalar unless there is gather/scatter support. This avoids
  // inevitable extracts into address registers, and also has the benefit of
  // activating LSR more, since that pass can't optimize vectorized
  // addresses.
7259   if (TTI.prefersVectorizedAddressing())
7260     return;
7261 
7262   // Start with all scalar pointer uses.
7263   SmallPtrSet<Instruction *, 8> AddrDefs;
7264   for (BasicBlock *BB : TheLoop->blocks())
7265     for (Instruction &I : *BB) {
7266       Instruction *PtrDef =
7267         dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
7268       if (PtrDef && TheLoop->contains(PtrDef) &&
7269           getWideningDecision(&I, VF) != CM_GatherScatter)
7270         AddrDefs.insert(PtrDef);
7271     }
7272 
7273   // Add all instructions used to generate the addresses.
7274   SmallVector<Instruction *, 4> Worklist;
7275   append_range(Worklist, AddrDefs);
7276   while (!Worklist.empty()) {
7277     Instruction *I = Worklist.pop_back_val();
7278     for (auto &Op : I->operands())
7279       if (auto *InstOp = dyn_cast<Instruction>(Op))
7280         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
7281             AddrDefs.insert(InstOp).second)
7282           Worklist.push_back(InstOp);
7283   }
7284 
7285   for (auto *I : AddrDefs) {
7286     if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by
      // the cost functions, but since this involves finding out whether the
      // loaded register is involved in an address computation, it is instead
      // changed here when we know this is the case.
7291       InstWidening Decision = getWideningDecision(I, VF);
7292       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
7293         // Scalarize a widened load of address.
7294         setWideningDecision(
7295             I, VF, CM_Scalarize,
7296             (VF.getKnownMinValue() *
7297              getMemoryInstructionCost(I, ElementCount::getFixed(1))));
7298       else if (auto Group = getInterleavedAccessGroup(I)) {
7299         // Scalarize an interleave group of address loads.
7300         for (unsigned I = 0; I < Group->getFactor(); ++I) {
7301           if (Instruction *Member = Group->getMember(I))
7302             setWideningDecision(
7303                 Member, VF, CM_Scalarize,
7304                 (VF.getKnownMinValue() *
7305                  getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
7306         }
7307       }
7308     } else
      // Make sure I gets scalarized and receives a cost estimate without
      // scalarization overhead.
7311       ForcedScalars[VF].insert(I);
7312   }
7313 }
7314 
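// Return the estimated cost of instruction I when vectorized with factor VF,
// and set VectorTy to the vector (or, for scalarized instructions, scalar)
// type the TTI cost query was made against.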
7315 InstructionCost
7316 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
7317                                                Type *&VectorTy) {
7318   Type *RetTy = I->getType();
7319   if (canTruncateToMinimalBitwidth(I, VF))
7320     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
7321   auto SE = PSE.getSE();
7322   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
7323 
7324   auto hasSingleCopyAfterVectorization = [this](Instruction *I,
7325                                                 ElementCount VF) -> bool {
7326     if (VF.isScalar())
7327       return true;
7328 
7329     auto Scalarized = InstsToScalarize.find(VF);
7330     assert(Scalarized != InstsToScalarize.end() &&
7331            "VF not yet analyzed for scalarization profitability");
7332     return !Scalarized->second.count(I) &&
7333            llvm::all_of(I->users(), [&](User *U) {
7334              auto *UI = cast<Instruction>(U);
7335              return !Scalarized->second.count(UI);
7336            });
7337   };
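  // The lambda is only referenced inside the assert below; mention it here to
  // avoid an unused-variable warning in builds without asserts.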
7338   (void) hasSingleCopyAfterVectorization;
7339 
7340   if (isScalarAfterVectorization(I, VF)) {
    // With the exception of GEPs and PHIs, after scalarization there should
    // only be one copy of the instruction generated in the loop. This is
    // because the VF is either 1, or any instructions that need scalarizing
    // have already been dealt with by the time we get here. As a result, we
    // don't have to multiply the instruction cost by VF.
7346     assert(I->getOpcode() == Instruction::GetElementPtr ||
7347            I->getOpcode() == Instruction::PHI ||
7348            (I->getOpcode() == Instruction::BitCast &&
7349             I->getType()->isPointerTy()) ||
7350            hasSingleCopyAfterVectorization(I, VF));
7351     VectorTy = RetTy;
7352   } else
7353     VectorTy = ToVectorTy(RetTy, VF);
7354 
7355   // TODO: We need to estimate the cost of intrinsic calls.
7356   switch (I->getOpcode()) {
7357   case Instruction::GetElementPtr:
7358     // We mark this instruction as zero-cost because the cost of GEPs in
7359     // vectorized code depends on whether the corresponding memory instruction
7360     // is scalarized or not. Therefore, we handle GEPs with the memory
7361     // instruction cost.
7362     return 0;
7363   case Instruction::Br: {
    // In cases of scalarized and predicated instructions, there will be VF
    // predicated blocks in the vectorized loop. Each branch around these
    // blocks also requires an extract of its vector compare i1 element.
7367     bool ScalarPredicatedBB = false;
7368     BranchInst *BI = cast<BranchInst>(I);
7369     if (VF.isVector() && BI->isConditional() &&
7370         (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
7371          PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
7372       ScalarPredicatedBB = true;
7373 
7374     if (ScalarPredicatedBB) {
      // Scalable vectors cannot be scalarized with predicated instructions.
7376       if (VF.isScalable())
7377         return InstructionCost::getInvalid();
7378       // Return cost for branches around scalarized and predicated blocks.
7379       auto *Vec_i1Ty =
7380           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
7381       return (
7382           TTI.getScalarizationOverhead(
7383               Vec_i1Ty, APInt::getAllOnes(VF.getFixedValue()), false, true) +
7384           (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue()));
7385     } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
7386       // The back-edge branch will remain, as will all scalar branches.
7387       return TTI.getCFInstrCost(Instruction::Br, CostKind);
7388     else
7389       // This branch will be eliminated by if-conversion.
7390       return 0;
7391     // Note: We currently assume zero cost for an unconditional branch inside
7392     // a predicated block since it will become a fall-through, although we
7393     // may decide in the future to call TTI for all branches.
7394   }
7395   case Instruction::PHI: {
7396     auto *Phi = cast<PHINode>(I);
7397 
7398     // First-order recurrences are replaced by vector shuffles inside the loop.
7399     // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
7400     if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi))
7401       return TTI.getShuffleCost(
7402           TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy),
7403           None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1));
7404 
7405     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
7406     // converted into select instructions. We require N - 1 selects per phi
7407     // node, where N is the number of incoming values.
7408     if (VF.isVector() && Phi->getParent() != TheLoop->getHeader())
7409       return (Phi->getNumIncomingValues() - 1) *
7410              TTI.getCmpSelInstrCost(
7411                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
7412                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
7413                  CmpInst::BAD_ICMP_PREDICATE, CostKind);
7414 
7415     return TTI.getCFInstrCost(Instruction::PHI, CostKind);
7416   }
7417   case Instruction::UDiv:
7418   case Instruction::SDiv:
7419   case Instruction::URem:
7420   case Instruction::SRem:
7421     // If we have a predicated instruction, it may not be executed for each
7422     // vector lane. Get the scalarization cost and scale this amount by the
7423     // probability of executing the predicated block. If the instruction is not
7424     // predicated, we fall through to the next case.
7425     if (VF.isVector() && isScalarWithPredication(I, VF)) {
7426       InstructionCost Cost = 0;
7427 
7428       // These instructions have a non-void type, so account for the phi nodes
7429       // that we will create. This cost is likely to be zero. The phi node
7430       // cost, if any, should be scaled by the block probability because it
7431       // models a copy at the end of each predicated block.
7432       Cost += VF.getKnownMinValue() *
7433               TTI.getCFInstrCost(Instruction::PHI, CostKind);
7434 
7435       // The cost of the non-predicated instruction.
7436       Cost += VF.getKnownMinValue() *
7437               TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);
7438 
7439       // The cost of insertelement and extractelement instructions needed for
7440       // scalarization.
7441       Cost += getScalarizationOverhead(I, VF);
7442 
7443       // Scale the cost by the probability of executing the predicated blocks.
7444       // This assumes the predicated block for each vector lane is equally
7445       // likely.
7446       return Cost / getReciprocalPredBlockProb();
7447     }
7448     LLVM_FALLTHROUGH;
7449   case Instruction::Add:
7450   case Instruction::FAdd:
7451   case Instruction::Sub:
7452   case Instruction::FSub:
7453   case Instruction::Mul:
7454   case Instruction::FMul:
7455   case Instruction::FDiv:
7456   case Instruction::FRem:
7457   case Instruction::Shl:
7458   case Instruction::LShr:
7459   case Instruction::AShr:
7460   case Instruction::And:
7461   case Instruction::Or:
7462   case Instruction::Xor: {
    // Since we will replace the stride by 1, the multiplication should go away.
7464     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
7465       return 0;
7466 
7467     // Detect reduction patterns
7468     if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7469       return *RedCost;
7470 
    // Certain instructions can be cheaper to vectorize if they have a constant
    // second vector operand. One example of this is shifts on x86.
7473     Value *Op2 = I->getOperand(1);
7474     TargetTransformInfo::OperandValueProperties Op2VP;
7475     TargetTransformInfo::OperandValueKind Op2VK =
7476         TTI.getOperandInfo(Op2, Op2VP);
7477     if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
7478       Op2VK = TargetTransformInfo::OK_UniformValue;
7479 
7480     SmallVector<const Value *, 4> Operands(I->operand_values());
7481     return TTI.getArithmeticInstrCost(
7482         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7483         Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
7484   }
7485   case Instruction::FNeg: {
7486     return TTI.getArithmeticInstrCost(
7487         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7488         TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None,
7489         TargetTransformInfo::OP_None, I->getOperand(0), I);
7490   }
7491   case Instruction::Select: {
7492     SelectInst *SI = cast<SelectInst>(I);
7493     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
7494     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
7495 
7496     const Value *Op0, *Op1;
7497     using namespace llvm::PatternMatch;
7498     if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
7499                         match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
7500       // select x, y, false --> x & y
7501       // select x, true, y --> x | y
7502       TTI::OperandValueProperties Op1VP = TTI::OP_None;
7503       TTI::OperandValueProperties Op2VP = TTI::OP_None;
7504       TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP);
7505       TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP);
7506       assert(Op0->getType()->getScalarSizeInBits() == 1 &&
7507               Op1->getType()->getScalarSizeInBits() == 1);
7508 
7509       SmallVector<const Value *, 2> Operands{Op0, Op1};
7510       return TTI.getArithmeticInstrCost(
7511           match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And, VectorTy,
7512           CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I);
7513     }
7514 
7515     Type *CondTy = SI->getCondition()->getType();
7516     if (!ScalarCond)
7517       CondTy = VectorType::get(CondTy, VF);
7518 
7519     CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
7520     if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition()))
7521       Pred = Cmp->getPredicate();
7522     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, Pred,
7523                                   CostKind, I);
7524   }
7525   case Instruction::ICmp:
7526   case Instruction::FCmp: {
7527     Type *ValTy = I->getOperand(0)->getType();
7528     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
7529     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
7530       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
7531     VectorTy = ToVectorTy(ValTy, VF);
7532     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr,
7533                                   cast<CmpInst>(I)->getPredicate(), CostKind,
7534                                   I);
7535   }
7536   case Instruction::Store:
7537   case Instruction::Load: {
7538     ElementCount Width = VF;
7539     if (Width.isVector()) {
7540       InstWidening Decision = getWideningDecision(I, Width);
7541       assert(Decision != CM_Unknown &&
7542              "CM decision should be taken at this point");
7543       if (Decision == CM_Scalarize)
7544         Width = ElementCount::getFixed(1);
7545     }
7546     VectorTy = ToVectorTy(getLoadStoreType(I), Width);
7547     return getMemoryInstructionCost(I, VF);
7548   }
7549   case Instruction::BitCast:
7550     if (I->getType()->isPointerTy())
7551       return 0;
7552     LLVM_FALLTHROUGH;
7553   case Instruction::ZExt:
7554   case Instruction::SExt:
7555   case Instruction::FPToUI:
7556   case Instruction::FPToSI:
7557   case Instruction::FPExt:
7558   case Instruction::PtrToInt:
7559   case Instruction::IntToPtr:
7560   case Instruction::SIToFP:
7561   case Instruction::UIToFP:
7562   case Instruction::Trunc:
7563   case Instruction::FPTrunc: {
7564     // Computes the CastContextHint from a Load/Store instruction.
7565     auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
7566       assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
7567              "Expected a load or a store!");
7568 
7569       if (VF.isScalar() || !TheLoop->contains(I))
7570         return TTI::CastContextHint::Normal;
7571 
7572       switch (getWideningDecision(I, VF)) {
7573       case LoopVectorizationCostModel::CM_GatherScatter:
7574         return TTI::CastContextHint::GatherScatter;
7575       case LoopVectorizationCostModel::CM_Interleave:
7576         return TTI::CastContextHint::Interleave;
7577       case LoopVectorizationCostModel::CM_Scalarize:
7578       case LoopVectorizationCostModel::CM_Widen:
7579         return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked
7580                                         : TTI::CastContextHint::Normal;
7581       case LoopVectorizationCostModel::CM_Widen_Reverse:
7582         return TTI::CastContextHint::Reversed;
7583       case LoopVectorizationCostModel::CM_Unknown:
7584         llvm_unreachable("Instr did not go through cost modelling?");
7585       }
7586 
7587       llvm_unreachable("Unhandled case!");
7588     };
7589 
7590     unsigned Opcode = I->getOpcode();
7591     TTI::CastContextHint CCH = TTI::CastContextHint::None;
    // For Trunc/FPTrunc, the context is the only user, which must be a
    // StoreInst.
7593     if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
7594       if (I->hasOneUse())
7595         if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
7596           CCH = ComputeCCH(Store);
7597     }
    // For Z/S/FPExt, the context is the operand, which must be a LoadInst.
7599     else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
7600              Opcode == Instruction::FPExt) {
7601       if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
7602         CCH = ComputeCCH(Load);
7603     }
7604 
7605     // We optimize the truncation of induction variables having constant
7606     // integer steps. The cost of these truncations is the same as the scalar
7607     // operation.
7608     if (isOptimizableIVTruncate(I, VF)) {
7609       auto *Trunc = cast<TruncInst>(I);
7610       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
7611                                   Trunc->getSrcTy(), CCH, CostKind, Trunc);
7612     }
7613 
7614     // Detect reduction patterns
7615     if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7616       return *RedCost;
7617 
7618     Type *SrcScalarTy = I->getOperand(0)->getType();
7619     Type *SrcVecTy =
7620         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
7621     if (canTruncateToMinimalBitwidth(I, VF)) {
      // This cast is going to be shrunk. This may remove the cast or turn it
      // into a slightly different cast. For example, if MinBW == 16,
      // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
7625       //
7626       // Calculate the modified src and dest types.
7627       Type *MinVecTy = VectorTy;
7628       if (Opcode == Instruction::Trunc) {
7629         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
7630         VectorTy =
7631             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7632       } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
7633         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
7634         VectorTy =
7635             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7636       }
7637     }
7638 
7639     return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
7640   }
7641   case Instruction::Call: {
7642     if (RecurrenceDescriptor::isFMulAddIntrinsic(I))
7643       if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7644         return *RedCost;
7645     bool NeedToScalarize;
7646     CallInst *CI = cast<CallInst>(I);
7647     InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
7648     if (getVectorIntrinsicIDForCall(CI, TLI)) {
7649       InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF);
7650       return std::min(CallCost, IntrinsicCost);
7651     }
7652     return CallCost;
7653   }
7654   case Instruction::ExtractValue:
7655     return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput);
7656   case Instruction::Alloca:
7657     // We cannot easily widen alloca to a scalable alloca, as
7658     // the result would need to be a vector of pointers.
7659     if (VF.isScalable())
7660       return InstructionCost::getInvalid();
7661     LLVM_FALLTHROUGH;
7662   default:
7663     // This opcode is unknown. Assume that it is the same as 'mul'.
7664     return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
7665   } // end of switch.
7666 }
7667 
7668 char LoopVectorize::ID = 0;
7669 
7670 static const char lv_name[] = "Loop Vectorization";
7671 
7672 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
7673 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
7674 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
7675 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
7676 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
7677 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
7678 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
7679 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
7680 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
7681 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
7682 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
7683 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
7684 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
7685 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
7686 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
7687 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
7688 
7689 namespace llvm {
7690 
7691 Pass *createLoopVectorizePass() { return new LoopVectorize(); }
7692 
7693 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
7694                               bool VectorizeOnlyWhenForced) {
7695   return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
7696 }
7697 
7698 } // end namespace llvm
7699 
7700 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
7701   // Check if the pointer operand of a load or store instruction is
7702   // consecutive.
7703   if (auto *Ptr = getLoadStorePointerOperand(Inst))
7704     return Legal->isConsecutivePtr(getLoadStoreType(Inst), Ptr);
7705   return false;
7706 }
7707 
7708 void LoopVectorizationCostModel::collectValuesToIgnore() {
7709   // Ignore ephemeral values.
7710   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
7711 
7712   // Ignore type-promoting instructions we identified during reduction
7713   // detection.
7714   for (auto &Reduction : Legal->getReductionVars()) {
7715     const RecurrenceDescriptor &RedDes = Reduction.second;
7716     const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
7717     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7718   }
7719   // Ignore type-casting instructions we identified during induction
7720   // detection.
7721   for (auto &Induction : Legal->getInductionVars()) {
7722     const InductionDescriptor &IndDes = Induction.second;
7723     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
7724     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7725   }
7726 }
7727 
7728 void LoopVectorizationCostModel::collectInLoopReductions() {
7729   for (auto &Reduction : Legal->getReductionVars()) {
7730     PHINode *Phi = Reduction.first;
7731     const RecurrenceDescriptor &RdxDesc = Reduction.second;
7732 
7733     // We don't collect reductions that are type promoted (yet).
7734     if (RdxDesc.getRecurrenceType() != Phi->getType())
7735       continue;
7736 
7737     // If the target would prefer this reduction to happen "in-loop", then we
7738     // want to record it as such.
7739     unsigned Opcode = RdxDesc.getOpcode();
7740     if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) &&
7741         !TTI.preferInLoopReduction(Opcode, Phi->getType(),
7742                                    TargetTransformInfo::ReductionFlags()))
7743       continue;
7744 
7745     // Check that we can correctly put the reductions into the loop, by
7746     // finding the chain of operations that leads from the phi to the loop
7747     // exit value.
7748     SmallVector<Instruction *, 4> ReductionOperations =
7749         RdxDesc.getReductionOpChain(Phi, TheLoop);
7750     bool InLoop = !ReductionOperations.empty();
7751     if (InLoop) {
7752       InLoopReductionChains[Phi] = ReductionOperations;
7753       // Add the elements to InLoopReductionImmediateChains for cost modelling.
7754       Instruction *LastChain = Phi;
7755       for (auto *I : ReductionOperations) {
7756         InLoopReductionImmediateChains[I] = LastChain;
7757         LastChain = I;
7758       }
7759     }
7760     LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
7761                       << " reduction for phi: " << *Phi << "\n");
7762   }
7763 }
7764 
// TODO: we could return a pair of values that specify the max VF and
// min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
// `buildVPlans(VF, VF)`. We cannot do it because VPlan at the moment
// doesn't have a cost model that can choose which plan to execute if
// more than one is generated.
7770 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
7771                                  LoopVectorizationCostModel &CM) {
7772   unsigned WidestType;
7773   std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
7774   return WidestVectorRegBits / WidestType;
7775 }
7776 
7777 VectorizationFactor
7778 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
7779   assert(!UserVF.isScalable() && "scalable vectors not yet supported");
7780   ElementCount VF = UserVF;
  // Outer loop handling: outer loops may require CFG and instruction-level
  // transformations before we can even evaluate whether vectorization is
  // profitable. Since we cannot modify the incoming IR, we need to build
  // VPlan upfront in the vectorization pipeline.
7785   if (!OrigLoop->isInnermost()) {
7786     // If the user doesn't provide a vectorization factor, determine a
7787     // reasonable one.
7788     if (UserVF.isZero()) {
7789       VF = ElementCount::getFixed(determineVPlanVF(
7790           TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
7791               .getFixedSize(),
7792           CM));
7793       LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
7794 
7795       // Make sure we have a VF > 1 for stress testing.
7796       if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
7797         LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
7798                           << "overriding computed VF.\n");
7799         VF = ElementCount::getFixed(4);
7800       }
7801     }
7802     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7803     assert(isPowerOf2_32(VF.getKnownMinValue()) &&
7804            "VF needs to be a power of two");
7805     LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
7806                       << "VF " << VF << " to build VPlans.\n");
7807     buildVPlans(VF, VF);
7808 
7809     // For VPlan build stress testing, we bail out after VPlan construction.
7810     if (VPlanBuildStressTest)
7811       return VectorizationFactor::Disabled();
7812 
7813     return {VF, 0 /*Cost*/};
7814   }
7815 
7816   LLVM_DEBUG(
7817       dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
7818                 "VPlan-native path.\n");
7819   return VectorizationFactor::Disabled();
7820 }
7821 
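// Plan vectorization of the inner loop: compute the maximum feasible fixed
// and scalable factors, honor a legal user-provided VF if one is given, run
// the per-VF analyses, build VPlans for all candidate factors, and pick the
// most profitable one (subject to the runtime-check thresholds below).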
7822 Optional<VectorizationFactor>
7823 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
7824   assert(OrigLoop->isInnermost() && "Inner loop expected.");
7825   FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
  if (!MaxFactors) // Cases that should not be vectorized nor interleaved.
7827     return None;
7828 
7829   // Invalidate interleave groups if all blocks of loop will be predicated.
7830   if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) &&
7831       !useMaskedInterleavedAccesses(*TTI)) {
7832     LLVM_DEBUG(
7833         dbgs()
7834         << "LV: Invalidate all interleaved groups due to fold-tail by masking "
7835            "which requires masked-interleaved support.\n");
7836     if (CM.InterleaveInfo.invalidateGroups())
7837       // Invalidating interleave groups also requires invalidating all decisions
7838       // based on them, which includes widening decisions and uniform and scalar
7839       // values.
7840       CM.invalidateCostModelingDecisions();
7841   }
7842 
7843   ElementCount MaxUserVF =
7844       UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF;
7845   bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF);
7846   if (!UserVF.isZero() && UserVFIsLegal) {
7847     assert(isPowerOf2_32(UserVF.getKnownMinValue()) &&
7848            "VF needs to be a power of two");
7849     // Collect the instructions (and their associated costs) that will be more
7850     // profitable to scalarize.
7851     if (CM.selectUserVectorizationFactor(UserVF)) {
7852       LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
7853       CM.collectInLoopReductions();
7854       buildVPlansWithVPRecipes(UserVF, UserVF);
7855       LLVM_DEBUG(printPlans(dbgs()));
7856       return {{UserVF, 0}};
7857     } else
7858       reportVectorizationInfo("UserVF ignored because of invalid costs.",
7859                               "InvalidCost", ORE, OrigLoop);
7860   }
7861 
7862   // Populate the set of Vectorization Factor Candidates.
7863   ElementCountSet VFCandidates;
7864   for (auto VF = ElementCount::getFixed(1);
7865        ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2)
7866     VFCandidates.insert(VF);
7867   for (auto VF = ElementCount::getScalable(1);
7868        ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2)
7869     VFCandidates.insert(VF);
7870 
7871   for (const auto &VF : VFCandidates) {
7872     // Collect Uniform and Scalar instructions after vectorization with VF.
7873     CM.collectUniformsAndScalars(VF);
7874 
7875     // Collect the instructions (and their associated costs) that will be more
7876     // profitable to scalarize.
7877     if (VF.isVector())
7878       CM.collectInstsToScalarize(VF);
7879   }
7880 
7881   CM.collectInLoopReductions();
7882   buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF);
7883   buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF);
7884 
7885   LLVM_DEBUG(printPlans(dbgs()));
7886   if (!MaxFactors.hasVector())
7887     return VectorizationFactor::Disabled();
7888 
7889   // Select the optimal vectorization factor.
7890   auto SelectedVF = CM.selectVectorizationFactor(VFCandidates);
7891 
7892   // Check if it is profitable to vectorize with runtime checks.
7893   unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks();
7894   if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) {
7895     bool PragmaThresholdReached =
7896         NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
7897     bool ThresholdReached =
7898         NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
7899     if ((ThresholdReached && !Hints.allowReordering()) ||
7900         PragmaThresholdReached) {
7901       ORE->emit([&]() {
7902         return OptimizationRemarkAnalysisAliasing(
7903                    DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(),
7904                    OrigLoop->getHeader())
7905                << "loop not vectorized: cannot prove it is safe to reorder "
7906                   "memory operations";
7907       });
7908       LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
7909       Hints.emitRemarkWithHints();
7910       return VectorizationFactor::Disabled();
7911     }
7912   }
7913   return SelectedVF;
7914 }
7915 
7916 VPlan &LoopVectorizationPlanner::getBestPlanFor(ElementCount VF) const {
7917   assert(count_if(VPlans,
7918                   [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) ==
7919              1 &&
7920          "Best VF has not a single VPlan.");
7921 
7922   for (const VPlanPtr &Plan : VPlans) {
7923     if (Plan->hasVF(VF))
7924       return *Plan.get();
7925   }
7926   llvm_unreachable("No plan found!");
7927 }
7928 
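// Generate code for BestVPlan with the chosen VF and UF: create the vector
// loop skeleton, execute the plan to widen the original instructions, carry
// the loop metadata over to the vector loop, and fix up the vectorized code
// afterwards.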
7929 void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF,
7930                                            VPlan &BestVPlan,
7931                                            InnerLoopVectorizer &ILV,
7932                                            DominatorTree *DT) {
  LLVM_DEBUG(dbgs() << "Executing best plan with VF=" << BestVF
                    << ", UF=" << BestUF << '\n');
7935 
7936   // Perform the actual loop transformation.
7937 
7938   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
7939   VPTransformState State{BestVF, BestUF, LI, DT, ILV.Builder, &ILV, &BestVPlan};
7940   Value *CanonicalIVStartValue;
7941   std::tie(State.CFG.PrevBB, CanonicalIVStartValue) =
7942       ILV.createVectorizedLoopSkeleton();
7943   ILV.collectPoisonGeneratingRecipes(State);
7944 
7945   ILV.printDebugTracesAtStart();
7946 
7947   //===------------------------------------------------===//
7948   //
  // Notice: any optimization or new instruction that goes
  // into the code below should also be implemented in
  // the cost model.
7952   //
7953   //===------------------------------------------------===//
7954 
7955   // 2. Copy and widen instructions from the old loop into the new loop.
7956   BestVPlan.prepareToExecute(ILV.getOrCreateTripCount(nullptr),
7957                              ILV.getOrCreateVectorTripCount(nullptr),
7958                              CanonicalIVStartValue, State);
7959   BestVPlan.execute(&State);
7960 
7961   // Keep all loop hints from the original loop on the vector loop (we'll
7962   // replace the vectorizer-specific hints below).
7963   MDNode *OrigLoopID = OrigLoop->getLoopID();
7964 
7965   Optional<MDNode *> VectorizedLoopID =
7966       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
7967                                       LLVMLoopVectorizeFollowupVectorized});
7968 
7969   Loop *L = LI->getLoopFor(State.CFG.PrevBB);
7970   if (VectorizedLoopID.hasValue())
7971     L->setLoopID(VectorizedLoopID.getValue());
7972   else {
7973     // Keep all loop hints from the original loop on the vector loop (we'll
7974     // replace the vectorizer-specific hints below).
7975     if (MDNode *LID = OrigLoop->getLoopID())
7976       L->setLoopID(LID);
7977 
7978     LoopVectorizeHints Hints(L, true, *ORE);
7979     Hints.setAlreadyVectorized();
7980   }
7981 
7982   // 3. Fix the vectorized code: take care of header phi's, live-outs,
7983   //    predication, updating analyses.
7984   ILV.fixVectorizedLoop(State);
7985 
7986   ILV.printDebugTracesAtEnd();
7987 }
7988 
7989 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
7990 void LoopVectorizationPlanner::printPlans(raw_ostream &O) {
7991   for (const auto &Plan : VPlans)
7992     if (PrintVPlansInDotFormat)
7993       Plan->printDOT(O);
7994     else
7995       Plan->print(O);
7996 }
7997 #endif
7998 
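// Collect instructions of the original loop that will have no meaningful use
// after vectorization: exit-condition compares (and their single-use trunc
// operands) and induction updates whose users are all either the induction
// phi itself or already known to be dead.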
7999 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
8000     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
8001 
  // We create new control-flow for the vectorized loop, so the original exit
  // conditions will be dead after vectorization if they are only used by the
  // terminator.
8005   SmallVector<BasicBlock*> ExitingBlocks;
8006   OrigLoop->getExitingBlocks(ExitingBlocks);
8007   for (auto *BB : ExitingBlocks) {
8008     auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
8009     if (!Cmp || !Cmp->hasOneUse())
8010       continue;
8011 
8012     // TODO: we should introduce a getUniqueExitingBlocks on Loop
8013     if (!DeadInstructions.insert(Cmp).second)
8014       continue;
8015 
    // An operand of the icmp is often a dead trunc, used by IndUpdate.
8017     // TODO: can recurse through operands in general
8018     for (Value *Op : Cmp->operands()) {
8019       if (isa<TruncInst>(Op) && Op->hasOneUse())
8020           DeadInstructions.insert(cast<Instruction>(Op));
8021     }
8022   }
8023 
8024   // We create new "steps" for induction variable updates to which the original
8025   // induction variables map. An original update instruction will be dead if
8026   // all its users except the induction variable are dead.
8027   auto *Latch = OrigLoop->getLoopLatch();
8028   for (auto &Induction : Legal->getInductionVars()) {
8029     PHINode *Ind = Induction.first;
8030     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
8031 
    // If the tail is to be folded by masking, the primary induction variable,
    // if it exists, isn't dead: it will be used for masking. Don't kill it.
8034     if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction())
8035       continue;
8036 
8037     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
8038           return U == Ind || DeadInstructions.count(cast<Instruction>(U));
8039         }))
8040       DeadInstructions.insert(IndUpdate);
8041   }
8042 }
8043 
8044 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
8045 
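// Attach "llvm.loop.unroll.runtime.disable" to L's loop-ID metadata, keeping
// all existing operands, unless the loop already carries unroll-disable
// metadata.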
8046 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
8047   SmallVector<Metadata *, 4> MDs;
8048   // Reserve first location for self reference to the LoopID metadata node.
8049   MDs.push_back(nullptr);
8050   bool IsUnrollMetadata = false;
8051   MDNode *LoopID = L->getLoopID();
8052   if (LoopID) {
8053     // First find existing loop unrolling disable metadata.
8054     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
8055       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
8056       if (MD) {
8057         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
8058         IsUnrollMetadata =
8059             S && S->getString().startswith("llvm.loop.unroll.disable");
8060       }
8061       MDs.push_back(LoopID->getOperand(i));
8062     }
8063   }
8064 
8065   if (!IsUnrollMetadata) {
8066     // Add runtime unroll disable metadata.
8067     LLVMContext &Context = L->getHeader()->getContext();
8068     SmallVector<Metadata *, 1> DisableOperands;
8069     DisableOperands.push_back(
8070         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
8071     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
8072     MDs.push_back(DisableNode);
8073     MDNode *NewLoopID = MDNode::get(Context, MDs);
8074     // Set operand 0 to refer to the loop id itself.
8075     NewLoopID->replaceOperandWith(0, NewLoopID);
8076     L->setLoopID(NewLoopID);
8077   }
8078 }
8079 
8080 //===--------------------------------------------------------------------===//
8081 // EpilogueVectorizerMainLoop
8082 //===--------------------------------------------------------------------===//
8083 
8084 /// This function is partially responsible for generating the control flow
8085 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
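/// Roughly, the skeleton built here consists of an "iter.check" block that
/// guards the vector epilogue, optional SCEV and memory runtime-check blocks,
/// a "vector.main.loop.iter.check" block that guards the main vector loop,
/// and the new "vector.ph" preheader of the main vector loop.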
8086 std::pair<BasicBlock *, Value *>
8087 EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
8088   MDNode *OrigLoopID = OrigLoop->getLoopID();
8089   Loop *Lp = createVectorLoopSkeleton("");
8090 
8091   // Generate the code to check the minimum iteration count of the vector
8092   // epilogue (see below).
8093   EPI.EpilogueIterationCountCheck =
8094       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true);
8095   EPI.EpilogueIterationCountCheck->setName("iter.check");
8096 
8097   // Generate the code to check any assumptions that we've made for SCEV
8098   // expressions.
8099   EPI.SCEVSafetyCheck = emitSCEVChecks(Lp, LoopScalarPreHeader);
8100 
8101   // Generate the code that checks at runtime if arrays overlap. We put the
8102   // checks into a separate block to make the more common case of few elements
8103   // faster.
8104   EPI.MemSafetyCheck = emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
8105 
8106   // Generate the iteration count check for the main loop, *after* the check
8107   // for the epilogue loop, so that the path-length is shorter for the case
8108   // that goes directly through the vector epilogue. The longer-path length for
8109   // the main loop is compensated for, by the gain from vectorizing the larger
8110   // trip count. Note: the branch will get updated later on when we vectorize
8111   // the epilogue.
8112   EPI.MainLoopIterationCountCheck =
8113       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false);
8114 
8115   // Generate the induction variable.
8116   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
8117   EPI.VectorTripCount = CountRoundDown;
8118   createHeaderBranch(Lp);
8119 
  // Skip induction resume value creation here because the resume values will
  // be created in the second pass. If we created them here, they wouldn't be
  // used anyway, because the vplan in the second pass still contains the
  // inductions from the original loop.
8124 
8125   return {completeLoopSkeleton(Lp, OrigLoopID), nullptr};
8126 }
8127 
8128 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
8129   LLVM_DEBUG({
8130     dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
8131            << "Main Loop VF:" << EPI.MainLoopVF
8132            << ", Main Loop UF:" << EPI.MainLoopUF
8133            << ", Epilogue Loop VF:" << EPI.EpilogueVF
8134            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8135   });
8136 }
8137 
8138 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
8139   DEBUG_WITH_TYPE(VerboseDebug, {
8140     dbgs() << "intermediate fn:\n"
8141            << *OrigLoop->getHeader()->getParent() << "\n";
8142   });
8143 }
8144 
8145 BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(
8146     Loop *L, BasicBlock *Bypass, bool ForEpilogue) {
8147   assert(L && "Expected valid Loop.");
8148   assert(Bypass && "Expected valid bypass basic block.");
8149   ElementCount VFactor = ForEpilogue ? EPI.EpilogueVF : VF;
8150   unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF;
8151   Value *Count = getOrCreateTripCount(L);
8152   // Reuse existing vector loop preheader for TC checks.
8153   // Note that new preheader block is generated for vector loop.
8154   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
8155   IRBuilder<> Builder(TCCheckBlock->getTerminator());
8156 
8157   // Generate code to check if the loop's trip count is less than VF * UF of the
8158   // main vector loop.
8159   auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF) ?
8160       ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
8161 
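  // The emitted check is roughly
  //   %min.iters.check = icmp ult iN %trip.count, (VF * UF)
  // using 'ule' instead of 'ult' when a scalar epilogue is required, so that
  // at least one scalar iteration is always left to run.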
8162   Value *CheckMinIters = Builder.CreateICmp(
8163       P, Count, createStepForVF(Builder, Count->getType(), VFactor, UFactor),
8164       "min.iters.check");
8165 
8166   if (!ForEpilogue)
8167     TCCheckBlock->setName("vector.main.loop.iter.check");
8168 
8169   // Create new preheader for vector loop.
8170   LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
8171                                    DT, LI, nullptr, "vector.ph");
8172 
8173   if (ForEpilogue) {
8174     assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
8175                                  DT->getNode(Bypass)->getIDom()) &&
8176            "TC check is expected to dominate Bypass");
8177 
8178     // Update dominator for Bypass & LoopExit.
8179     DT->changeImmediateDominator(Bypass, TCCheckBlock);
8180     if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF))
8181       // For loops with multiple exits, there's no edge from the middle block
8182       // to exit blocks (as the epilogue must run) and thus no need to update
8183       // the immediate dominator of the exit blocks.
8184       DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
8185 
8186     LoopBypassBlocks.push_back(TCCheckBlock);
8187 
8188     // Save the trip count so we don't have to regenerate it in the
8189     // vec.epilog.iter.check. This is safe to do because the trip count
8190     // generated here dominates the vector epilog iter check.
8191     EPI.TripCount = Count;
8192   }
8193 
8194   ReplaceInstWithInst(
8195       TCCheckBlock->getTerminator(),
8196       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8197 
8198   return TCCheckBlock;
8199 }
8200 
8201 //===--------------------------------------------------------------------===//
8202 // EpilogueVectorizerEpilogueLoop
8203 //===--------------------------------------------------------------------===//
8204 
8205 /// This function is partially responsible for generating the control flow
8206 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
8207 std::pair<BasicBlock *, Value *>
8208 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
8209   MDNode *OrigLoopID = OrigLoop->getLoopID();
8210   Loop *Lp = createVectorLoopSkeleton("vec.epilog.");
8211 
  // Now, compare the remaining count and if there aren't enough iterations to
  // execute the vectorized epilogue, skip to the scalar part.
8214   BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader;
8215   VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check");
8216   LoopVectorPreHeader =
8217       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
8218                  LI, nullptr, "vec.epilog.ph");
8219   emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader,
8220                                           VecEpilogueIterationCountCheck);
8221 
8222   // Adjust the control flow taking the state info from the main loop
8223   // vectorization into account.
8224   assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck &&
8225          "expected this to be saved from the previous pass.");
8226   EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith(
8227       VecEpilogueIterationCountCheck, LoopVectorPreHeader);
8228 
8229   DT->changeImmediateDominator(LoopVectorPreHeader,
8230                                EPI.MainLoopIterationCountCheck);
8231 
8232   EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
8233       VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8234 
8235   if (EPI.SCEVSafetyCheck)
8236     EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith(
8237         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8238   if (EPI.MemSafetyCheck)
8239     EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith(
8240         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8241 
8242   DT->changeImmediateDominator(
8243       VecEpilogueIterationCountCheck,
8244       VecEpilogueIterationCountCheck->getSinglePredecessor());
8245 
8246   DT->changeImmediateDominator(LoopScalarPreHeader,
8247                                EPI.EpilogueIterationCountCheck);
8248   if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF))
    // If there is an epilogue which must run, there's no edge from the
    // middle block to exit blocks and thus no need to update the immediate
    // dominator of the exit blocks.
8252     DT->changeImmediateDominator(LoopExitBlock,
8253                                  EPI.EpilogueIterationCountCheck);
8254 
8255   // Keep track of bypass blocks, as they feed start values to the induction
8256   // phis in the scalar loop preheader.
8257   if (EPI.SCEVSafetyCheck)
8258     LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck);
8259   if (EPI.MemSafetyCheck)
8260     LoopBypassBlocks.push_back(EPI.MemSafetyCheck);
8261   LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck);
8262 
  // Generate a resume induction for the vector epilogue and put it in the
  // vector epilogue preheader.
8265   Type *IdxTy = Legal->getWidestInductionType();
8266   PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
8267                                          LoopVectorPreHeader->getFirstNonPHI());
8268   EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
8269   EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
8270                            EPI.MainLoopIterationCountCheck);
8271 
8272   // Generate the induction variable.
8273   createHeaderBranch(Lp);
8274 
8275   // Generate induction resume values. These variables save the new starting
8276   // indexes for the scalar loop. They are used to test if there are any tail
8277   // iterations left once the vector loop has completed.
  // Note that when the vectorized epilogue is skipped due to the iteration
  // count check, the resume value for the induction variable comes from the
  // trip count of the main vector loop, hence passing the AdditionalBypass
  // argument.
8282   createInductionResumeValues(Lp, {VecEpilogueIterationCountCheck,
8283                                    EPI.VectorTripCount} /* AdditionalBypass */);
8284 
8285   AddRuntimeUnrollDisableMetaData(Lp);
8286   return {completeLoopSkeleton(Lp, OrigLoopID), EPResumeVal};
8287 }
8288 
8289 BasicBlock *
8290 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
8291     Loop *L, BasicBlock *Bypass, BasicBlock *Insert) {
8292 
  assert(EPI.TripCount &&
         "Expected trip count to have been saved in the first pass.");
8295   assert(
8296       (!isa<Instruction>(EPI.TripCount) ||
8297        DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
8298       "saved trip count does not dominate insertion point.");
8299   Value *TC = EPI.TripCount;
8300   IRBuilder<> Builder(Insert->getTerminator());
8301   Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");
8302 
8303   // Generate code to check if the loop's trip count is less than VF * UF of the
8304   // vector epilogue loop.
8305   auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ?
8306       ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
8307 
8308   Value *CheckMinIters =
8309       Builder.CreateICmp(P, Count,
8310                          createStepForVF(Builder, Count->getType(),
8311                                          EPI.EpilogueVF, EPI.EpilogueUF),
8312                          "min.epilog.iters.check");
8313 
8314   ReplaceInstWithInst(
8315       Insert->getTerminator(),
8316       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8317 
8318   LoopBypassBlocks.push_back(Insert);
8319   return Insert;
8320 }
8321 
8322 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
8323   LLVM_DEBUG({
8324     dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
8325            << "Epilogue Loop VF:" << EPI.EpilogueVF
8326            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8327   });
8328 }
8329 
8330 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
8331   DEBUG_WITH_TYPE(VerboseDebug, {
8332     dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n";
8333   });
8334 }
8335 
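// Evaluate Predicate on the VFs in [Range.Start, Range.End), clamp Range.End
// down to the first VF whose result differs from that of Range.Start, and
// return Predicate's value at Range.Start.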
8336 bool LoopVectorizationPlanner::getDecisionAndClampRange(
8337     const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
8338   assert(!Range.isEmpty() && "Trying to test an empty VF range.");
8339   bool PredicateAtRangeStart = Predicate(Range.Start);
8340 
8341   for (ElementCount TmpVF = Range.Start * 2;
8342        ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
8343     if (Predicate(TmpVF) != PredicateAtRangeStart) {
8344       Range.End = TmpVF;
8345       break;
8346     }
8347 
8348   return PredicateAtRangeStart;
8349 }
8350 
8351 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
8352 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
8353 /// of VF's starting at a given VF and extending it as much as possible. Each
8354 /// vectorization decision can potentially shorten this sub-range during
8355 /// buildVPlan().
8356 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
8357                                            ElementCount MaxVF) {
8358   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8359   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8360     VFRange SubRange = {VF, MaxVFPlusOne};
8361     VPlans.push_back(buildVPlan(SubRange));
8362     VF = SubRange.End;
8363   }
8364 }
8365 
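// Compute (and cache) the mask for the CFG edge from Src to Dst: the block-in
// mask of Src combined with Src's branch condition, negated when Dst is the
// false successor, using a select rather than an 'and' to avoid propagating
// poison from the edge condition.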
8366 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
8367                                          VPlanPtr &Plan) {
8368   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
8369 
8370   // Look for cached value.
8371   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
8372   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
8373   if (ECEntryIt != EdgeMaskCache.end())
8374     return ECEntryIt->second;
8375 
8376   VPValue *SrcMask = createBlockInMask(Src, Plan);
8377 
8378   // The terminator has to be a branch inst!
8379   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
8380   assert(BI && "Unexpected terminator found");
8381 
8382   if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
8383     return EdgeMaskCache[Edge] = SrcMask;
8384 
8385   // If source is an exiting block, we know the exit edge is dynamically dead
8386   // in the vector loop, and thus we don't need to restrict the mask.  Avoid
8387   // adding uses of an otherwise potentially dead instruction.
8388   if (OrigLoop->isLoopExiting(Src))
8389     return EdgeMaskCache[Edge] = SrcMask;
8390 
8391   VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition());
8392   assert(EdgeMask && "No Edge Mask found for condition");
8393 
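  // If Dst is the false successor of the branch, the edge is taken when the
  // condition is false, so negate the condition to form the edge mask.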
8394   if (BI->getSuccessor(0) != Dst)
8395     EdgeMask = Builder.createNot(EdgeMask, BI->getDebugLoc());
8396 
8397   if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND.
8398     // The condition is 'SrcMask && EdgeMask', which is equivalent to
8399     // 'select i1 SrcMask, i1 EdgeMask, i1 false'.
8400     // The select version does not introduce new UB if SrcMask is false and
8401     // EdgeMask is poison. Using 'and' here introduces undefined behavior.
8402     VPValue *False = Plan->getOrAddVPValue(
8403         ConstantInt::getFalse(BI->getCondition()->getType()));
8404     EdgeMask =
8405         Builder.createSelect(SrcMask, EdgeMask, False, BI->getDebugLoc());
8406   }
8407 
8408   return EdgeMaskCache[Edge] = EdgeMask;
8409 }
8410 
8411 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
8412   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
8413 
8414   // Look for cached value.
8415   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
8416   if (BCEntryIt != BlockMaskCache.end())
8417     return BCEntryIt->second;
8418 
8419   // All-one mask is modelled as no-mask following the convention for masked
8420   // load/store/gather/scatter. Initialize BlockMask to no-mask.
8421   VPValue *BlockMask = nullptr;
8422 
8423   if (OrigLoop->getHeader() == BB) {
8424     if (!CM.blockNeedsPredicationForAnyReason(BB))
8425       return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
8426 
8427     // Introduce the early-exit compare IV <= BTC to form header block mask.
8428     // This is used instead of IV < TC because TC may wrap, unlike BTC. Start by
8429     // constructing the desired canonical IV in the header block as its first
    // non-phi instruction.
8431     assert(CM.foldTailByMasking() && "must fold the tail");
8432     VPBasicBlock *HeaderVPBB = Plan->getEntry()->getEntryBasicBlock();
8433     auto NewInsertionPoint = HeaderVPBB->getFirstNonPhi();
8434 
8435     VPValue *IV = nullptr;
8436     if (Legal->getPrimaryInduction())
8437       IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction());
8438     else {
8439       auto *IVRecipe = new VPWidenCanonicalIVRecipe(Plan->getCanonicalIV());
8440       HeaderVPBB->insert(IVRecipe, NewInsertionPoint);
8441       IV = IVRecipe;
8442     }
8443 
8444     VPBuilder::InsertPointGuard Guard(Builder);
8445     Builder.setInsertPoint(HeaderVPBB, NewInsertionPoint);
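    // If the target wants get.active.lane.mask, form the header mask from
    // (IV, TripCount); otherwise compare IV <= BackedgeTakenCount directly.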
8446     if (CM.TTI.emitGetActiveLaneMask()) {
8447       VPValue *TC = Plan->getOrCreateTripCount();
8448       BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV, TC});
8449     } else {
8450       VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
8451       BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
8452     }
8453     return BlockMaskCache[BB] = BlockMask;
8454   }
8455 
8456   // This is the block mask. We OR all incoming edges.
8457   for (auto *Predecessor : predecessors(BB)) {
8458     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
8459     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
8460       return BlockMaskCache[BB] = EdgeMask;
8461 
8462     if (!BlockMask) { // BlockMask has its initialized nullptr value.
8463       BlockMask = EdgeMask;
8464       continue;
8465     }
8466 
8467     BlockMask = Builder.createOr(BlockMask, EdgeMask, {});
8468   }
8469 
8470   return BlockMaskCache[BB] = BlockMask;
8471 }
8472 
8473 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I,
8474                                                 ArrayRef<VPValue *> Operands,
8475                                                 VFRange &Range,
8476                                                 VPlanPtr &Plan) {
8477   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
8478          "Must be called with either a load or store");
8479 
8480   auto willWiden = [&](ElementCount VF) -> bool {
8481     if (VF.isScalar())
8482       return false;
8483     LoopVectorizationCostModel::InstWidening Decision =
8484         CM.getWideningDecision(I, VF);
8485     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
8486            "CM decision should be taken at this point.");
8487     if (Decision == LoopVectorizationCostModel::CM_Interleave)
8488       return true;
8489     if (CM.isScalarAfterVectorization(I, VF) ||
8490         CM.isProfitableToScalarize(I, VF))
8491       return false;
8492     return Decision != LoopVectorizationCostModel::CM_Scalarize;
8493   };
8494 
8495   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8496     return nullptr;
8497 
8498   VPValue *Mask = nullptr;
8499   if (Legal->isMaskRequired(I))
8500     Mask = createBlockInMask(I->getParent(), Plan);
8501 
8502   // Determine if the pointer operand of the access is either consecutive or
8503   // reverse consecutive.
8504   LoopVectorizationCostModel::InstWidening Decision =
8505       CM.getWideningDecision(I, Range.Start);
8506   bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse;
8507   bool Consecutive =
8508       Reverse || Decision == LoopVectorizationCostModel::CM_Widen;
8509 
8510   if (LoadInst *Load = dyn_cast<LoadInst>(I))
8511     return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask,
8512                                               Consecutive, Reverse);
8513 
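  // For stores, Operands follow the IR operand order (stored value first, then
  // address), while the recipe takes the address before the stored value.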
8514   StoreInst *Store = cast<StoreInst>(I);
8515   return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0],
8516                                             Mask, Consecutive, Reverse);
8517 }
8518 
8519 VPWidenIntOrFpInductionRecipe *
8520 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi,
8521                                            ArrayRef<VPValue *> Operands) const {
8522   // Check if this is an integer or fp induction. If so, build the recipe that
8523   // produces its scalar and vector values.
8524   if (auto *II = Legal->getIntOrFpInductionDescriptor(Phi)) {
8525     assert(II->getStartValue() ==
8526            Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8527     return new VPWidenIntOrFpInductionRecipe(Phi, Operands[0], *II);
8528   }
8529 
8530   return nullptr;
8531 }
8532 
8533 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate(
8534     TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range,
8535     VPlan &Plan) const {
8536   // Optimize the special case where the source is a constant integer
8537   // induction variable. Notice that we can only optimize the 'trunc' case
8538   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
8539   // (c) other casts depend on pointer size.
8540 
8541   // Determine whether \p K is a truncation based on an induction variable that
8542   // can be optimized.
8543   auto isOptimizableIVTruncate =
8544       [&](Instruction *K) -> std::function<bool(ElementCount)> {
8545     return [=](ElementCount VF) -> bool {
8546       return CM.isOptimizableIVTruncate(K, VF);
8547     };
8548   };
8549 
8550   if (LoopVectorizationPlanner::getDecisionAndClampRange(
8551           isOptimizableIVTruncate(I), Range)) {
8552 
8553     auto *Phi = cast<PHINode>(I->getOperand(0));
8554     const InductionDescriptor &II = *Legal->getIntOrFpInductionDescriptor(Phi);
8555     VPValue *Start = Plan.getOrAddVPValue(II.getStartValue());
8556     return new VPWidenIntOrFpInductionRecipe(Phi, Start, II, I);
8557   }
8558   return nullptr;
8559 }
8560 
8561 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi,
8562                                                 ArrayRef<VPValue *> Operands,
8563                                                 VPlanPtr &Plan) {
8564   // If all incoming values are equal, the incoming VPValue can be used directly
8565   // instead of creating a new VPBlendRecipe.
8566   VPValue *FirstIncoming = Operands[0];
8567   if (all_of(Operands, [FirstIncoming](const VPValue *Inc) {
8568         return FirstIncoming == Inc;
8569       })) {
8570     return Operands[0];
8571   }
8572 
8573   // We know that all PHIs in non-header blocks are converted into selects, so
8574   // we don't have to worry about the insertion order and we can just use the
8575   // builder. At this point we generate the predication tree. There may be
8576   // duplications since this is a simple recursive scan, but future
8577   // optimizations will clean it up.
8578   SmallVector<VPValue *, 2> OperandsWithMask;
8579   unsigned NumIncoming = Phi->getNumIncomingValues();
8580 
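  // Build the blend operands as (incoming value, edge mask) pairs; the mask is
  // only omitted when the phi has a single incoming value whose mask is all-one.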
8581   for (unsigned In = 0; In < NumIncoming; In++) {
8582     VPValue *EdgeMask =
8583       createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
8584     assert((EdgeMask || NumIncoming == 1) &&
8585            "Multiple predecessors with one having a full mask");
8586     OperandsWithMask.push_back(Operands[In]);
8587     if (EdgeMask)
8588       OperandsWithMask.push_back(EdgeMask);
8589   }
8590   return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask));
8591 }
8592 
8593 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
8594                                                    ArrayRef<VPValue *> Operands,
8595                                                    VFRange &Range) const {
8596 
8597   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8598       [this, CI](ElementCount VF) {
8599         return CM.isScalarWithPredication(CI, VF);
8600       },
8601       Range);
8602 
8603   if (IsPredicated)
8604     return nullptr;
8605 
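  // These intrinsics are never widened as (vector) calls; bail out so they are
  // handled by the scalarization/replication path instead.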
8606   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8607   if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
8608              ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
8609              ID == Intrinsic::pseudoprobe ||
8610              ID == Intrinsic::experimental_noalias_scope_decl))
8611     return nullptr;
8612 
8613   auto willWiden = [&](ElementCount VF) -> bool {
8614     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8615     // The following case may be scalarized depending on the VF.
    // The flag indicates whether to use an intrinsic or a regular call for the
    // vectorized version of the instruction, i.e. whether the intrinsic call
    // is more beneficial than the library call.
8619     bool NeedToScalarize = false;
8620     InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
8621     InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0;
8622     bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
8623     return UseVectorIntrinsic || !NeedToScalarize;
8624   };
8625 
8626   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8627     return nullptr;
8628 
8629   ArrayRef<VPValue *> Ops = Operands.take_front(CI->arg_size());
8630   return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end()));
8631 }
8632 
8633 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
8634   assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
8635          !isa<StoreInst>(I) && "Instruction should have been handled earlier");
  // The instruction should be widened, unless it is scalar after
  // vectorization, scalarization is profitable, or it is predicated.
8638   auto WillScalarize = [this, I](ElementCount VF) -> bool {
8639     return CM.isScalarAfterVectorization(I, VF) ||
8640            CM.isProfitableToScalarize(I, VF) ||
8641            CM.isScalarWithPredication(I, VF);
8642   };
8643   return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
8644                                                              Range);
8645 }
8646 
8647 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
8648                                            ArrayRef<VPValue *> Operands) const {
8649   auto IsVectorizableOpcode = [](unsigned Opcode) {
8650     switch (Opcode) {
8651     case Instruction::Add:
8652     case Instruction::And:
8653     case Instruction::AShr:
8654     case Instruction::BitCast:
8655     case Instruction::FAdd:
8656     case Instruction::FCmp:
8657     case Instruction::FDiv:
8658     case Instruction::FMul:
8659     case Instruction::FNeg:
8660     case Instruction::FPExt:
8661     case Instruction::FPToSI:
8662     case Instruction::FPToUI:
8663     case Instruction::FPTrunc:
8664     case Instruction::FRem:
8665     case Instruction::FSub:
8666     case Instruction::ICmp:
8667     case Instruction::IntToPtr:
8668     case Instruction::LShr:
8669     case Instruction::Mul:
8670     case Instruction::Or:
8671     case Instruction::PtrToInt:
8672     case Instruction::SDiv:
8673     case Instruction::Select:
8674     case Instruction::SExt:
8675     case Instruction::Shl:
8676     case Instruction::SIToFP:
8677     case Instruction::SRem:
8678     case Instruction::Sub:
8679     case Instruction::Trunc:
8680     case Instruction::UDiv:
8681     case Instruction::UIToFP:
8682     case Instruction::URem:
8683     case Instruction::Xor:
8684     case Instruction::ZExt:
8685       return true;
8686     }
8687     return false;
8688   };
8689 
8690   if (!IsVectorizableOpcode(I->getOpcode()))
8691     return nullptr;
8692 
8693   // Success: widen this instruction.
8694   return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end()));
8695 }
8696 
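// Add the value incoming from the loop backedge as an extra operand to header
// phi recipes that were created earlier, now that recipes exist for all of the
// instructions feeding those backedges.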
8697 void VPRecipeBuilder::fixHeaderPhis() {
8698   BasicBlock *OrigLatch = OrigLoop->getLoopLatch();
8699   for (VPHeaderPHIRecipe *R : PhisToFix) {
8700     auto *PN = cast<PHINode>(R->getUnderlyingValue());
8701     VPRecipeBase *IncR =
8702         getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch)));
8703     R->addOperand(IncR->getVPSingleValue());
8704   }
8705 }
8706 
8707 VPBasicBlock *VPRecipeBuilder::handleReplication(
8708     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
8709     VPlanPtr &Plan) {
8710   bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
8711       [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
8712       Range);
8713 
8714   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8715       [&](ElementCount VF) { return CM.isPredicatedInst(I, VF, IsUniform); },
8716       Range);
8717 
8718   // Even if the instruction is not marked as uniform, there are certain
8719   // intrinsic calls that can be effectively treated as such, so we check for
8720   // them here. Conservatively, we only do this for scalable vectors, since
8721   // for fixed-width VFs we can always fall back on full scalarization.
8722   if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) {
8723     switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
8724     case Intrinsic::assume:
8725     case Intrinsic::lifetime_start:
8726     case Intrinsic::lifetime_end:
8727       // For scalable vectors if one of the operands is variant then we still
8728       // want to mark as uniform, which will generate one instruction for just
8729       // the first lane of the vector. We can't scalarize the call in the same
8730       // way as for fixed-width vectors because we don't know how many lanes
8731       // there are.
8732       //
8733       // The reasons for doing it this way for scalable vectors are:
      //   1. For the assume intrinsic, generating the instruction for the
      //      first lane is still better than not generating one at all. For
      //      example, the input may be a splat across all lanes.
8737       //   2. For the lifetime start/end intrinsics the pointer operand only
8738       //      does anything useful when the input comes from a stack object,
8739       //      which suggests it should always be uniform. For non-stack objects
8740       //      the effect is to poison the object, which still allows us to
8741       //      remove the call.
8742       IsUniform = true;
8743       break;
8744     default:
8745       break;
8746     }
8747   }
8748 
8749   auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
8750                                        IsUniform, IsPredicated);
8751   setRecipe(I, Recipe);
8752   Plan->addVPValue(I, Recipe);
8753 
8754   // Find if I uses a predicated instruction. If so, it will use its scalar
8755   // value. Avoid hoisting the insert-element which packs the scalar value into
8756   // a vector value, as that happens iff all users use the vector value.
8757   for (VPValue *Op : Recipe->operands()) {
8758     auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef());
8759     if (!PredR)
8760       continue;
8761     auto *RepR =
8762         cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef());
8763     assert(RepR->isPredicated() &&
8764            "expected Replicate recipe to be predicated");
8765     RepR->setAlsoPack(false);
8766   }
8767 
8768   // Finalize the recipe for Instr, first if it is not predicated.
8769   if (!IsPredicated) {
8770     LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
8771     VPBB->appendRecipe(Recipe);
8772     return VPBB;
8773   }
8774   LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
8775 
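  // Wrap the predicated recipe in a single-entry single-exit replicate region
  // and splice that region into the CFG between VPBB and its single successor.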
8776   VPBlockBase *SingleSucc = VPBB->getSingleSuccessor();
8777   assert(SingleSucc && "VPBB must have a single successor when handling "
8778                        "predicated replication.");
8779   VPBlockUtils::disconnectBlocks(VPBB, SingleSucc);
8780   // Record predicated instructions for above packing optimizations.
8781   VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
8782   VPBlockUtils::insertBlockAfter(Region, VPBB);
8783   auto *RegSucc = new VPBasicBlock();
8784   VPBlockUtils::insertBlockAfter(RegSucc, Region);
8785   VPBlockUtils::connectBlocks(RegSucc, SingleSucc);
8786   return RegSucc;
8787 }
8788 
8789 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
8790                                                       VPRecipeBase *PredRecipe,
8791                                                       VPlanPtr &Plan) {
8792   // Instructions marked for predication are replicated and placed under an
8793   // if-then construct to prevent side-effects.
8794 
8795   // Generate recipes to compute the block mask for this region.
8796   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
8797 
8798   // Build the triangular if-then region.
8799   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
8800   assert(Instr->getParent() && "Predicated instruction not in any basic block");
8801   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
8802   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
8803   auto *PHIRecipe = Instr->getType()->isVoidTy()
8804                         ? nullptr
8805                         : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr));
8806   if (PHIRecipe) {
8807     Plan->removeVPValueFor(Instr);
8808     Plan->addVPValue(Instr, PHIRecipe);
8809   }
8810   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
8811   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
8812   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
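  // The region is triangular: Entry branches on the mask either to Pred (which
  // holds the replicated recipe) or directly to Exit; Pred falls through to
  // Exit, which holds the optional phi merging the predicated value.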
8813 
8814   // Note: first set Entry as region entry and then connect successors starting
8815   // from it in order, to propagate the "parent" of each VPBasicBlock.
8816   VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
8817   VPBlockUtils::connectBlocks(Pred, Exit);
8818 
8819   return Region;
8820 }
8821 
8822 VPRecipeOrVPValueTy
8823 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
8824                                         ArrayRef<VPValue *> Operands,
8825                                         VFRange &Range, VPlanPtr &Plan) {
8826   // First, check for specific widening recipes that deal with calls, memory
8827   // operations, inductions and Phi nodes.
8828   if (auto *CI = dyn_cast<CallInst>(Instr))
8829     return toVPRecipeResult(tryToWidenCall(CI, Operands, Range));
8830 
8831   if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8832     return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan));
8833 
8834   VPRecipeBase *Recipe;
8835   if (auto Phi = dyn_cast<PHINode>(Instr)) {
8836     if (Phi->getParent() != OrigLoop->getHeader())
8837       return tryToBlend(Phi, Operands, Plan);
8838     if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands)))
8839       return toVPRecipeResult(Recipe);
8840 
8841     VPHeaderPHIRecipe *PhiRecipe = nullptr;
8842     if (Legal->isReductionVariable(Phi) || Legal->isFirstOrderRecurrence(Phi)) {
8843       VPValue *StartV = Operands[0];
8844       if (Legal->isReductionVariable(Phi)) {
8845         const RecurrenceDescriptor &RdxDesc =
8846             Legal->getReductionVars().find(Phi)->second;
8847         assert(RdxDesc.getRecurrenceStartValue() ==
8848                Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8849         PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV,
8850                                              CM.isInLoopReduction(Phi),
8851                                              CM.useOrderedReductions(RdxDesc));
8852       } else {
8853         PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV);
8854       }
8855 
      // Record the incoming value from the backedge, so we can add it to the
      // phi recipe after all recipes have been created.
8858       recordRecipeOf(cast<Instruction>(
8859           Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch())));
8860       PhisToFix.push_back(PhiRecipe);
8861     } else {
8862       // TODO: record backedge value for remaining pointer induction phis.
8863       assert(Phi->getType()->isPointerTy() &&
8864              "only pointer phis should be handled here");
8865       assert(Legal->getInductionVars().count(Phi) &&
8866              "Not an induction variable");
8867       InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
8868       VPValue *Start = Plan->getOrAddVPValue(II.getStartValue());
8869       PhiRecipe = new VPWidenPHIRecipe(Phi, Start);
8870     }
8871 
8872     return toVPRecipeResult(PhiRecipe);
8873   }
8874 
8875   if (isa<TruncInst>(Instr) &&
8876       (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands,
8877                                                Range, *Plan)))
8878     return toVPRecipeResult(Recipe);
8879 
8880   if (!shouldWiden(Instr, Range))
8881     return nullptr;
8882 
8883   if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
8884     return toVPRecipeResult(new VPWidenGEPRecipe(
8885         GEP, make_range(Operands.begin(), Operands.end()), OrigLoop));
8886 
8887   if (auto *SI = dyn_cast<SelectInst>(Instr)) {
8888     bool InvariantCond =
8889         PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
8890     return toVPRecipeResult(new VPWidenSelectRecipe(
8891         *SI, make_range(Operands.begin(), Operands.end()), InvariantCond));
8892   }
8893 
8894   return toVPRecipeResult(tryToWiden(Instr, Operands));
8895 }
8896 
8897 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8898                                                         ElementCount MaxVF) {
8899   assert(OrigLoop->isInnermost() && "Inner loop expected.");
8900 
8901   // Collect instructions from the original loop that will become trivially dead
8902   // in the vectorized loop. We don't need to vectorize these instructions. For
8903   // example, original induction update instructions can become dead because we
8904   // separately emit induction "steps" when generating code for the new loop.
8905   // Similarly, we create a new latch condition when setting up the structure
8906   // of the new loop, so the old one can become dead.
8907   SmallPtrSet<Instruction *, 4> DeadInstructions;
8908   collectTriviallyDeadInstructions(DeadInstructions);
8909 
8910   // Add assume instructions we need to drop to DeadInstructions, to prevent
8911   // them from being added to the VPlan.
  // TODO: We only need to drop assumes in blocks that get flattened. If the
8913   // control flow is preserved, we should keep them.
8914   auto &ConditionalAssumes = Legal->getConditionalAssumes();
8915   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
8916 
8917   MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
8918   // Dead instructions do not need sinking. Remove them from SinkAfter.
8919   for (Instruction *I : DeadInstructions)
8920     SinkAfter.erase(I);
8921 
8922   // Cannot sink instructions after dead instructions (there won't be any
8923   // recipes for them). Instead, find the first non-dead previous instruction.
8924   for (auto &P : Legal->getSinkAfter()) {
8925     Instruction *SinkTarget = P.second;
8926     Instruction *FirstInst = &*SinkTarget->getParent()->begin();
8927     (void)FirstInst;
8928     while (DeadInstructions.contains(SinkTarget)) {
8929       assert(
8930           SinkTarget != FirstInst &&
8931           "Must find a live instruction (at least the one feeding the "
8932           "first-order recurrence PHI) before reaching beginning of the block");
8933       SinkTarget = SinkTarget->getPrevNode();
8934       assert(SinkTarget != P.first &&
8935              "sink source equals target, no sinking required");
8936     }
8937     P.second = SinkTarget;
8938   }
8939 
8940   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8941   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8942     VFRange SubRange = {VF, MaxVFPlusOne};
8943     VPlans.push_back(
8944         buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
8945     VF = SubRange.End;
8946   }
8947 }
8948 
8949 // Add a VPCanonicalIVPHIRecipe starting at 0 to the header, a
8950 // CanonicalIVIncrement{NUW} VPInstruction to increment it by VF * UF and a
8951 // BranchOnCount VPInstruction to the latch.
8952 static void addCanonicalIVRecipes(VPlan &Plan, Type *IdxTy, DebugLoc DL,
8953                                   bool HasNUW, bool IsVPlanNative) {
8954   Value *StartIdx = ConstantInt::get(IdxTy, 0);
8955   auto *StartV = Plan.getOrAddVPValue(StartIdx);
8956 
8957   auto *CanonicalIVPHI = new VPCanonicalIVPHIRecipe(StartV, DL);
8958   VPRegionBlock *TopRegion = Plan.getVectorLoopRegion();
8959   VPBasicBlock *Header = TopRegion->getEntryBasicBlock();
8960   if (IsVPlanNative)
8961     Header = cast<VPBasicBlock>(Header->getSingleSuccessor());
8962   Header->insert(CanonicalIVPHI, Header->begin());
8963 
8964   auto *CanonicalIVIncrement =
8965       new VPInstruction(HasNUW ? VPInstruction::CanonicalIVIncrementNUW
8966                                : VPInstruction::CanonicalIVIncrement,
8967                         {CanonicalIVPHI}, DL);
8968   CanonicalIVPHI->addOperand(CanonicalIVIncrement);
8969 
8970   VPBasicBlock *EB = TopRegion->getExitBasicBlock();
8971   if (IsVPlanNative) {
8972     EB = cast<VPBasicBlock>(EB->getSinglePredecessor());
8973     EB->setCondBit(nullptr);
8974   }
8975   EB->appendRecipe(CanonicalIVIncrement);
8976 
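  // BranchOnCount compares the incremented IV against the vector trip count to
  // decide whether to exit the vector loop region.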
8977   auto *BranchOnCount =
8978       new VPInstruction(VPInstruction::BranchOnCount,
8979                         {CanonicalIVIncrement, &Plan.getVectorTripCount()}, DL);
8980   EB->appendRecipe(BranchOnCount);
8981 }
8982 
8983 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
8984     VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
8985     const MapVector<Instruction *, Instruction *> &SinkAfter) {
8986 
8987   SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
8988 
8989   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
8990 
8991   // ---------------------------------------------------------------------------
8992   // Pre-construction: record ingredients whose recipes we'll need to further
8993   // process after constructing the initial VPlan.
8994   // ---------------------------------------------------------------------------
8995 
8996   // Mark instructions we'll need to sink later and their targets as
8997   // ingredients whose recipe we'll need to record.
8998   for (auto &Entry : SinkAfter) {
8999     RecipeBuilder.recordRecipeOf(Entry.first);
9000     RecipeBuilder.recordRecipeOf(Entry.second);
9001   }
9002   for (auto &Reduction : CM.getInLoopReductionChains()) {
9003     PHINode *Phi = Reduction.first;
9004     RecurKind Kind =
9005         Legal->getReductionVars().find(Phi)->second.getRecurrenceKind();
9006     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9007 
9008     RecipeBuilder.recordRecipeOf(Phi);
9009     for (auto &R : ReductionOperations) {
9010       RecipeBuilder.recordRecipeOf(R);
      // For min/max reductions, where we have a pair of icmp/select, we also
9012       // need to record the ICmp recipe, so it can be removed later.
9013       assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
9014              "Only min/max recurrences allowed for inloop reductions");
9015       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
9016         RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
9017     }
9018   }
9019 
9020   // For each interleave group which is relevant for this (possibly trimmed)
9021   // Range, add it to the set of groups to be later applied to the VPlan and add
9022   // placeholders for its members' Recipes which we'll be replacing with a
9023   // single VPInterleaveRecipe.
9024   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
9025     auto applyIG = [IG, this](ElementCount VF) -> bool {
9026       return (VF.isVector() && // Query is illegal for VF == 1
9027               CM.getWideningDecision(IG->getInsertPos(), VF) ==
9028                   LoopVectorizationCostModel::CM_Interleave);
9029     };
9030     if (!getDecisionAndClampRange(applyIG, Range))
9031       continue;
9032     InterleaveGroups.insert(IG);
9033     for (unsigned i = 0; i < IG->getFactor(); i++)
9034       if (Instruction *Member = IG->getMember(i))
9035         RecipeBuilder.recordRecipeOf(Member);
9036   };
9037 
9038   // ---------------------------------------------------------------------------
9039   // Build initial VPlan: Scan the body of the loop in a topological order to
9040   // visit each basic block after having visited its predecessor basic blocks.
9041   // ---------------------------------------------------------------------------
9042 
9043   // Create initial VPlan skeleton, with separate header and latch blocks.
9044   VPBasicBlock *HeaderVPBB = new VPBasicBlock();
9045   VPBasicBlock *LatchVPBB = new VPBasicBlock("vector.latch");
9046   VPBlockUtils::insertBlockAfter(LatchVPBB, HeaderVPBB);
9047   auto *TopRegion = new VPRegionBlock(HeaderVPBB, LatchVPBB, "vector loop");
9048   auto Plan = std::make_unique<VPlan>(TopRegion);
9049 
9050   Instruction *DLInst =
9051       getDebugLocFromInstOrOperands(Legal->getPrimaryInduction());
9052   addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(),
9053                         DLInst ? DLInst->getDebugLoc() : DebugLoc(),
9054                         !CM.foldTailByMasking(), false);
9055 
9056   // Scan the body of the loop in a topological order to visit each basic block
9057   // after having visited its predecessor basic blocks.
9058   LoopBlocksDFS DFS(OrigLoop);
9059   DFS.perform(LI);
9060 
9061   VPBasicBlock *VPBB = HeaderVPBB;
9062   SmallVector<VPWidenIntOrFpInductionRecipe *> InductionsToMove;
9063   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
9064     // Relevant instructions from basic block BB will be grouped into VPRecipe
9065     // ingredients and fill a new VPBasicBlock.
9066     unsigned VPBBsForBB = 0;
9067     VPBB->setName(BB->getName());
9068     Builder.setInsertPoint(VPBB);
9069 
9070     // Introduce each ingredient into VPlan.
    // TODO: Model and preserve debug intrinsics in VPlan.
9072     for (Instruction &I : BB->instructionsWithoutDebug()) {
9073       Instruction *Instr = &I;
9074 
9075       // First filter out irrelevant instructions, to ensure no recipes are
9076       // built for them.
9077       if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
9078         continue;
9079 
9080       SmallVector<VPValue *, 4> Operands;
9081       auto *Phi = dyn_cast<PHINode>(Instr);
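      // Header phis only get their start value as operand here; the backedge
      // value for reduction and first-order recurrence phis is added later,
      // once a recipe exists for it (see fixHeaderPhis).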
9082       if (Phi && Phi->getParent() == OrigLoop->getHeader()) {
9083         Operands.push_back(Plan->getOrAddVPValue(
9084             Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())));
9085       } else {
9086         auto OpRange = Plan->mapToVPValues(Instr->operands());
9087         Operands = {OpRange.begin(), OpRange.end()};
9088       }
9089       if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe(
9090               Instr, Operands, Range, Plan)) {
9091         // If Instr can be simplified to an existing VPValue, use it.
9092         if (RecipeOrValue.is<VPValue *>()) {
9093           auto *VPV = RecipeOrValue.get<VPValue *>();
9094           Plan->addVPValue(Instr, VPV);
9095           // If the re-used value is a recipe, register the recipe for the
9096           // instruction, in case the recipe for Instr needs to be recorded.
9097           if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef()))
9098             RecipeBuilder.setRecipe(Instr, R);
9099           continue;
9100         }
9101         // Otherwise, add the new recipe.
9102         VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>();
9103         for (auto *Def : Recipe->definedValues()) {
9104           auto *UV = Def->getUnderlyingValue();
9105           Plan->addVPValue(UV, Def);
9106         }
9107 
9108         if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) &&
9109             HeaderVPBB->getFirstNonPhi() != VPBB->end()) {
9110           // Keep track of VPWidenIntOrFpInductionRecipes not in the phi section
9111           // of the header block. That can happen for truncates of induction
9112           // variables. Those recipes are moved to the phi section of the header
9113           // block after applying SinkAfter, which relies on the original
9114           // position of the trunc.
9115           assert(isa<TruncInst>(Instr));
9116           InductionsToMove.push_back(
9117               cast<VPWidenIntOrFpInductionRecipe>(Recipe));
9118         }
9119         RecipeBuilder.setRecipe(Instr, Recipe);
9120         VPBB->appendRecipe(Recipe);
9121         continue;
9122       }
9123 
      // Otherwise, if all widening options failed, the instruction is to be
      // replicated. This may create a successor for VPBB.
9126       VPBasicBlock *NextVPBB =
9127           RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan);
9128       if (NextVPBB != VPBB) {
9129         VPBB = NextVPBB;
9130         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
9131                                     : "");
9132       }
9133     }
9134 
9135     VPBlockUtils::insertBlockAfter(new VPBasicBlock(), VPBB);
9136     VPBB = cast<VPBasicBlock>(VPBB->getSingleSuccessor());
9137   }
9138 
9139   // Fold the last, empty block into its predecessor.
9140   VPBB = VPBlockUtils::tryToMergeBlockIntoPredecessor(VPBB);
9141   assert(VPBB && "expected to fold last (empty) block");
9142   // After here, VPBB should not be used.
9143   VPBB = nullptr;
9144 
9145   assert(isa<VPRegionBlock>(Plan->getEntry()) &&
9146          !Plan->getEntry()->getEntryBasicBlock()->empty() &&
9147          "entry block must be set to a VPRegionBlock having a non-empty entry "
9148          "VPBasicBlock");
9149   RecipeBuilder.fixHeaderPhis();
9150 
9151   // ---------------------------------------------------------------------------
9152   // Transform initial VPlan: Apply previously taken decisions, in order, to
9153   // bring the VPlan to its final state.
9154   // ---------------------------------------------------------------------------
9155 
9156   // Apply Sink-After legal constraints.
9157   auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * {
9158     auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent());
9159     if (Region && Region->isReplicator()) {
9160       assert(Region->getNumSuccessors() == 1 &&
9161              Region->getNumPredecessors() == 1 && "Expected SESE region!");
9162       assert(R->getParent()->size() == 1 &&
9163              "A recipe in an original replicator region must be the only "
9164              "recipe in its block");
9165       return Region;
9166     }
9167     return nullptr;
9168   };
9169   for (auto &Entry : SinkAfter) {
9170     VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
9171     VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
9172 
9173     auto *TargetRegion = GetReplicateRegion(Target);
9174     auto *SinkRegion = GetReplicateRegion(Sink);
9175     if (!SinkRegion) {
9176       // If the sink source is not a replicate region, sink the recipe directly.
9177       if (TargetRegion) {
9178         // The target is in a replication region, make sure to move Sink to
9179         // the block after it, not into the replication region itself.
9180         VPBasicBlock *NextBlock =
9181             cast<VPBasicBlock>(TargetRegion->getSuccessors().front());
9182         Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi());
9183       } else
9184         Sink->moveAfter(Target);
9185       continue;
9186     }
9187 
9188     // The sink source is in a replicate region. Unhook the region from the CFG.
9189     auto *SinkPred = SinkRegion->getSinglePredecessor();
9190     auto *SinkSucc = SinkRegion->getSingleSuccessor();
9191     VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion);
9192     VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc);
9193     VPBlockUtils::connectBlocks(SinkPred, SinkSucc);
9194 
9195     if (TargetRegion) {
9196       // The target recipe is also in a replicate region, move the sink region
9197       // after the target region.
9198       auto *TargetSucc = TargetRegion->getSingleSuccessor();
9199       VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc);
9200       VPBlockUtils::connectBlocks(TargetRegion, SinkRegion);
9201       VPBlockUtils::connectBlocks(SinkRegion, TargetSucc);
9202     } else {
9203       // The sink source is in a replicate region, we need to move the whole
9204       // replicate region, which should only contain a single recipe in the
9205       // main block.
9206       auto *SplitBlock =
9207           Target->getParent()->splitAt(std::next(Target->getIterator()));
9208 
9209       auto *SplitPred = SplitBlock->getSinglePredecessor();
9210 
9211       VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock);
9212       VPBlockUtils::connectBlocks(SplitPred, SinkRegion);
9213       VPBlockUtils::connectBlocks(SinkRegion, SplitBlock);
9214     }
9215   }
9216 
9217   VPlanTransforms::removeRedundantInductionCasts(*Plan);
9218 
9219   // Now that sink-after is done, move induction recipes for optimized truncates
9220   // to the phi section of the header block.
9221   for (VPWidenIntOrFpInductionRecipe *Ind : InductionsToMove)
9222     Ind->moveBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi());
9223 
9224   // Adjust the recipes for any inloop reductions.
9225   adjustRecipesForReductions(cast<VPBasicBlock>(TopRegion->getExit()), Plan,
9226                              RecipeBuilder, Range.Start);
9227 
9228   // Introduce a recipe to combine the incoming and previous values of a
9229   // first-order recurrence.
9230   for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) {
9231     auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R);
9232     if (!RecurPhi)
9233       continue;
9234 
9235     VPRecipeBase *PrevRecipe = RecurPhi->getBackedgeRecipe();
9236     VPBasicBlock *InsertBlock = PrevRecipe->getParent();
9237     auto *Region = GetReplicateRegion(PrevRecipe);
9238     if (Region)
9239       InsertBlock = cast<VPBasicBlock>(Region->getSingleSuccessor());
9240     if (Region || PrevRecipe->isPhi())
9241       Builder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi());
9242     else
9243       Builder.setInsertPoint(InsertBlock, std::next(PrevRecipe->getIterator()));
9244 
9245     auto *RecurSplice = cast<VPInstruction>(
9246         Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice,
9247                              {RecurPhi, RecurPhi->getBackedgeValue()}));
9248 
9249     RecurPhi->replaceAllUsesWith(RecurSplice);
9250     // Set the first operand of RecurSplice to RecurPhi again, after replacing
9251     // all users.
9252     RecurSplice->setOperand(0, RecurPhi);
9253   }
9254 
9255   // Interleave memory: for each Interleave Group we marked earlier as relevant
9256   // for this VPlan, replace the Recipes widening its memory instructions with a
9257   // single VPInterleaveRecipe at its insertion point.
9258   for (auto IG : InterleaveGroups) {
9259     auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
9260         RecipeBuilder.getRecipe(IG->getInsertPos()));
9261     SmallVector<VPValue *, 4> StoredValues;
9262     for (unsigned i = 0; i < IG->getFactor(); ++i)
9263       if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) {
9264         auto *StoreR =
9265             cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI));
9266         StoredValues.push_back(StoreR->getStoredValue());
9267       }
9268 
9269     auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues,
9270                                         Recipe->getMask());
9271     VPIG->insertBefore(Recipe);
9272     unsigned J = 0;
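    // J indexes the values defined by VPIG; only members that produce a value
    // (i.e. loads) have a corresponding defined value to replace.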
9273     for (unsigned i = 0; i < IG->getFactor(); ++i)
9274       if (Instruction *Member = IG->getMember(i)) {
9275         if (!Member->getType()->isVoidTy()) {
9276           VPValue *OriginalV = Plan->getVPValue(Member);
9277           Plan->removeVPValueFor(Member);
9278           Plan->addVPValue(Member, VPIG->getVPValue(J));
9279           OriginalV->replaceAllUsesWith(VPIG->getVPValue(J));
9280           J++;
9281         }
9282         RecipeBuilder.getRecipe(Member)->eraseFromParent();
9283       }
9284   }
9285 
  // From this point onwards, VPlan-to-VPlan transformations may change the plan
  // in ways that make accessing values using original IR values incorrect.
9288   Plan->disableValue2VPValue();
9289 
9290   VPlanTransforms::sinkScalarOperands(*Plan);
9291   VPlanTransforms::mergeReplicateRegions(*Plan);
9292 
9293   std::string PlanName;
9294   raw_string_ostream RSO(PlanName);
9295   ElementCount VF = Range.Start;
9296   Plan->addVF(VF);
9297   RSO << "Initial VPlan for VF={" << VF;
9298   for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) {
9299     Plan->addVF(VF);
9300     RSO << "," << VF;
9301   }
9302   RSO << "},UF>=1";
9303   RSO.flush();
9304   Plan->setName(PlanName);
9305 
9306   // Fold Exit block into its predecessor if possible.
9307   // TODO: Fold block earlier once all VPlan transforms properly maintain a
9308   // VPBasicBlock as exit.
9309   VPBlockUtils::tryToMergeBlockIntoPredecessor(TopRegion->getExit());
9310 
9311   assert(VPlanVerifier::verifyPlanIsValid(*Plan) && "VPlan is invalid");
9312   return Plan;
9313 }
9314 
9315 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
  // Outer loop handling: outer loops may require CFG and instruction-level
  // transformations before even evaluating whether vectorization is profitable.
9318   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
9319   // the vectorization pipeline.
9320   assert(!OrigLoop->isInnermost());
9321   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
9322 
9323   // Create new empty VPlan
9324   auto Plan = std::make_unique<VPlan>();
9325 
9326   // Build hierarchical CFG
9327   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
9328   HCFGBuilder.buildHierarchicalCFG();
9329 
9330   for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
9331        VF *= 2)
9332     Plan->addVF(VF);
9333 
9334   if (EnableVPlanPredication) {
9335     VPlanPredicator VPP(*Plan);
9336     VPP.predicate();
9337 
9338     // Avoid running transformation to recipes until masked code generation in
9339     // VPlan-native path is in place.
9340     return Plan;
9341   }
9342 
9343   SmallPtrSet<Instruction *, 1> DeadInstructions;
9344   VPlanTransforms::VPInstructionsToVPRecipes(
9345       OrigLoop, Plan,
9346       [this](PHINode *P) { return Legal->getIntOrFpInductionDescriptor(P); },
9347       DeadInstructions, *PSE.getSE());
9348 
9349   addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(), DebugLoc(),
9350                         true, true);
9351   return Plan;
9352 }
9353 
9354 // Adjust the recipes for reductions. For in-loop reductions the chain of
9355 // instructions leading from the loop exit instr to the phi need to be converted
9356 // to reductions, with one operand being vector and the other being the scalar
9357 // reduction chain. For other reductions, a select is introduced between the phi
9358 // and live-out recipes when folding the tail.
9359 void LoopVectorizationPlanner::adjustRecipesForReductions(
9360     VPBasicBlock *LatchVPBB, VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder,
9361     ElementCount MinVF) {
9362   for (auto &Reduction : CM.getInLoopReductionChains()) {
9363     PHINode *Phi = Reduction.first;
9364     const RecurrenceDescriptor &RdxDesc =
9365         Legal->getReductionVars().find(Phi)->second;
9366     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9367 
9368     if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc))
9369       continue;
9370 
    // ReductionOperations are ordered top-down from the phi's use to the
    // LoopExitValue. We keep track of the previous item (the Chain) to tell
    // which of the two operands will remain scalar and which will be reduced.
    // For minmax the chain will be the select instructions.
9375     Instruction *Chain = Phi;
9376     for (Instruction *R : ReductionOperations) {
9377       VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
9378       RecurKind Kind = RdxDesc.getRecurrenceKind();
9379 
9380       VPValue *ChainOp = Plan->getVPValue(Chain);
9381       unsigned FirstOpId;
9382       assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
9383              "Only min/max recurrences allowed for inloop reductions");
9384       // Recognize a call to the llvm.fmuladd intrinsic.
9385       bool IsFMulAdd = (Kind == RecurKind::FMulAdd);
9386       assert((!IsFMulAdd || RecurrenceDescriptor::isFMulAddIntrinsic(R)) &&
9387              "Expected instruction to be a call to the llvm.fmuladd intrinsic");
9388       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9389         assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
9390                "Expected to replace a VPWidenSelectSC");
9391         FirstOpId = 1;
9392       } else {
9393         assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe) ||
9394                 (IsFMulAdd && isa<VPWidenCallRecipe>(WidenRecipe))) &&
9395                "Expected to replace a VPWidenSC");
9396         FirstOpId = 0;
9397       }
9398       unsigned VecOpId =
9399           R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
9400       VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));
9401 
9402       auto *CondOp = CM.foldTailByMasking()
9403                          ? RecipeBuilder.createBlockInMask(R->getParent(), Plan)
9404                          : nullptr;
9405 
9406       if (IsFMulAdd) {
9407         // If the instruction is a call to the llvm.fmuladd intrinsic then we
9408         // need to create an fmul recipe to use as the vector operand for the
9409         // fadd reduction.
9410         VPInstruction *FMulRecipe = new VPInstruction(
9411             Instruction::FMul, {VecOp, Plan->getVPValue(R->getOperand(1))});
9412         FMulRecipe->setFastMathFlags(R->getFastMathFlags());
9413         WidenRecipe->getParent()->insert(FMulRecipe,
9414                                          WidenRecipe->getIterator());
9415         VecOp = FMulRecipe;
9416       }
9417       VPReductionRecipe *RedRecipe =
9418           new VPReductionRecipe(&RdxDesc, R, ChainOp, VecOp, CondOp, TTI);
9420       Plan->removeVPValueFor(R);
9421       Plan->addVPValue(R, RedRecipe);
9422       WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator());
9423       WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
9424       WidenRecipe->eraseFromParent();
9425 
9426       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9427         VPRecipeBase *CompareRecipe =
9428             RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
9429         assert(isa<VPWidenRecipe>(CompareRecipe) &&
9430                "Expected to replace a VPWidenSC");
9431         assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 &&
9432                "Expected no remaining users");
9433         CompareRecipe->eraseFromParent();
9434       }
9435       Chain = R;
9436     }
9437   }
9438 
  // If the tail is folded by masking, introduce selects between the phi
9440   // and the live-out instruction of each reduction, at the beginning of the
9441   // dedicated latch block.
9442   if (CM.foldTailByMasking()) {
9443     Builder.setInsertPoint(LatchVPBB, LatchVPBB->begin());
9444     for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) {
9445       VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
9446       if (!PhiR || PhiR->isInLoop())
9447         continue;
9448       VPValue *Cond =
9449           RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
9450       VPValue *Red = PhiR->getBackedgeValue();
9451       assert(cast<VPRecipeBase>(Red->getDef())->getParent() != LatchVPBB &&
9452              "reduction recipe must be defined before latch");
9453       Builder.createNaryOp(Instruction::Select, {Cond, Red, PhiR});
9454     }
9455   }
9456 }
9457 
9458 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
9459 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
9460                                VPSlotTracker &SlotTracker) const {
9461   O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
9462   IG->getInsertPos()->printAsOperand(O, false);
9463   O << ", ";
9464   getAddr()->printAsOperand(O, SlotTracker);
9465   VPValue *Mask = getMask();
9466   if (Mask) {
9467     O << ", ";
9468     Mask->printAsOperand(O, SlotTracker);
9469   }
9470 
9471   unsigned OpIdx = 0;
9472   for (unsigned i = 0; i < IG->getFactor(); ++i) {
9473     if (!IG->getMember(i))
9474       continue;
9475     if (getNumStoreOperands() > 0) {
9476       O << "\n" << Indent << "  store ";
9477       getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker);
9478       O << " to index " << i;
9479     } else {
9480       O << "\n" << Indent << "  ";
9481       getVPValue(OpIdx)->printAsOperand(O, SlotTracker);
9482       O << " = load from index " << i;
9483     }
9484     ++OpIdx;
9485   }
9486 }
9487 #endif
9488 
9489 void VPWidenCallRecipe::execute(VPTransformState &State) {
9490   State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this,
9491                                   *this, State);
9492 }
9493 
9494 void VPWidenSelectRecipe::execute(VPTransformState &State) {
9495   auto &I = *cast<SelectInst>(getUnderlyingInstr());
9496   State.ILV->setDebugLocFromInst(&I);
9497 
  // The condition can be loop invariant but still defined inside the
9499   // loop. This means that we can't just use the original 'cond' value.
9500   // We have to take the 'vectorized' value and pick the first lane.
9501   // Instcombine will make this a no-op.
9502   auto *InvarCond =
9503       InvariantCond ? State.get(getOperand(0), VPIteration(0, 0)) : nullptr;
9504 
9505   for (unsigned Part = 0; Part < State.UF; ++Part) {
9506     Value *Cond = InvarCond ? InvarCond : State.get(getOperand(0), Part);
9507     Value *Op0 = State.get(getOperand(1), Part);
9508     Value *Op1 = State.get(getOperand(2), Part);
9509     Value *Sel = State.Builder.CreateSelect(Cond, Op0, Op1);
9510     State.set(this, Sel, Part);
9511     State.ILV->addMetadata(Sel, &I);
9512   }
9513 }
9514 
9515 void VPWidenRecipe::execute(VPTransformState &State) {
9516   auto &I = *cast<Instruction>(getUnderlyingValue());
9517   auto &Builder = State.Builder;
9518   switch (I.getOpcode()) {
9519   case Instruction::Call:
9520   case Instruction::Br:
9521   case Instruction::PHI:
9522   case Instruction::GetElementPtr:
9523   case Instruction::Select:
9524     llvm_unreachable("This instruction is handled by a different recipe.");
9525   case Instruction::UDiv:
9526   case Instruction::SDiv:
9527   case Instruction::SRem:
9528   case Instruction::URem:
9529   case Instruction::Add:
9530   case Instruction::FAdd:
9531   case Instruction::Sub:
9532   case Instruction::FSub:
9533   case Instruction::FNeg:
9534   case Instruction::Mul:
9535   case Instruction::FMul:
9536   case Instruction::FDiv:
9537   case Instruction::FRem:
9538   case Instruction::Shl:
9539   case Instruction::LShr:
9540   case Instruction::AShr:
9541   case Instruction::And:
9542   case Instruction::Or:
9543   case Instruction::Xor: {
9544     // Just widen unops and binops.
9545     State.ILV->setDebugLocFromInst(&I);
9546 
9547     for (unsigned Part = 0; Part < State.UF; ++Part) {
9548       SmallVector<Value *, 2> Ops;
9549       for (VPValue *VPOp : operands())
9550         Ops.push_back(State.get(VPOp, Part));
9551 
9552       Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
9553 
9554       if (auto *VecOp = dyn_cast<Instruction>(V)) {
9555         VecOp->copyIRFlags(&I);
9556 
9557         // If the instruction is vectorized and was in a basic block that needed
9558         // predication, we can't propagate poison-generating flags (nuw/nsw,
9559         // exact, etc.). The control flow has been linearized and the
        // instruction is no longer guarded by the predicate, which could cause
        // the flag properties to no longer hold.
9562         if (State.MayGeneratePoisonRecipes.contains(this))
9563           VecOp->dropPoisonGeneratingFlags();
9564       }
9565 
9566       // Use this vector value for all users of the original instruction.
9567       State.set(this, V, Part);
9568       State.ILV->addMetadata(V, &I);
9569     }
9570 
9571     break;
9572   }
9573   case Instruction::ICmp:
9574   case Instruction::FCmp: {
    // Widen compares by generating vector compares.
9576     bool FCmp = (I.getOpcode() == Instruction::FCmp);
9577     auto *Cmp = cast<CmpInst>(&I);
9578     State.ILV->setDebugLocFromInst(Cmp);
9579     for (unsigned Part = 0; Part < State.UF; ++Part) {
9580       Value *A = State.get(getOperand(0), Part);
9581       Value *B = State.get(getOperand(1), Part);
9582       Value *C = nullptr;
9583       if (FCmp) {
9584         // Propagate fast math flags.
9585         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
9586         Builder.setFastMathFlags(Cmp->getFastMathFlags());
9587         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
9588       } else {
9589         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
9590       }
9591       State.set(this, C, Part);
9592       State.ILV->addMetadata(C, &I);
9593     }
9594 
9595     break;
9596   }
9597 
9598   case Instruction::ZExt:
9599   case Instruction::SExt:
9600   case Instruction::FPToUI:
9601   case Instruction::FPToSI:
9602   case Instruction::FPExt:
9603   case Instruction::PtrToInt:
9604   case Instruction::IntToPtr:
9605   case Instruction::SIToFP:
9606   case Instruction::UIToFP:
9607   case Instruction::Trunc:
9608   case Instruction::FPTrunc:
9609   case Instruction::BitCast: {
9610     auto *CI = cast<CastInst>(&I);
9611     State.ILV->setDebugLocFromInst(CI);
9612 
    // Vectorize casts.
9614     Type *DestTy = (State.VF.isScalar())
9615                        ? CI->getType()
9616                        : VectorType::get(CI->getType(), State.VF);
9617 
9618     for (unsigned Part = 0; Part < State.UF; ++Part) {
9619       Value *A = State.get(getOperand(0), Part);
9620       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
9621       State.set(this, Cast, Part);
9622       State.ILV->addMetadata(Cast, &I);
9623     }
9624     break;
9625   }
9626   default:
9627     // This instruction is not vectorized by simple widening.
9628     LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
9629     llvm_unreachable("Unhandled instruction!");
9630   } // end of switch.
9631 }
9632 
9633 void VPWidenGEPRecipe::execute(VPTransformState &State) {
9634   auto *GEP = cast<GetElementPtrInst>(getUnderlyingInstr());
9635   // Construct a vector GEP by widening the operands of the scalar GEP as
9636   // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
9637   // results in a vector of pointers when at least one operand of the GEP
9638   // is vector-typed. Thus, to keep the representation compact, we only use
9639   // vector-typed operands for loop-varying values.
9640 
9641   if (State.VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
9642     // If we are vectorizing, but the GEP has only loop-invariant operands,
9643     // the GEP we build (by only using vector-typed operands for
9644     // loop-varying values) would be a scalar pointer. Thus, to ensure we
9645     // produce a vector of pointers, we need to either arbitrarily pick an
9646     // operand to broadcast, or broadcast a clone of the original GEP.
9647     // Here, we broadcast a clone of the original.
9648     //
9649     // TODO: If at some point we decide to scalarize instructions having
9650     //       loop-invariant operands, this special case will no longer be
9651     //       required. We would add the scalarization decision to
9652     //       collectLoopScalars() and teach getVectorValue() to broadcast
9653     //       the lane-zero scalar value.
9654     auto *Clone = State.Builder.Insert(GEP->clone());
9655     for (unsigned Part = 0; Part < State.UF; ++Part) {
9656       Value *EntryPart = State.Builder.CreateVectorSplat(State.VF, Clone);
9657       State.set(this, EntryPart, Part);
9658       State.ILV->addMetadata(EntryPart, GEP);
9659     }
9660   } else {
9661     // If the GEP has at least one loop-varying operand, we are sure to
9662     // produce a vector of pointers. But if we are only unrolling, we want
9663     // to produce a scalar GEP for each unroll part. Thus, the GEP we
9664     // produce with the code below will be scalar (if VF == 1) or vector
9665     // (otherwise). Note that for the unroll-only case, we still maintain
9666     // values in the vector mapping with initVector, as we do for other
9667     // instructions.
9668     for (unsigned Part = 0; Part < State.UF; ++Part) {
9669       // The pointer operand of the new GEP. If it's loop-invariant, we
9670       // won't broadcast it.
9671       auto *Ptr = IsPtrLoopInvariant
9672                       ? State.get(getOperand(0), VPIteration(0, 0))
9673                       : State.get(getOperand(0), Part);
9674 
9675       // Collect all the indices for the new GEP. If any index is
9676       // loop-invariant, we won't broadcast it.
9677       SmallVector<Value *, 4> Indices;
9678       for (unsigned I = 1, E = getNumOperands(); I < E; I++) {
9679         VPValue *Operand = getOperand(I);
9680         if (IsIndexLoopInvariant[I - 1])
9681           Indices.push_back(State.get(Operand, VPIteration(0, 0)));
9682         else
9683           Indices.push_back(State.get(Operand, Part));
9684       }
9685 
      // If the GEP instruction is vectorized and was in a basic block that
      // needed predication, we can't propagate the poison-generating 'inbounds'
      // flag. The control flow has been linearized and the GEP is no longer
      // guarded by the predicate, which could cause the 'inbounds' property to
      // no longer hold.
9691       bool IsInBounds =
9692           GEP->isInBounds() && State.MayGeneratePoisonRecipes.count(this) == 0;
9693 
      // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
      // but it should be a vector otherwise.
9696       auto *NewGEP = IsInBounds
9697                          ? State.Builder.CreateInBoundsGEP(
9698                                GEP->getSourceElementType(), Ptr, Indices)
9699                          : State.Builder.CreateGEP(GEP->getSourceElementType(),
9700                                                    Ptr, Indices);
9701       assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
9702              "NewGEP is not a pointer vector");
9703       State.set(this, NewGEP, Part);
9704       State.ILV->addMetadata(NewGEP, GEP);
9705     }
9706   }
9707 }
9708 
9709 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
9710   assert(!State.Instance && "Int or FP induction being replicated.");
9711   auto *CanonicalIV = State.get(getParent()->getPlan()->getCanonicalIV(), 0);
9712   State.ILV->widenIntOrFpInduction(IV, getInductionDescriptor(),
9713                                    getStartValue()->getLiveInIRValue(),
9714                                    getTruncInst(), this, State, CanonicalIV);
9715 }
9716 
9717 void VPWidenPHIRecipe::execute(VPTransformState &State) {
9718   State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), this,
9719                                  State);
9720 }
9721 
9722 void VPBlendRecipe::execute(VPTransformState &State) {
9723   State.ILV->setDebugLocFromInst(Phi, &State.Builder);
9724   // We know that all PHIs in non-header blocks are converted into
9725   // selects, so we don't have to worry about the insertion order and we
9726   // can just use the builder.
9727   // At this point we generate the predication tree. There may be
9728   // duplications since this is a simple recursive scan, but future
9729   // optimizations will clean it up.
9730 
9731   unsigned NumIncoming = getNumIncomingValues();
9732 
9733   // Generate a sequence of selects of the form:
9734   // SELECT(Mask3, In3,
9735   //        SELECT(Mask2, In2,
9736   //               SELECT(Mask1, In1,
9737   //                      In0)))
  // Note that Mask0 is never used: lanes for which no path reaches this phi
  // are essentially undef and take their value from In0.
9740   InnerLoopVectorizer::VectorParts Entry(State.UF);
9741   for (unsigned In = 0; In < NumIncoming; ++In) {
9742     for (unsigned Part = 0; Part < State.UF; ++Part) {
      // We might have single-edge PHIs (blocks); the first incoming value
      // needs no 'select' and simply initializes the result.
9745       Value *In0 = State.get(getIncomingValue(In), Part);
9746       if (In == 0)
9747         Entry[Part] = In0; // Initialize with the first incoming value.
9748       else {
9749         // Select between the current value and the previous incoming edge
9750         // based on the incoming mask.
9751         Value *Cond = State.get(getMask(In), Part);
9752         Entry[Part] =
9753             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
9754       }
9755     }
9756   }
9757   for (unsigned Part = 0; Part < State.UF; ++Part)
9758     State.set(this, Entry[Part], Part);
9759 }
9760 
9761 void VPInterleaveRecipe::execute(VPTransformState &State) {
9762   assert(!State.Instance && "Interleave group being replicated.");
9763   State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(),
9764                                       getStoredValues(), getMask());
9765 }
9766 
9767 void VPReductionRecipe::execute(VPTransformState &State) {
9768   assert(!State.Instance && "Reduction being replicated.");
9769   Value *PrevInChain = State.get(getChainOp(), 0);
9770   RecurKind Kind = RdxDesc->getRecurrenceKind();
9771   bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc);
9772   // Propagate the fast-math flags carried by the underlying instruction.
9773   IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
9774   State.Builder.setFastMathFlags(RdxDesc->getFastMathFlags());
9775   for (unsigned Part = 0; Part < State.UF; ++Part) {
9776     Value *NewVecOp = State.get(getVecOp(), Part);
9777     if (VPValue *Cond = getCondOp()) {
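      // Blend masked-off lanes with the recurrence identity (e.g. 0 for an
      // add reduction, 1 for a multiply) so that they do not affect the
      // reduced result.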
9778       Value *NewCond = State.get(Cond, Part);
9779       VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
9780       Value *Iden = RdxDesc->getRecurrenceIdentity(
9781           Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags());
9782       Value *IdenVec =
9783           State.Builder.CreateVectorSplat(VecTy->getElementCount(), Iden);
9784       Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
9785       NewVecOp = Select;
9786     }
9787     Value *NewRed;
9788     Value *NextInChain;
9789     if (IsOrdered) {
9790       if (State.VF.isVector())
9791         NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp,
9792                                         PrevInChain);
9793       else
9794         NewRed = State.Builder.CreateBinOp(
9795             (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), PrevInChain,
9796             NewVecOp);
9797       PrevInChain = NewRed;
9798     } else {
9799       PrevInChain = State.get(getChainOp(), Part);
9800       NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
9801     }
9802     if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9803       NextInChain =
9804           createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
9805                          NewRed, PrevInChain);
9806     } else if (IsOrdered)
9807       NextInChain = NewRed;
9808     else
9809       NextInChain = State.Builder.CreateBinOp(
9810           (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), NewRed,
9811           PrevInChain);
9812     State.set(this, NextInChain, Part);
9813   }
9814 }
9815 
9816 void VPReplicateRecipe::execute(VPTransformState &State) {
9817   if (State.Instance) { // Generate a single instance.
9818     assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
9819     State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *State.Instance,
9820                                     IsPredicated, State);
9821     // Insert scalar instance packing it into a vector.
9822     if (AlsoPack && State.VF.isVector()) {
9823       // If we're constructing lane 0, initialize to start from poison.
9824       if (State.Instance->Lane.isFirstLane()) {
9825         assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
9826         Value *Poison = PoisonValue::get(
9827             VectorType::get(getUnderlyingValue()->getType(), State.VF));
9828         State.set(this, Poison, State.Instance->Part);
9829       }
9830       State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
9831     }
9832     return;
9833   }
9834 
  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane for
  // each of the UF parts.
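  // For example (illustrative): with VF=4 and UF=2, a non-uniform replicated
  // instruction yields 8 scalar copies, whereas a uniform one yields only 2
  // (lane 0 of each part).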
9838   unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
9839   assert((!State.VF.isScalable() || IsUniform) &&
9840          "Can't scalarize a scalable vector");
9841   for (unsigned Part = 0; Part < State.UF; ++Part)
9842     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
9843       State.ILV->scalarizeInstruction(getUnderlyingInstr(), this,
9844                                       VPIteration(Part, Lane), IsPredicated,
9845                                       State);
9846 }
9847 
9848 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
9849   assert(State.Instance && "Branch on Mask works only on single instance.");
9850 
9851   unsigned Part = State.Instance->Part;
9852   unsigned Lane = State.Instance->Lane.getKnownLane();
9853 
9854   Value *ConditionBit = nullptr;
9855   VPValue *BlockInMask = getMask();
9856   if (BlockInMask) {
9857     ConditionBit = State.get(BlockInMask, Part);
9858     if (ConditionBit->getType()->isVectorTy())
9859       ConditionBit = State.Builder.CreateExtractElement(
9860           ConditionBit, State.Builder.getInt32(Lane));
9861   } else // Block in mask is all-one.
9862     ConditionBit = State.Builder.getTrue();
9863 
9864   // Replace the temporary unreachable terminator with a new conditional branch,
9865   // whose two destinations will be set later when they are created.
9866   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
9867   assert(isa<UnreachableInst>(CurrentTerminator) &&
9868          "Expected to replace unreachable terminator with conditional branch.");
9869   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
9870   CondBr->setSuccessor(0, nullptr);
9871   ReplaceInstWithInst(CurrentTerminator, CondBr);
9872 }
9873 
9874 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
9875   assert(State.Instance && "Predicated instruction PHI works per instance.");
9876   Instruction *ScalarPredInst =
9877       cast<Instruction>(State.get(getOperand(0), *State.Instance));
9878   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
9879   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
9880   assert(PredicatingBB && "Predicated block has no single predecessor.");
9881   assert(isa<VPReplicateRecipe>(getOperand(0)) &&
9882          "operand must be VPReplicateRecipe");
9883 
9884   // By current pack/unpack logic we need to generate only a single phi node: if
9885   // a vector value for the predicated instruction exists at this point it means
9886   // the instruction has vector users only, and a phi for the vector value is
9887   // needed. In this case the recipe of the predicated instruction is marked to
9888   // also do that packing, thereby "hoisting" the insert-element sequence.
9889   // Otherwise, a phi node for the scalar value is needed.
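  // For example (illustrative), in the packing case the generated phi has the
  // form:
  //   %vphi = phi <4 x i32> [ %vec.unmodified, %PredicatingBB ],
  //                         [ %vec.with.insert, %PredicatedBB ]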
9890   unsigned Part = State.Instance->Part;
9891   if (State.hasVectorValue(getOperand(0), Part)) {
9892     Value *VectorValue = State.get(getOperand(0), Part);
9893     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
9894     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
9895     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
9896     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
9897     if (State.hasVectorValue(this, Part))
9898       State.reset(this, VPhi, Part);
9899     else
9900       State.set(this, VPhi, Part);
9901     // NOTE: Currently we need to update the value of the operand, so the next
9902     // predicated iteration inserts its generated value in the correct vector.
9903     State.reset(getOperand(0), VPhi, Part);
9904   } else {
9905     Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType();
9906     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
9907     Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
9908                      PredicatingBB);
9909     Phi->addIncoming(ScalarPredInst, PredicatedBB);
9910     if (State.hasScalarValue(this, *State.Instance))
9911       State.reset(this, Phi, *State.Instance);
9912     else
9913       State.set(this, Phi, *State.Instance);
9914     // NOTE: Currently we need to update the value of the operand, so the next
9915     // predicated iteration inserts its generated value in the correct vector.
9916     State.reset(getOperand(0), Phi, *State.Instance);
9917   }
9918 }
9919 
9920 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
9921   VPValue *StoredValue = isStore() ? getStoredValue() : nullptr;
9922 
9923   // Attempt to issue a wide load.
9924   LoadInst *LI = dyn_cast<LoadInst>(&Ingredient);
9925   StoreInst *SI = dyn_cast<StoreInst>(&Ingredient);
9926 
9927   assert((LI || SI) && "Invalid Load/Store instruction");
9928   assert((!SI || StoredValue) && "No stored value provided for widened store");
9929   assert((!LI || !StoredValue) && "Stored value provided for widened load");
9930 
9931   Type *ScalarDataTy = getLoadStoreType(&Ingredient);
9932 
9933   auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
9934   const Align Alignment = getLoadStoreAlignment(&Ingredient);
9935   bool CreateGatherScatter = !Consecutive;
9936 
9937   auto &Builder = State.Builder;
9938   InnerLoopVectorizer::VectorParts BlockInMaskParts(State.UF);
9939   bool isMaskRequired = getMask();
9940   if (isMaskRequired)
9941     for (unsigned Part = 0; Part < State.UF; ++Part)
9942       BlockInMaskParts[Part] = State.get(getMask(), Part);
9943 
9944   const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
9945     // Calculate the pointer for the specific unroll-part.
9946     GetElementPtrInst *PartPtr = nullptr;
9947 
9948     bool InBounds = false;
9949     if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
9950       InBounds = gep->isInBounds();
9951     if (Reverse) {
      // If the address is consecutive but reversed, then the
      // wide store needs to start at the last vector element.
      // RunTimeVF = VScale * VF.getKnownMinValue()
      // For fixed-width vectors VScale is 1, so RunTimeVF = VF.getKnownMinValue().
9956       Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), State.VF);
9957       // NumElt = -Part * RunTimeVF
9958       Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF);
9959       // LastLane = 1 - RunTimeVF
9960       Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF);
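      // For example (illustrative, fixed-width VF=4): RunTimeVF = 4, so for
      // Part = 1 we get NumElt = -4 and LastLane = -3, i.e. a part pointer of
      // Ptr - 7, the lowest address covered by the reversed second part.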
9961       PartPtr =
9962           cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt));
9963       PartPtr->setIsInBounds(InBounds);
9964       PartPtr = cast<GetElementPtrInst>(
9965           Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane));
9966       PartPtr->setIsInBounds(InBounds);
9967       if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
9968         BlockInMaskParts[Part] =
9969             Builder.CreateVectorReverse(BlockInMaskParts[Part], "reverse");
9970     } else {
9971       Value *Increment =
9972           createStepForVF(Builder, Builder.getInt32Ty(), State.VF, Part);
9973       PartPtr = cast<GetElementPtrInst>(
9974           Builder.CreateGEP(ScalarDataTy, Ptr, Increment));
9975       PartPtr->setIsInBounds(InBounds);
9976     }
9977 
9978     unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
9979     return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
9980   };
9981 
9982   // Handle Stores:
9983   if (SI) {
9984     State.ILV->setDebugLocFromInst(SI);
9985 
9986     for (unsigned Part = 0; Part < State.UF; ++Part) {
9987       Instruction *NewSI = nullptr;
9988       Value *StoredVal = State.get(StoredValue, Part);
9989       if (CreateGatherScatter) {
9990         Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
9991         Value *VectorGep = State.get(getAddr(), Part);
9992         NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
9993                                             MaskPart);
9994       } else {
9995         if (Reverse) {
9996           // If we store to reverse consecutive memory locations, then we need
9997           // to reverse the order of elements in the stored value.
9998           StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse");
9999           // We don't want to update the value in the map as it might be used in
10000           // another expression. So don't call resetVectorValue(StoredVal).
10001         }
10002         auto *VecPtr =
10003             CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0)));
10004         if (isMaskRequired)
10005           NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
10006                                             BlockInMaskParts[Part]);
10007         else
10008           NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
10009       }
10010       State.ILV->addMetadata(NewSI, SI);
10011     }
10012     return;
10013   }
10014 
10015   // Handle loads.
10016   assert(LI && "Must have a load instruction");
10017   State.ILV->setDebugLocFromInst(LI);
10018   for (unsigned Part = 0; Part < State.UF; ++Part) {
10019     Value *NewLI;
10020     if (CreateGatherScatter) {
10021       Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
10022       Value *VectorGep = State.get(getAddr(), Part);
10023       NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart,
10024                                          nullptr, "wide.masked.gather");
10025       State.ILV->addMetadata(NewLI, LI);
10026     } else {
10027       auto *VecPtr =
10028           CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0)));
10029       if (isMaskRequired)
10030         NewLI = Builder.CreateMaskedLoad(
10031             DataTy, VecPtr, Alignment, BlockInMaskParts[Part],
10032             PoisonValue::get(DataTy), "wide.masked.load");
10033       else
10034         NewLI =
10035             Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
10036 
      // Add metadata to the load, but set the vector value to the reverse
      // shuffle.
10038       State.ILV->addMetadata(NewLI, LI);
10039       if (Reverse)
10040         NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
10041     }
10042 
10043     State.set(this, NewLI, Part);
10044   }
10045 }
10046 
10047 // Determine how to lower the scalar epilogue, which depends on 1) optimising
10048 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
10049 // predication, and 4) a TTI hook that analyses whether the loop is suitable
10050 // for predication.
10051 static ScalarEpilogueLowering getScalarEpilogueLowering(
10052     Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
10053     BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
10054     AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
10055     LoopVectorizationLegality &LVL) {
10056   // 1) OptSize takes precedence over all other options, i.e. if this is set,
10057   // don't look at hints or options, and don't request a scalar epilogue.
10058   // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
10059   // LoopAccessInfo (due to code dependency and not being able to reliably get
10060   // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
10061   // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
10062   // versioning when the vectorization is forced, unlike hasOptSize. So revert
10063   // back to the old way and vectorize with versioning when forced. See D81345.)
10064   if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
10065                                                       PGSOQueryType::IRPass) &&
10066                           Hints.getForce() != LoopVectorizeHints::FK_Enabled))
10067     return CM_ScalarEpilogueNotAllowedOptSize;
10068 
10069   // 2) If set, obey the directives
10070   if (PreferPredicateOverEpilogue.getNumOccurrences()) {
10071     switch (PreferPredicateOverEpilogue) {
10072     case PreferPredicateTy::ScalarEpilogue:
10073       return CM_ScalarEpilogueAllowed;
10074     case PreferPredicateTy::PredicateElseScalarEpilogue:
10075       return CM_ScalarEpilogueNotNeededUsePredicate;
10076     case PreferPredicateTy::PredicateOrDontVectorize:
10077       return CM_ScalarEpilogueNotAllowedUsePredicate;
10078     };
10079   }
10080 
10081   // 3) If set, obey the hints
10082   switch (Hints.getPredicate()) {
10083   case LoopVectorizeHints::FK_Enabled:
10084     return CM_ScalarEpilogueNotNeededUsePredicate;
10085   case LoopVectorizeHints::FK_Disabled:
10086     return CM_ScalarEpilogueAllowed;
10087   };
10088 
10089   // 4) if the TTI hook indicates this is profitable, request predication.
10090   if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
10091                                        LVL.getLAI()))
10092     return CM_ScalarEpilogueNotNeededUsePredicate;
10093 
10094   return CM_ScalarEpilogueAllowed;
10095 }
10096 
10097 Value *VPTransformState::get(VPValue *Def, unsigned Part) {
  // If values have been set for this Def, return the one relevant for \p Part.
10099   if (hasVectorValue(Def, Part))
10100     return Data.PerPartOutput[Def][Part];
10101 
10102   if (!hasScalarValue(Def, {Part, 0})) {
10103     Value *IRV = Def->getLiveInIRValue();
10104     Value *B = ILV->getBroadcastInstrs(IRV);
10105     set(Def, B, Part);
10106     return B;
10107   }
10108 
10109   Value *ScalarValue = get(Def, {Part, 0});
10110   // If we aren't vectorizing, we can just copy the scalar map values over
10111   // to the vector map.
10112   if (VF.isScalar()) {
10113     set(Def, ScalarValue, Part);
10114     return ScalarValue;
10115   }
10116 
10117   auto *RepR = dyn_cast<VPReplicateRecipe>(Def);
10118   bool IsUniform = RepR && RepR->isUniform();
10119 
10120   unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
10121   // Check if there is a scalar value for the selected lane.
10122   if (!hasScalarValue(Def, {Part, LastLane})) {
10123     // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform.
10124     assert(isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) &&
10125            "unexpected recipe found to be invariant");
10126     IsUniform = true;
10127     LastLane = 0;
10128   }
10129 
10130   auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
10131   // Set the insert point after the last scalarized instruction or after the
10132   // last PHI, if LastInst is a PHI. This ensures the insertelement sequence
10133   // will directly follow the scalar definitions.
10134   auto OldIP = Builder.saveIP();
10135   auto NewIP =
10136       isa<PHINode>(LastInst)
10137           ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI())
10138           : std::next(BasicBlock::iterator(LastInst));
10139   Builder.SetInsertPoint(&*NewIP);
10140 
10141   // However, if we are vectorizing, we need to construct the vector values.
10142   // If the value is known to be uniform after vectorization, we can just
10143   // broadcast the scalar value corresponding to lane zero for each unroll
10144   // iteration. Otherwise, we construct the vector values using
10145   // insertelement instructions. Since the resulting vectors are stored in
10146   // State, we will only generate the insertelements once.
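  // For example (illustrative, VF=4): a non-uniform Def with scalar values
  // %s0..%s3 is packed as
  //   %v.0 = insertelement <4 x i32> poison, i32 %s0, i32 0
  //   %v.1 = insertelement <4 x i32> %v.0,   i32 %s1, i32 1
  //   ...
  // with the last insertelement recorded as the per-part vector value.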
10147   Value *VectorValue = nullptr;
10148   if (IsUniform) {
10149     VectorValue = ILV->getBroadcastInstrs(ScalarValue);
10150     set(Def, VectorValue, Part);
10151   } else {
    // Initialize packing with insertelements to start from poison.
    assert(!VF.isScalable() && "VF is assumed to be non scalable.");
    Value *Poison = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
    set(Def, Poison, Part);
10156     for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
10157       ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
10158     VectorValue = get(Def, Part);
10159   }
10160   Builder.restoreIP(OldIP);
10161   return VectorValue;
10162 }
10163 
10164 // Process the loop in the VPlan-native vectorization path. This path builds
// VPlan upfront in the vectorization pipeline, which allows applying
10166 // VPlan-to-VPlan transformations from the very beginning without modifying the
10167 // input LLVM IR.
10168 static bool processLoopInVPlanNativePath(
10169     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
10170     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
10171     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
10172     OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
10173     ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints,
10174     LoopVectorizationRequirements &Requirements) {
10175 
10176   if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
10177     LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
10178     return false;
10179   }
10180   assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
10181   Function *F = L->getHeader()->getParent();
10182   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
10183 
10184   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
10185       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
10186 
10187   LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
10188                                 &Hints, IAI);
10189   // Use the planner for outer loop vectorization.
10190   // TODO: CM is not used at this point inside the planner. Turn CM into an
10191   // optional argument if we don't need it in the future.
10192   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints,
10193                                Requirements, ORE);
10194 
10195   // Get user vectorization factor.
10196   ElementCount UserVF = Hints.getWidth();
10197 
10198   CM.collectElementTypesForWidening();
10199 
10200   // Plan how to best vectorize, return the best VF and its cost.
10201   const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
10202 
10203   // If we are stress testing VPlan builds, do not attempt to generate vector
10204   // code. Masked vector code generation support will follow soon.
10205   // Also, do not attempt to vectorize if no vector code will be produced.
10206   if (VPlanBuildStressTest || EnableVPlanPredication ||
10207       VectorizationFactor::Disabled() == VF)
10208     return false;
10209 
10210   VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10211 
10212   {
10213     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10214                              F->getParent()->getDataLayout());
10215     InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
10216                            &CM, BFI, PSI, Checks);
10217     LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
10218                       << L->getHeader()->getParent()->getName() << "\"\n");
10219     LVP.executePlan(VF.Width, 1, BestPlan, LB, DT);
10220   }
10221 
10222   // Mark the loop as already vectorized to avoid vectorizing again.
10223   Hints.setAlreadyVectorized();
10224   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10225   return true;
10226 }
10227 
// Emit a remark if there are stores to floats that required a floating point
// extension. If the vectorized loop was generated with such conversions, there
// will be a performance penalty from the conversion overhead and the resulting
// change in the vector width.
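// For example (illustrative): a loop such as
//   for (...) A[i] = A[i] * 0.5;   // 'A' is float, '0.5' is double
// introduces fpext/fptrunc pairs whose vectorized form needs the up/down
// casts diagnosed here.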
10232 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
10233   SmallVector<Instruction *, 4> Worklist;
10234   for (BasicBlock *BB : L->getBlocks()) {
10235     for (Instruction &Inst : *BB) {
10236       if (auto *S = dyn_cast<StoreInst>(&Inst)) {
10237         if (S->getValueOperand()->getType()->isFloatTy())
10238           Worklist.push_back(S);
10239       }
10240     }
10241   }
10242 
  // Traverse the floating point stores upwards, searching for floating point
  // conversions.
10245   SmallPtrSet<const Instruction *, 4> Visited;
10246   SmallPtrSet<const Instruction *, 4> EmittedRemark;
10247   while (!Worklist.empty()) {
10248     auto *I = Worklist.pop_back_val();
10249     if (!L->contains(I))
10250       continue;
10251     if (!Visited.insert(I).second)
10252       continue;
10253 
10254     // Emit a remark if the floating point store required a floating
10255     // point conversion.
10256     // TODO: More work could be done to identify the root cause such as a
10257     // constant or a function return type and point the user to it.
10258     if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
10259       ORE->emit([&]() {
10260         return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
10261                                           I->getDebugLoc(), L->getHeader())
10262                << "floating point conversion changes vector width. "
10263                << "Mixed floating point precision requires an up/down "
10264                << "cast that will negatively impact performance.";
10265       });
10266 
10267     for (Use &Op : I->operands())
10268       if (auto *OpI = dyn_cast<Instruction>(Op))
10269         Worklist.push_back(OpI);
10270   }
10271 }
10272 
10273 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
10274     : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
10275                                !EnableLoopInterleaving),
10276       VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
10277                               !EnableLoopVectorization) {}
10278 
10279 bool LoopVectorizePass::processLoop(Loop *L) {
10280   assert((EnableVPlanNativePath || L->isInnermost()) &&
10281          "VPlan-native path is not enabled. Only process inner loops.");
10282 
10283 #ifndef NDEBUG
10284   const std::string DebugLocStr = getDebugLocString(L);
10285 #endif /* NDEBUG */
10286 
10287   LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
10288                     << L->getHeader()->getParent()->getName() << "\" from "
10289                     << DebugLocStr << "\n");
10290 
10291   LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI);
10292 
10293   LLVM_DEBUG(
10294       dbgs() << "LV: Loop hints:"
10295              << " force="
10296              << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
10297                      ? "disabled"
10298                      : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
10299                             ? "enabled"
10300                             : "?"))
10301              << " width=" << Hints.getWidth()
10302              << " interleave=" << Hints.getInterleave() << "\n");
10303 
10304   // Function containing loop
10305   Function *F = L->getHeader()->getParent();
10306 
10307   // Looking at the diagnostic output is the only way to determine if a loop
10308   // was vectorized (other than looking at the IR or machine code), so it
10309   // is important to generate an optimization remark for each loop. Most of
10310   // these messages are generated as OptimizationRemarkAnalysis. Remarks
10311   // generated as OptimizationRemark and OptimizationRemarkMissed are
  // less verbose, reporting vectorized loops and unvectorized loops that may
10313   // benefit from vectorization, respectively.
10314 
10315   if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
10316     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
10317     return false;
10318   }
10319 
10320   PredicatedScalarEvolution PSE(*SE, *L);
10321 
10322   // Check if it is legal to vectorize the loop.
10323   LoopVectorizationRequirements Requirements;
10324   LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
10325                                 &Requirements, &Hints, DB, AC, BFI, PSI);
10326   if (!LVL.canVectorize(EnableVPlanNativePath)) {
10327     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
10328     Hints.emitRemarkWithHints();
10329     return false;
10330   }
10331 
10332   // Check the function attributes and profiles to find out if this function
10333   // should be optimized for size.
10334   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
10335       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);
10336 
10337   // Entrance to the VPlan-native vectorization path. Outer loops are processed
10338   // here. They may require CFG and instruction level transformations before
10339   // even evaluating whether vectorization is profitable. Since we cannot modify
10340   // the incoming IR, we need to build VPlan upfront in the vectorization
10341   // pipeline.
10342   if (!L->isInnermost())
10343     return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
10344                                         ORE, BFI, PSI, Hints, Requirements);
10345 
10346   assert(L->isInnermost() && "Inner loop expected.");
10347 
10348   // Check the loop for a trip count threshold: vectorize loops with a tiny trip
10349   // count by optimizing for size, to minimize overheads.
10350   auto ExpectedTC = getSmallBestKnownTC(*SE, L);
10351   if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
10352     LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
10353                       << "This loop is worth vectorizing only if no scalar "
10354                       << "iteration overheads are incurred.");
10355     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
10356       LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
10357     else {
10358       LLVM_DEBUG(dbgs() << "\n");
10359       SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
10360     }
10361   }
10362 
10363   // Check the function attributes to see if implicit floats are allowed.
10364   // FIXME: This check doesn't seem possibly correct -- what if the loop is
10365   // an integer loop and the vector instructions selected are purely integer
10366   // vector instructions?
10367   if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
10368     reportVectorizationFailure(
10369         "Can't vectorize when the NoImplicitFloat attribute is used",
10370         "loop not vectorized due to NoImplicitFloat attribute",
10371         "NoImplicitFloat", ORE, L);
10372     Hints.emitRemarkWithHints();
10373     return false;
10374   }
10375 
10376   // Check if the target supports potentially unsafe FP vectorization.
10377   // FIXME: Add a check for the type of safety issue (denormal, signaling)
10378   // for the target we're vectorizing for, to make sure none of the
10379   // additional fp-math flags can help.
10380   if (Hints.isPotentiallyUnsafe() &&
10381       TTI->isFPVectorizationPotentiallyUnsafe()) {
10382     reportVectorizationFailure(
10383         "Potentially unsafe FP op prevents vectorization",
10384         "loop not vectorized due to unsafe FP support.",
10385         "UnsafeFP", ORE, L);
10386     Hints.emitRemarkWithHints();
10387     return false;
10388   }
10389 
10390   bool AllowOrderedReductions;
10391   // If the flag is set, use that instead and override the TTI behaviour.
10392   if (ForceOrderedReductions.getNumOccurrences() > 0)
10393     AllowOrderedReductions = ForceOrderedReductions;
10394   else
10395     AllowOrderedReductions = TTI->enableOrderedReductions();
10396   if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) {
10397     ORE->emit([&]() {
10398       auto *ExactFPMathInst = Requirements.getExactFPInst();
10399       return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
10400                                                  ExactFPMathInst->getDebugLoc(),
10401                                                  ExactFPMathInst->getParent())
10402              << "loop not vectorized: cannot prove it is safe to reorder "
10403                 "floating-point operations";
10404     });
10405     LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
10406                          "reorder floating-point operations\n");
10407     Hints.emitRemarkWithHints();
10408     return false;
10409   }
10410 
10411   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
10412   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
10413 
10414   // If an override option has been passed in for interleaved accesses, use it.
10415   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
10416     UseInterleaved = EnableInterleavedMemAccesses;
10417 
10418   // Analyze interleaved memory accesses.
10419   if (UseInterleaved) {
10420     IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
10421   }
10422 
10423   // Use the cost model.
10424   LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
10425                                 F, &Hints, IAI);
10426   CM.collectValuesToIgnore();
10427   CM.collectElementTypesForWidening();
10428 
10429   // Use the planner for vectorization.
10430   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints,
10431                                Requirements, ORE);
10432 
10433   // Get user vectorization factor and interleave count.
10434   ElementCount UserVF = Hints.getWidth();
10435   unsigned UserIC = Hints.getInterleave();
10436 
10437   // Plan how to best vectorize, return the best VF and its cost.
10438   Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
10439 
10440   VectorizationFactor VF = VectorizationFactor::Disabled();
10441   unsigned IC = 1;
10442 
10443   if (MaybeVF) {
10444     VF = *MaybeVF;
10445     // Select the interleave count.
10446     IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue());
10447   }
10448 
10449   // Identify the diagnostic messages that should be produced.
10450   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
10451   bool VectorizeLoop = true, InterleaveLoop = true;
10452   if (VF.Width.isScalar()) {
10453     LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
10454     VecDiagMsg = std::make_pair(
10455         "VectorizationNotBeneficial",
10456         "the cost-model indicates that vectorization is not beneficial");
10457     VectorizeLoop = false;
10458   }
10459 
10460   if (!MaybeVF && UserIC > 1) {
10461     // Tell the user interleaving was avoided up-front, despite being explicitly
10462     // requested.
10463     LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
10464                          "interleaving should be avoided up front\n");
10465     IntDiagMsg = std::make_pair(
10466         "InterleavingAvoided",
10467         "Ignoring UserIC, because interleaving was avoided up front");
10468     InterleaveLoop = false;
10469   } else if (IC == 1 && UserIC <= 1) {
10470     // Tell the user interleaving is not beneficial.
10471     LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
10472     IntDiagMsg = std::make_pair(
10473         "InterleavingNotBeneficial",
10474         "the cost-model indicates that interleaving is not beneficial");
10475     InterleaveLoop = false;
10476     if (UserIC == 1) {
10477       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
10478       IntDiagMsg.second +=
10479           " and is explicitly disabled or interleave count is set to 1";
10480     }
10481   } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
10483     LLVM_DEBUG(
10484         dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
10485     IntDiagMsg = std::make_pair(
10486         "InterleavingBeneficialButDisabled",
10487         "the cost-model indicates that interleaving is beneficial "
10488         "but is explicitly disabled or interleave count is set to 1");
10489     InterleaveLoop = false;
10490   }
10491 
10492   // Override IC if user provided an interleave count.
10493   IC = UserIC > 0 ? UserIC : IC;
10494 
10495   // Emit diagnostic messages, if any.
10496   const char *VAPassName = Hints.vectorizeAnalysisPassName();
10497   if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
10499     ORE->emit([&]() {
10500       return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
10501                                       L->getStartLoc(), L->getHeader())
10502              << VecDiagMsg.second;
10503     });
10504     ORE->emit([&]() {
10505       return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
10506                                       L->getStartLoc(), L->getHeader())
10507              << IntDiagMsg.second;
10508     });
10509     return false;
10510   } else if (!VectorizeLoop && InterleaveLoop) {
10511     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10512     ORE->emit([&]() {
10513       return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
10514                                         L->getStartLoc(), L->getHeader())
10515              << VecDiagMsg.second;
10516     });
10517   } else if (VectorizeLoop && !InterleaveLoop) {
10518     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10519                       << ") in " << DebugLocStr << '\n');
10520     ORE->emit([&]() {
10521       return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
10522                                         L->getStartLoc(), L->getHeader())
10523              << IntDiagMsg.second;
10524     });
10525   } else if (VectorizeLoop && InterleaveLoop) {
10526     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10527                       << ") in " << DebugLocStr << '\n');
10528     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10529   }
10530 
10531   bool DisableRuntimeUnroll = false;
10532   MDNode *OrigLoopID = L->getLoopID();
10533   {
10534     // Optimistically generate runtime checks. Drop them if they turn out to not
10535     // be profitable. Limit the scope of Checks, so the cleanup happens
    // immediately after vector code generation is done.
10537     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10538                              F->getParent()->getDataLayout());
10539     if (!VF.Width.isScalar() || IC > 1)
10540       Checks.Create(L, *LVL.getLAI(), PSE.getUnionPredicate());
10541 
10542     using namespace ore;
10543     if (!VectorizeLoop) {
10544       assert(IC > 1 && "interleave count should not be 1 or 0");
10545       // If we decided that it is not legal to vectorize the loop, then
10546       // interleave it.
10547       InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
10548                                  &CM, BFI, PSI, Checks);
10549 
10550       VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10551       LVP.executePlan(VF.Width, IC, BestPlan, Unroller, DT);
10552 
10553       ORE->emit([&]() {
10554         return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
10555                                   L->getHeader())
10556                << "interleaved loop (interleaved count: "
10557                << NV("InterleaveCount", IC) << ")";
10558       });
10559     } else {
10560       // If we decided that it is *legal* to vectorize the loop, then do it.
10561 
10562       // Consider vectorizing the epilogue too if it's profitable.
10563       VectorizationFactor EpilogueVF =
10564           CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
10565       if (EpilogueVF.Width.isVector()) {
10566 
10567         // The first pass vectorizes the main loop and creates a scalar epilogue
10568         // to be vectorized by executing the plan (potentially with a different
10569         // factor) again shortly afterwards.
10570         EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1);
10571         EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE,
10572                                            EPI, &LVL, &CM, BFI, PSI, Checks);
10573 
10574         VPlan &BestMainPlan = LVP.getBestPlanFor(EPI.MainLoopVF);
10575         LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF, BestMainPlan, MainILV,
10576                         DT);
10577         ++LoopsVectorized;
10578 
10579         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10580         formLCSSARecursively(*L, *DT, LI, SE);
10581 
10582         // Second pass vectorizes the epilogue and adjusts the control flow
10583         // edges from the first pass.
10584         EPI.MainLoopVF = EPI.EpilogueVF;
10585         EPI.MainLoopUF = EPI.EpilogueUF;
10586         EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
10587                                                  ORE, EPI, &LVL, &CM, BFI, PSI,
10588                                                  Checks);
10589 
10590         VPlan &BestEpiPlan = LVP.getBestPlanFor(EPI.EpilogueVF);
10591         LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV,
10592                         DT);
10593         ++LoopsEpilogueVectorized;
10594 
10595         if (!MainILV.areSafetyChecksAdded())
10596           DisableRuntimeUnroll = true;
10597       } else {
10598         InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
10599                                &LVL, &CM, BFI, PSI, Checks);
10600 
10601         VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10602         LVP.executePlan(VF.Width, IC, BestPlan, LB, DT);
10603         ++LoopsVectorized;
10604 
10605         // Add metadata to disable runtime unrolling a scalar loop when there
10606         // are no runtime checks about strides and memory. A scalar loop that is
10607         // rarely used is not worth unrolling.
10608         if (!LB.areSafetyChecksAdded())
10609           DisableRuntimeUnroll = true;
10610       }
10611       // Report the vectorization decision.
10612       ORE->emit([&]() {
10613         return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
10614                                   L->getHeader())
10615                << "vectorized loop (vectorization width: "
10616                << NV("VectorizationFactor", VF.Width)
10617                << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
10618       });
10619     }
10620 
10621     if (ORE->allowExtraAnalysis(LV_NAME))
10622       checkMixedPrecision(L, ORE);
10623   }
10624 
10625   Optional<MDNode *> RemainderLoopID =
10626       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
10627                                       LLVMLoopVectorizeFollowupEpilogue});
10628   if (RemainderLoopID.hasValue()) {
10629     L->setLoopID(RemainderLoopID.getValue());
10630   } else {
10631     if (DisableRuntimeUnroll)
10632       AddRuntimeUnrollDisableMetaData(L);
10633 
10634     // Mark the loop as already vectorized to avoid vectorizing again.
10635     Hints.setAlreadyVectorized();
10636   }
10637 
10638   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10639   return true;
10640 }
10641 
10642 LoopVectorizeResult LoopVectorizePass::runImpl(
10643     Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
10644     DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
10645     DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
10646     std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
10647     OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
10648   SE = &SE_;
10649   LI = &LI_;
10650   TTI = &TTI_;
10651   DT = &DT_;
10652   BFI = &BFI_;
10653   TLI = TLI_;
10654   AA = &AA_;
10655   AC = &AC_;
10656   GetLAA = &GetLAA_;
10657   DB = &DB_;
10658   ORE = &ORE_;
10659   PSI = PSI_;
10660 
10661   // Don't attempt if
10662   // 1. the target claims to have no vector registers, and
10663   // 2. interleaving won't help ILP.
10664   //
10665   // The second condition is necessary because, even if the target has no
10666   // vector registers, loop vectorization may still enable scalar
10667   // interleaving.
10668   if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
10669       TTI->getMaxInterleaveFactor(1) < 2)
10670     return LoopVectorizeResult(false, false);
10671 
10672   bool Changed = false, CFGChanged = false;
10673 
10674   // The vectorizer requires loops to be in simplified form.
10675   // Since simplification may add new inner loops, it has to run before the
10676   // legality and profitability checks. This means running the loop vectorizer
10677   // will simplify all loops, regardless of whether anything end up being
10678   // vectorized.
10679   for (auto &L : *LI)
10680     Changed |= CFGChanged |=
10681         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10682 
10683   // Build up a worklist of inner-loops to vectorize. This is necessary as
10684   // the act of vectorizing or partially unrolling a loop creates new loops
10685   // and can invalidate iterators across the loops.
10686   SmallVector<Loop *, 8> Worklist;
10687 
10688   for (Loop *L : *LI)
10689     collectSupportedLoops(*L, LI, ORE, Worklist);
10690 
10691   LoopsAnalyzed += Worklist.size();
10692 
10693   // Now walk the identified inner loops.
10694   while (!Worklist.empty()) {
10695     Loop *L = Worklist.pop_back_val();
10696 
10697     // For the inner loops we actually process, form LCSSA to simplify the
10698     // transform.
10699     Changed |= formLCSSARecursively(*L, *DT, LI, SE);
10700 
10701     Changed |= CFGChanged |= processLoop(L);
10702   }
10703 
10704   // Process each loop nest in the function.
10705   return LoopVectorizeResult(Changed, CFGChanged);
10706 }
10707 
10708 PreservedAnalyses LoopVectorizePass::run(Function &F,
10709                                          FunctionAnalysisManager &AM) {
10710     auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
10711     auto &LI = AM.getResult<LoopAnalysis>(F);
10712     auto &TTI = AM.getResult<TargetIRAnalysis>(F);
10713     auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
10714     auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
10715     auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
10716     auto &AA = AM.getResult<AAManager>(F);
10717     auto &AC = AM.getResult<AssumptionAnalysis>(F);
10718     auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
10719     auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
10720 
10721     auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
10722     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
10723         [&](Loop &L) -> const LoopAccessInfo & {
10724       LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,      SE,
10725                                         TLI, TTI, nullptr, nullptr, nullptr};
10726       return LAM.getResult<LoopAccessAnalysis>(L, AR);
10727     };
10728     auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
10729     ProfileSummaryInfo *PSI =
10730         MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
10731     LoopVectorizeResult Result =
10732         runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
10733     if (!Result.MadeAnyChange)
10734       return PreservedAnalyses::all();
10735     PreservedAnalyses PA;
10736 
10737     // We currently do not preserve loopinfo/dominator analyses with outer loop
10738     // vectorization. Until this is addressed, mark these analyses as preserved
10739     // only for non-VPlan-native path.
10740     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
10741     if (!EnableVPlanNativePath) {
10742       PA.preserve<LoopAnalysis>();
10743       PA.preserve<DominatorTreeAnalysis>();
10744     }
10745 
10746     if (Result.MadeCFGChange) {
10747       // Making CFG changes likely means a loop got vectorized. Indicate that
10748       // extra simplification passes should be run.
      // TODO: MadeCFGChange is not a perfect proxy. Extra passes should only
      // be run if runtime checks have been added.
10751       AM.getResult<ShouldRunExtraVectorPasses>(F);
10752       PA.preserve<ShouldRunExtraVectorPasses>();
10753     } else {
10754       PA.preserveSet<CFGAnalyses>();
10755     }
10756     return PA;
10757 }
10758 
10759 void LoopVectorizePass::printPipeline(
10760     raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
10761   static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
10762       OS, MapClassName2PassName);
10763 
10764   OS << "<";
10765   OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
10766   OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
10767   OS << ">";
10768 }
10769