1 //===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
10 // and generates target-independent LLVM-IR.
11 // The vectorizer uses the TargetTransformInfo analysis to estimate the costs
12 // of instructions in order to estimate the profitability of vectorization.
13 //
14 // The loop vectorizer combines consecutive loop iterations into a single
15 // 'wide' iteration. After this transformation the index is incremented
16 // by the SIMD vector width, and not by one.
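// For illustration, a scalar loop such as
//   for (i = 0; i < n; ++i) a[i] = b[i] + c[i];
// becomes a loop whose body loads, adds and stores VF elements at a time
// (e.g. VF = 4), with the induction variable advancing by VF per iteration.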
17 //
18 // This pass has four parts:
19 // 1. The main loop pass that drives the different parts.
20 // 2. LoopVectorizationLegality - A unit that checks for the legality
21 //    of the vectorization.
22 // 3. InnerLoopVectorizer - A unit that performs the actual
23 //    widening of instructions.
24 // 4. LoopVectorizationCostModel - A unit that checks for the profitability
25 //    of vectorization. It decides on the optimal vector width, which
26 //    can be one, if vectorization is not profitable.
27 //
28 // There is an ongoing development effort to migrate the loop vectorizer to the
29 // VPlan infrastructure and to introduce outer loop vectorization support (see
30 // docs/Proposal/VectorizationPlan.rst and
31 // http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
32 // purpose, we temporarily introduced the VPlan-native vectorization path: an
33 // alternative vectorization path that is natively implemented on top of the
34 // VPlan infrastructure. See EnableVPlanNativePath for enabling.
35 //
36 //===----------------------------------------------------------------------===//
37 //
38 // The reduction-variable vectorization is based on the paper:
39 //  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
40 //
41 // Variable uniformity checks are inspired by:
42 //  Karrenberg, R. and Hack, S. Whole Function Vectorization.
43 //
44 // The interleaved access vectorization is based on the paper:
45 //  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
46 //  Data for SIMD
47 //
48 // Other ideas/concepts are from:
49 //  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
50 //
51 //  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
52 //  Vectorizing Compilers.
53 //
54 //===----------------------------------------------------------------------===//
55 
56 #include "llvm/Transforms/Vectorize/LoopVectorize.h"
57 #include "LoopVectorizationPlanner.h"
58 #include "VPRecipeBuilder.h"
59 #include "VPlan.h"
60 #include "VPlanHCFGBuilder.h"
61 #include "VPlanPredicator.h"
62 #include "VPlanTransforms.h"
63 #include "llvm/ADT/APInt.h"
64 #include "llvm/ADT/ArrayRef.h"
65 #include "llvm/ADT/DenseMap.h"
66 #include "llvm/ADT/DenseMapInfo.h"
67 #include "llvm/ADT/Hashing.h"
68 #include "llvm/ADT/MapVector.h"
69 #include "llvm/ADT/None.h"
70 #include "llvm/ADT/Optional.h"
71 #include "llvm/ADT/STLExtras.h"
72 #include "llvm/ADT/SmallPtrSet.h"
73 #include "llvm/ADT/SmallSet.h"
74 #include "llvm/ADT/SmallVector.h"
75 #include "llvm/ADT/Statistic.h"
76 #include "llvm/ADT/StringRef.h"
77 #include "llvm/ADT/Twine.h"
78 #include "llvm/ADT/iterator_range.h"
79 #include "llvm/Analysis/AssumptionCache.h"
80 #include "llvm/Analysis/BasicAliasAnalysis.h"
81 #include "llvm/Analysis/BlockFrequencyInfo.h"
82 #include "llvm/Analysis/CFG.h"
83 #include "llvm/Analysis/CodeMetrics.h"
84 #include "llvm/Analysis/DemandedBits.h"
85 #include "llvm/Analysis/GlobalsModRef.h"
86 #include "llvm/Analysis/LoopAccessAnalysis.h"
87 #include "llvm/Analysis/LoopAnalysisManager.h"
88 #include "llvm/Analysis/LoopInfo.h"
89 #include "llvm/Analysis/LoopIterator.h"
90 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
91 #include "llvm/Analysis/ProfileSummaryInfo.h"
92 #include "llvm/Analysis/ScalarEvolution.h"
93 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
94 #include "llvm/Analysis/TargetLibraryInfo.h"
95 #include "llvm/Analysis/TargetTransformInfo.h"
96 #include "llvm/Analysis/VectorUtils.h"
97 #include "llvm/IR/Attributes.h"
98 #include "llvm/IR/BasicBlock.h"
99 #include "llvm/IR/CFG.h"
100 #include "llvm/IR/Constant.h"
101 #include "llvm/IR/Constants.h"
102 #include "llvm/IR/DataLayout.h"
103 #include "llvm/IR/DebugInfoMetadata.h"
104 #include "llvm/IR/DebugLoc.h"
105 #include "llvm/IR/DerivedTypes.h"
106 #include "llvm/IR/DiagnosticInfo.h"
107 #include "llvm/IR/Dominators.h"
108 #include "llvm/IR/Function.h"
109 #include "llvm/IR/IRBuilder.h"
110 #include "llvm/IR/InstrTypes.h"
111 #include "llvm/IR/Instruction.h"
112 #include "llvm/IR/Instructions.h"
113 #include "llvm/IR/IntrinsicInst.h"
114 #include "llvm/IR/Intrinsics.h"
115 #include "llvm/IR/LLVMContext.h"
116 #include "llvm/IR/Metadata.h"
117 #include "llvm/IR/Module.h"
118 #include "llvm/IR/Operator.h"
119 #include "llvm/IR/PatternMatch.h"
120 #include "llvm/IR/Type.h"
121 #include "llvm/IR/Use.h"
122 #include "llvm/IR/User.h"
123 #include "llvm/IR/Value.h"
124 #include "llvm/IR/ValueHandle.h"
125 #include "llvm/IR/Verifier.h"
126 #include "llvm/InitializePasses.h"
127 #include "llvm/Pass.h"
128 #include "llvm/Support/Casting.h"
129 #include "llvm/Support/CommandLine.h"
130 #include "llvm/Support/Compiler.h"
131 #include "llvm/Support/Debug.h"
132 #include "llvm/Support/ErrorHandling.h"
133 #include "llvm/Support/InstructionCost.h"
134 #include "llvm/Support/MathExtras.h"
135 #include "llvm/Support/raw_ostream.h"
136 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
137 #include "llvm/Transforms/Utils/InjectTLIMappings.h"
138 #include "llvm/Transforms/Utils/LoopSimplify.h"
139 #include "llvm/Transforms/Utils/LoopUtils.h"
140 #include "llvm/Transforms/Utils/LoopVersioning.h"
141 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
142 #include "llvm/Transforms/Utils/SizeOpts.h"
143 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
144 #include <algorithm>
145 #include <cassert>
146 #include <cstdint>
147 #include <cstdlib>
148 #include <functional>
149 #include <iterator>
150 #include <limits>
151 #include <memory>
152 #include <string>
153 #include <tuple>
154 #include <utility>
155 
156 using namespace llvm;
157 
158 #define LV_NAME "loop-vectorize"
159 #define DEBUG_TYPE LV_NAME
160 
161 #ifndef NDEBUG
162 const char VerboseDebug[] = DEBUG_TYPE "-verbose";
163 #endif
164 
165 /// @{
166 /// Metadata attribute names
167 const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
168 const char LLVMLoopVectorizeFollowupVectorized[] =
169     "llvm.loop.vectorize.followup_vectorized";
170 const char LLVMLoopVectorizeFollowupEpilogue[] =
171     "llvm.loop.vectorize.followup_epilogue";
172 /// @}
173 
174 STATISTIC(LoopsVectorized, "Number of loops vectorized");
175 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
176 STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");
177 
178 static cl::opt<bool> EnableEpilogueVectorization(
179     "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
180     cl::desc("Enable vectorization of epilogue loops."));
181 
182 static cl::opt<unsigned> EpilogueVectorizationForceVF(
183     "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
184     cl::desc("When epilogue vectorization is enabled, and a value greater than "
185              "1 is specified, forces the given VF for all applicable epilogue "
186              "loops."));
187 
188 static cl::opt<unsigned> EpilogueVectorizationMinVF(
189     "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
190     cl::desc("Only loops with vectorization factor equal to or larger than "
191              "the specified value are considered for epilogue vectorization."));
192 
193 /// Loops with a known constant trip count below this number are vectorized only
194 /// if no scalar iteration overheads are incurred.
195 static cl::opt<unsigned> TinyTripCountVectorThreshold(
196     "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
197     cl::desc("Loops with a constant trip count that is smaller than this "
198              "value are vectorized only if no scalar iteration overheads "
199              "are incurred."));
200 
201 static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
202     "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
203     cl::desc("The maximum allowed number of runtime memory checks with a "
204              "vectorize(enable) pragma."));
205 
206 // Option prefer-predicate-over-epilogue indicates that an epilogue is
207 // undesired, that predication is preferred, and lists the available choices.
208 // I.e., the vectorizer will try to fold the tail loop (epilogue) into the
209 // vector body and predicate the instructions accordingly. If tail-folding
210 // fails, there are different fallback strategies depending on these values:
211 namespace PreferPredicateTy {
212   enum Option {
213     ScalarEpilogue = 0,
214     PredicateElseScalarEpilogue,
215     PredicateOrDontVectorize
216   };
217 } // namespace PreferPredicateTy
218 
219 static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
220     "prefer-predicate-over-epilogue",
221     cl::init(PreferPredicateTy::ScalarEpilogue),
222     cl::Hidden,
223     cl::desc("Tail-folding and predication preferences over creating a scalar "
224              "epilogue loop."),
225     cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
226                          "scalar-epilogue",
227                          "Don't tail-predicate loops, create scalar epilogue"),
228               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
229                          "predicate-else-scalar-epilogue",
230                          "prefer tail-folding, create scalar epilogue if "
231                          "tail-folding fails."),
232               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
233                          "predicate-dont-vectorize",
234                          "prefer tail-folding, don't attempt vectorization if "
235                          "tail-folding fails.")));
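// An illustrative invocation that prefers tail-folding but falls back to a
// scalar epilogue could look like:
//   opt -passes=loop-vectorize \
//       -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue ...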
236 
237 static cl::opt<bool> MaximizeBandwidth(
238     "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
239     cl::desc("Maximize bandwidth when selecting vectorization factor which "
240              "will be determined by the smallest type in loop."));
241 
242 static cl::opt<bool> EnableInterleavedMemAccesses(
243     "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
244     cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
245 
246 /// An interleave-group may need masking if it resides in a block that needs
247 /// predication, or in order to mask away gaps.
248 static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
249     "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
250     cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));
251 
252 static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
253     "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
254     cl::desc("We don't interleave loops with an estimated constant trip count "
255              "below this number"));
256 
257 static cl::opt<unsigned> ForceTargetNumScalarRegs(
258     "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
259     cl::desc("A flag that overrides the target's number of scalar registers."));
260 
261 static cl::opt<unsigned> ForceTargetNumVectorRegs(
262     "force-target-num-vector-regs", cl::init(0), cl::Hidden,
263     cl::desc("A flag that overrides the target's number of vector registers."));
264 
265 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
266     "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
267     cl::desc("A flag that overrides the target's max interleave factor for "
268              "scalar loops."));
269 
270 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
271     "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
272     cl::desc("A flag that overrides the target's max interleave factor for "
273              "vectorized loops."));
274 
275 static cl::opt<unsigned> ForceTargetInstructionCost(
276     "force-target-instruction-cost", cl::init(0), cl::Hidden,
277     cl::desc("A flag that overrides the target's expected cost for "
278              "an instruction to a single constant value. Mostly "
279              "useful for getting consistent testing."));
280 
281 static cl::opt<bool> ForceTargetSupportsScalableVectors(
282     "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
283     cl::desc(
284         "Pretend that scalable vectors are supported, even if the target does "
285         "not support them. This flag should only be used for testing."));
286 
287 static cl::opt<unsigned> SmallLoopCost(
288     "small-loop-cost", cl::init(20), cl::Hidden,
289     cl::desc(
290         "The cost of a loop that is considered 'small' by the interleaver."));
291 
292 static cl::opt<bool> LoopVectorizeWithBlockFrequency(
293     "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
294     cl::desc("Enable the use of the block frequency analysis to access PGO "
295              "heuristics minimizing code growth in cold regions and being more "
296              "aggressive in hot regions."));
297 
298 // Runtime interleave loops for load/store throughput.
299 static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
300     "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
301     cl::desc(
302         "Enable runtime interleaving until load/store ports are saturated"));
303 
304 /// Interleave small loops with scalar reductions.
305 static cl::opt<bool> InterleaveSmallLoopScalarReduction(
306     "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
307     cl::desc("Enable interleaving for loops with small iteration counts that "
308              "contain scalar reductions to expose ILP."));
309 
310 /// The number of stores in a loop that are allowed to need predication.
311 static cl::opt<unsigned> NumberOfStoresToPredicate(
312     "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
313     cl::desc("Max number of stores to be predicated behind an if."));
314 
315 static cl::opt<bool> EnableIndVarRegisterHeur(
316     "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
317     cl::desc("Count the induction variable only once when interleaving"));
318 
319 static cl::opt<bool> EnableCondStoresVectorization(
320     "enable-cond-stores-vec", cl::init(true), cl::Hidden,
321     cl::desc("Enable if predication of stores during vectorization."));
322 
323 static cl::opt<unsigned> MaxNestedScalarReductionIC(
324     "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
325     cl::desc("The maximum interleave count to use when interleaving a scalar "
326              "reduction in a nested loop."));
327 
328 static cl::opt<bool>
329     PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
330                            cl::Hidden,
331                            cl::desc("Prefer in-loop vector reductions, "
332                                     "overriding the target's preference."));
333 
334 static cl::opt<bool> ForceOrderedReductions(
335     "force-ordered-reductions", cl::init(false), cl::Hidden,
336     cl::desc("Enable the vectorization of loops with in-order (strict) "
337              "FP reductions"));
338 
339 static cl::opt<bool> PreferPredicatedReductionSelect(
340     "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
341     cl::desc(
342         "Prefer predicating a reduction operation over an after loop select."));
343 
344 cl::opt<bool> EnableVPlanNativePath(
345     "enable-vplan-native-path", cl::init(false), cl::Hidden,
346     cl::desc("Enable VPlan-native vectorization path with "
347              "support for outer loop vectorization."));
348 
349 // FIXME: Remove this switch once we have divergence analysis. Currently we
350 // assume divergent non-backedge branches when this switch is true.
351 cl::opt<bool> EnableVPlanPredication(
352     "enable-vplan-predication", cl::init(false), cl::Hidden,
353     cl::desc("Enable VPlan-native vectorization path predicator with "
354              "support for outer loop vectorization."));
355 
356 // This flag enables the stress testing of the VPlan H-CFG construction in the
357 // VPlan-native vectorization path. It must be used in conjunction with
358 // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
359 // verification of the H-CFGs built.
360 static cl::opt<bool> VPlanBuildStressTest(
361     "vplan-build-stress-test", cl::init(false), cl::Hidden,
362     cl::desc(
363         "Build VPlan for every supported loop nest in the function and bail "
364         "out right after the build (stress test the VPlan H-CFG construction "
365         "in the VPlan-native vectorization path)."));
366 
367 cl::opt<bool> llvm::EnableLoopInterleaving(
368     "interleave-loops", cl::init(true), cl::Hidden,
369     cl::desc("Enable loop interleaving in Loop vectorization passes"));
370 cl::opt<bool> llvm::EnableLoopVectorization(
371     "vectorize-loops", cl::init(true), cl::Hidden,
372     cl::desc("Run the Loop vectorization passes"));
373 
374 cl::opt<bool> PrintVPlansInDotFormat(
375     "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
376     cl::desc("Use dot format instead of plain text when dumping VPlans"));
377 
378 /// A helper function that returns true if the given type is irregular. The
379 /// type is irregular if its allocated size doesn't equal the store size of an
380 /// element of the corresponding vector type.
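/// For example, under a typical x86-64 data layout x86_fp80 is irregular (80
/// bits of type size vs. 128 bits of alloc size), while i32 (32 vs. 32 bits)
/// is regular.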
381 static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
382   // Determine if an array of N elements of type Ty is "bitcast compatible"
383   // with a <N x Ty> vector.
384   // This is only true if there is no padding between the array elements.
385   return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
386 }
387 
388 /// A helper function that returns the reciprocal of the block probability of
389 /// predicated blocks. If we return X, we are assuming the predicated block
390 /// will execute once for every X iterations of the loop header.
391 ///
392 /// TODO: We should use actual block probability here, if available. Currently,
393 ///       we always assume predicated blocks have a 50% chance of executing.
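///
/// For example, with the current 50% assumption, callers typically scale the
/// cost of a predicated block by dividing it by the value returned here
/// (i.e. by 2).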
394 static unsigned getReciprocalPredBlockProb() { return 2; }
395 
396 /// A helper function that returns an integer or floating-point constant with
397 /// value C.
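/// For example, for Ty == i32 and C == -1 this returns the signed integer
/// constant i32 -1, and for Ty == float it returns the constant -1.0.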
398 static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
399   return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
400                            : ConstantFP::get(Ty, C);
401 }
402 
403 /// Returns "best known" trip count for the specified loop \p L as defined by
404 /// the following procedure:
405 ///   1) Returns exact trip count if it is known.
406 ///   2) Returns expected trip count according to profile data if any.
407 ///   3) Returns upper bound estimate if it is known.
408 ///   4) Returns None if all of the above failed.
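///
/// For example, for a loop with no constant trip count but with profile data
/// (branch weights) indicating roughly 100 iterations per entry, step 2)
/// returns an estimate of about 100.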
409 static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
410   // Check if exact trip count is known.
411   if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
412     return ExpectedTC;
413 
414   // Check if there is an expected trip count available from profile data.
415   if (LoopVectorizeWithBlockFrequency)
416     if (auto EstimatedTC = getLoopEstimatedTripCount(L))
417       return EstimatedTC;
418 
419   // Check if upper bound estimate is known.
420   if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
421     return ExpectedTC;
422 
423   return None;
424 }
425 
426 // Forward declare GeneratedRTChecks.
427 class GeneratedRTChecks;
428 
429 namespace llvm {
430 
431 AnalysisKey ShouldRunExtraVectorPasses::Key;
432 
433 /// InnerLoopVectorizer vectorizes loops which contain only one basic
434 /// block to a specified vectorization factor (VF).
435 /// This class performs the widening of scalars into vectors, or multiple
436 /// scalars. This class also implements the following features:
437 /// * It inserts an epilogue loop for handling loops that don't have iteration
438 ///   counts that are known to be a multiple of the vectorization factor.
439 /// * It handles the code generation for reduction variables.
440 /// * Scalarization (implementation using scalars) of un-vectorizable
441 ///   instructions.
442 /// InnerLoopVectorizer does not perform any vectorization-legality
443 /// checks, and relies on the caller to check for the different legality
444 /// aspects. The InnerLoopVectorizer relies on the
445 /// LoopVectorizationLegality class to provide information about the induction
446   /// and reduction variables that were found in the loop.
447 class InnerLoopVectorizer {
448 public:
449   InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
450                       LoopInfo *LI, DominatorTree *DT,
451                       const TargetLibraryInfo *TLI,
452                       const TargetTransformInfo *TTI, AssumptionCache *AC,
453                       OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
454                       unsigned UnrollFactor, LoopVectorizationLegality *LVL,
455                       LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
456                       ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
457       : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
458         AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
459         Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
460         PSI(PSI), RTChecks(RTChecks) {
461     // Query this against the original loop and save it here because the profile
462     // of the original loop header may change as the transformation happens.
463     OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
464         OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
465   }
466 
467   virtual ~InnerLoopVectorizer() = default;
468 
469   /// Create a new empty loop that will contain vectorized instructions later
470   /// on, while the old loop will be used as the scalar remainder. Control flow
471   /// is generated around the vectorized (and scalar epilogue) loops consisting
472   /// of various checks and bypasses. Return the pre-header block of the new
473   /// loop and the start value for the canonical induction, if it is != 0. The
474   /// latter is the case when vectorizing the epilogue loop. In the case of
475   /// epilogue vectorization, this function is overridden to handle the more
476   /// complex control flow around the loops.
477   virtual std::pair<BasicBlock *, Value *> createVectorizedLoopSkeleton();
478 
479   /// Widen a single call instruction within the innermost loop.
480   void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
481                             VPTransformState &State);
482 
483   /// Fix the vectorized code, taking care of header phis, live-outs, and more.
484   void fixVectorizedLoop(VPTransformState &State);
485 
486   // Return true if any runtime check is added.
487   bool areSafetyChecksAdded() { return AddedSafetyChecks; }
488 
489   /// A type for vectorized values in the new loop. Each value from the
490   /// original loop, when vectorized, is represented by UF vector values in the
491   /// new unrolled loop, where UF is the unroll factor.
492   using VectorParts = SmallVector<Value *, 2>;
493 
494   /// Vectorize a single first-order recurrence or pointer induction PHINode in
495   /// a block. This method handles the induction variable canonicalization. It
496   /// supports both VF = 1 for unrolled loops and arbitrary length vectors.
497   void widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR,
498                            VPTransformState &State);
499 
500   /// A helper function to scalarize a single Instruction in the innermost loop.
501   /// Generates a scalar instance of the instruction for the single lane and
502   /// part given by \p Instance. Uses the VPValue operands from \p RepRecipe
503   /// instead of \p Instr's operands. If \p IfPredicateInstr is true, the
504   /// generated instance will be predicated.
505   void scalarizeInstruction(Instruction *Instr, VPReplicateRecipe *RepRecipe,
506                             const VPIteration &Instance, bool IfPredicateInstr,
507                             VPTransformState &State);
508 
509   /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
510   /// is provided, the integer induction variable will first be truncated to
511   /// the corresponding type. \p CanonicalIV is the scalar value generated for
512   /// the canonical induction variable.
513   void widenIntOrFpInduction(PHINode *IV, const InductionDescriptor &ID,
514                              Value *Start, TruncInst *Trunc, VPValue *Def,
515                              VPTransformState &State, Value *CanonicalIV);
516 
517   /// Construct the vector value of a scalarized value \p V one lane at a time.
518   void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
519                                  VPTransformState &State);
520 
521   /// Try to vectorize interleaved access group \p Group with the base address
522   /// given in \p Addr, optionally masking the vector operations if \p
523   /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
524   /// values in the vectorized loop.
525   void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
526                                 ArrayRef<VPValue *> VPDefs,
527                                 VPTransformState &State, VPValue *Addr,
528                                 ArrayRef<VPValue *> StoredValues,
529                                 VPValue *BlockInMask = nullptr);
530 
531   /// Set the debug location in \p CustomBuilder using the debug location of
532   /// \p V. If \p CustomBuilder is None, the class member's Builder is used.
533   void setDebugLocFromInst(const Value *V,
534                            Optional<IRBuilder<> *> CustomBuilder = None);
535 
536   /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
537   void fixNonInductionPHIs(VPTransformState &State);
538 
539   /// Returns true if the reordering of FP operations is not allowed, but we are
540   /// able to vectorize with strict in-order reductions for the given RdxDesc.
541   bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc);
542 
543   /// Create a broadcast instruction. This method generates a broadcast
544   /// instruction (shuffle) for loop invariant values and for the induction
545   /// value. If this is the induction variable then we extend it to N, N+1, ...
546   /// This is needed because each iteration in the loop corresponds to a SIMD
547   /// element.
548   virtual Value *getBroadcastInstrs(Value *V);
549 
550   /// Add metadata from one instruction to another.
551   ///
552   /// This includes both the original MDs from \p From and additional ones (\see
553   /// addNewMetadata).  Use this for *newly created* instructions in the vector
554   /// loop.
555   void addMetadata(Instruction *To, Instruction *From);
556 
557   /// Similar to the previous function but it adds the metadata to a
558   /// vector of instructions.
559   void addMetadata(ArrayRef<Value *> To, Instruction *From);
560 
561 protected:
562   friend class LoopVectorizationPlanner;
563 
564   /// A small list of PHINodes.
565   using PhiVector = SmallVector<PHINode *, 4>;
566 
567   /// A type for scalarized values in the new loop. Each value from the
568   /// original loop, when scalarized, is represented by UF x VF scalar values
569   /// in the new unrolled loop, where UF is the unroll factor and VF is the
570   /// vectorization factor.
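  /// For example, with UF = 2 and VF = 4, each original scalar value is
  /// represented by two parts of four scalar Values each.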
571   using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;
572 
573   /// Set up the values of the IVs correctly when exiting the vector loop.
574   void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
575                     Value *CountRoundDown, Value *EndValue,
576                     BasicBlock *MiddleBlock);
577 
578   /// Introduce a conditional branch (on true, condition to be set later) at the
579   /// end of the header=latch connecting it to itself (across the backedge) and
580   /// to the exit block of \p L.
581   void createHeaderBranch(Loop *L);
582 
583   /// Handle all cross-iteration phis in the header.
584   void fixCrossIterationPHIs(VPTransformState &State);
585 
586   /// Create the exit value of first order recurrences in the middle block and
587   /// update their users.
588   void fixFirstOrderRecurrence(VPFirstOrderRecurrencePHIRecipe *PhiR,
589                                VPTransformState &State);
590 
591   /// Create code for the loop exit value of the reduction.
592   void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);
593 
594   /// Clear NSW/NUW flags from reduction instructions if necessary.
595   void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
596                                VPTransformState &State);
597 
598   /// Fixup the LCSSA phi nodes in the unique exit block.  This simply
599   /// means we need to add the appropriate incoming value from the middle
600   /// block as exiting edges from the scalar epilogue loop (if present) are
601   /// already in place, and we exit the vector loop exclusively to the middle
602   /// block.
603   void fixLCSSAPHIs(VPTransformState &State);
604 
605   /// Iteratively sink the scalarized operands of a predicated instruction into
606   /// the block that was created for it.
607   void sinkScalarOperands(Instruction *PredInst);
608 
609   /// Shrinks vector element sizes to the smallest bitwidth they can be legally
610   /// represented as.
611   void truncateToMinimalBitwidths(VPTransformState &State);
612 
613   /// Compute scalar induction steps. \p ScalarIV is the scalar induction
614   /// variable on which to base the steps, \p Step is the size of the step, and
615   /// \p EntryVal is the value from the original loop that maps to the steps.
616   /// Note that \p EntryVal doesn't have to be an induction variable - it
617   /// can also be a truncate instruction.
618   void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
619                         const InductionDescriptor &ID, VPValue *Def,
620                         VPTransformState &State);
621 
622   /// Create a vector induction phi node based on an existing scalar one. \p
623   /// EntryVal is the value from the original loop that maps to the vector phi
624   /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
625   /// truncate instruction, instead of widening the original IV, we widen a
626   /// version of the IV truncated to \p EntryVal's type.
627   void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
628                                        Value *Step, Value *Start,
629                                        Instruction *EntryVal, VPValue *Def,
630                                        VPTransformState &State);
631 
632   /// Returns true if an instruction \p I should be scalarized instead of
633   /// vectorized for the chosen vectorization factor.
634   bool shouldScalarizeInstruction(Instruction *I) const;
635 
636   /// Returns true if we should generate a scalar version of \p IV.
637   bool needsScalarInduction(Instruction *IV) const;
638 
639   /// Returns (and creates if needed) the original loop trip count.
640   Value *getOrCreateTripCount(Loop *NewLoop);
641 
642   /// Returns (and creates if needed) the trip count of the widened loop.
643   Value *getOrCreateVectorTripCount(Loop *NewLoop);
644 
645   /// Returns a bitcasted value to the requested vector type.
646   /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
647   Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
648                                 const DataLayout &DL);
649 
650   /// Emit a bypass check to see if the vector trip count is zero, including if
651   /// it overflows.
652   void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
653 
654   /// Emit a bypass check to see if all of the SCEV assumptions we've
655   /// had to make are correct. Returns the block containing the checks or
656   /// nullptr if no checks have been added.
657   BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass);
658 
659   /// Emit bypass checks to check any memory assumptions we may have made.
660   /// Returns the block containing the checks or nullptr if no checks have been
661   /// added.
662   BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);
663 
664   /// Compute the transformed value of Index at offset StartValue using step
665   /// StepValue.
666   /// For integer induction, returns StartValue + Index * StepValue.
667   /// For pointer induction, returns StartValue[Index * StepValue].
668   /// FIXME: The newly created binary instructions should contain nsw/nuw
669   /// flags, which can be found from the original scalar operations.
670   Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
671                               const DataLayout &DL,
672                               const InductionDescriptor &ID,
673                               BasicBlock *VectorHeader) const;
674 
675   /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
676   /// vector loop preheader, middle block and scalar preheader. Also
677   /// allocate a loop object for the new vector loop and return it.
678   Loop *createVectorLoopSkeleton(StringRef Prefix);
679 
680   /// Create new phi nodes for the induction variables to resume iteration count
681   /// in the scalar epilogue, from where the vectorized loop left off.
682   /// In cases where the loop skeleton is more complicated (e.g. epilogue
683   /// vectorization) and the resume values can come from an additional bypass
684   /// block, the \p AdditionalBypass pair provides information about the bypass
685   /// block and the end value on the edge from bypass to this loop.
686   void createInductionResumeValues(
687       Loop *L,
688       std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});
689 
690   /// Complete the loop skeleton by adding debug MDs, creating appropriate
691   /// conditional branches in the middle block, preparing the builder and
692   /// running the verifier. Take in the vector loop \p L as argument, and return
693   /// the preheader of the completed vector loop.
694   BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);
695 
696   /// Add additional metadata to \p To that was not present on \p Orig.
697   ///
698   /// Currently this is used to add the noalias annotations based on the
699   /// inserted memchecks.  Use this for instructions that are *cloned* into the
700   /// vector loop.
701   void addNewMetadata(Instruction *To, const Instruction *Orig);
702 
703   /// Collect poison-generating recipes that may generate a poison value that is
704   /// used after vectorization, even when their operands are not poison. Those
705   /// recipes meet the following conditions:
706   ///  * Contribute to the address computation of a recipe generating a widen
707   ///    memory load/store (VPWidenMemoryInstructionRecipe or
708   ///    VPInterleaveRecipe).
709   ///  * Such a widen memory load/store has at least one underlying Instruction
710   ///    that is in a basic block that needs predication and after vectorization
711   ///    the generated instruction won't be predicated.
712   void collectPoisonGeneratingRecipes(VPTransformState &State);
713 
714   /// Allow subclasses to override and print debug traces before/after vplan
715   /// execution, when trace information is requested.
716   virtual void printDebugTracesAtStart() {}
717   virtual void printDebugTracesAtEnd() {}
718 
719   /// The original loop.
720   Loop *OrigLoop;
721 
722   /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
723   /// dynamic knowledge to simplify SCEV expressions and converts them to a
724   /// more usable form.
725   PredicatedScalarEvolution &PSE;
726 
727   /// Loop Info.
728   LoopInfo *LI;
729 
730   /// Dominator Tree.
731   DominatorTree *DT;
732 
733   /// Alias Analysis.
734   AAResults *AA;
735 
736   /// Target Library Info.
737   const TargetLibraryInfo *TLI;
738 
739   /// Target Transform Info.
740   const TargetTransformInfo *TTI;
741 
742   /// Assumption Cache.
743   AssumptionCache *AC;
744 
745   /// Interface to emit optimization remarks.
746   OptimizationRemarkEmitter *ORE;
747 
748   /// LoopVersioning.  It's only set up (non-null) if memchecks were
749   /// used.
750   ///
751   /// This is currently only used to add no-alias metadata based on the
752   /// memchecks.  The actual versioning is performed manually.
753   std::unique_ptr<LoopVersioning> LVer;
754 
755   /// The vectorization SIMD factor to use. Each vector will have this many
756   /// vector elements.
757   ElementCount VF;
758 
759   /// The vectorization unroll factor to use. Each scalar is vectorized to this
760   /// many different vector instructions.
761   unsigned UF;
762 
763   /// The builder that we use
764   IRBuilder<> Builder;
765 
766   // --- Vectorization state ---
767 
768   /// The vector-loop preheader.
769   BasicBlock *LoopVectorPreHeader;
770 
771   /// The scalar-loop preheader.
772   BasicBlock *LoopScalarPreHeader;
773 
774   /// Middle Block between the vector and the scalar.
775   BasicBlock *LoopMiddleBlock;
776 
777   /// The unique ExitBlock of the scalar loop if one exists.  Note that
778   /// there can be multiple exiting edges reaching this block.
779   BasicBlock *LoopExitBlock;
780 
781   /// The vector loop body.
782   BasicBlock *LoopVectorBody;
783 
784   /// The scalar loop body.
785   BasicBlock *LoopScalarBody;
786 
787   /// A list of all bypass blocks. The first block is the entry of the loop.
788   SmallVector<BasicBlock *, 4> LoopBypassBlocks;
789 
790   /// Store instructions that were predicated.
791   SmallVector<Instruction *, 4> PredicatedInstructions;
792 
793   /// Trip count of the original loop.
794   Value *TripCount = nullptr;
795 
796   /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
797   Value *VectorTripCount = nullptr;
798 
799   /// The legality analysis.
800   LoopVectorizationLegality *Legal;
801 
802   /// The profitability analysis.
803   LoopVectorizationCostModel *Cost;
804 
805   // Record whether runtime checks are added.
806   bool AddedSafetyChecks = false;
807 
808   // Holds the end values for each induction variable. We save the end values
809   // so we can later fix-up the external users of the induction variables.
810   DenseMap<PHINode *, Value *> IVEndValues;
811 
812   // Vector of original scalar PHIs whose corresponding widened PHIs need to be
813   // fixed up at the end of vector code generation.
814   SmallVector<PHINode *, 8> OrigPHIsToFix;
815 
816   /// BFI and PSI are used to check for profile guided size optimizations.
817   BlockFrequencyInfo *BFI;
818   ProfileSummaryInfo *PSI;
819 
820   // Whether this loop should be optimized for size based on profile guided size
821   // optimizations.
822   bool OptForSizeBasedOnProfile;
823 
824   /// Structure to hold information about generated runtime checks, responsible
825   /// for cleaning the checks, if vectorization turns out unprofitable.
826   GeneratedRTChecks &RTChecks;
827 };
828 
829 class InnerLoopUnroller : public InnerLoopVectorizer {
830 public:
831   InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
832                     LoopInfo *LI, DominatorTree *DT,
833                     const TargetLibraryInfo *TLI,
834                     const TargetTransformInfo *TTI, AssumptionCache *AC,
835                     OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
836                     LoopVectorizationLegality *LVL,
837                     LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
838                     ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
839       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
840                             ElementCount::getFixed(1), UnrollFactor, LVL, CM,
841                             BFI, PSI, Check) {}
842 
843 private:
844   Value *getBroadcastInstrs(Value *V) override;
845 };
846 
847 /// Encapsulate information regarding vectorization of a loop and its epilogue.
848 /// This information is meant to be updated and used across two stages of
849 /// epilogue vectorization.
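///
/// For illustration (hypothetical values), a main loop vectorized with VF=16,
/// UF=2 and an epilogue vectorized with VF=8, UF=1 would be described as:
///   EpilogueLoopVectorizationInfo EPI(ElementCount::getFixed(16), 2,
///                                     ElementCount::getFixed(8), 1);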
850 struct EpilogueLoopVectorizationInfo {
851   ElementCount MainLoopVF = ElementCount::getFixed(0);
852   unsigned MainLoopUF = 0;
853   ElementCount EpilogueVF = ElementCount::getFixed(0);
854   unsigned EpilogueUF = 0;
855   BasicBlock *MainLoopIterationCountCheck = nullptr;
856   BasicBlock *EpilogueIterationCountCheck = nullptr;
857   BasicBlock *SCEVSafetyCheck = nullptr;
858   BasicBlock *MemSafetyCheck = nullptr;
859   Value *TripCount = nullptr;
860   Value *VectorTripCount = nullptr;
861 
862   EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF,
863                                 ElementCount EVF, unsigned EUF)
864       : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF) {
865     assert(EUF == 1 &&
866            "A high UF for the epilogue loop is likely not beneficial.");
867   }
868 };
869 
870 /// An extension of the inner loop vectorizer that creates a skeleton for a
871 /// vectorized loop that has its epilogue (residual) also vectorized.
872 /// The idea is to run the VPlan on a given loop twice, first to set up the
873 /// skeleton and vectorize the main loop, and second to complete the skeleton
874 /// from the first step and vectorize the epilogue.  This is achieved by
875 /// deriving two concrete strategy classes from this base class and invoking
876 /// them in succession from the loop vectorizer planner.
877 class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
878 public:
879   InnerLoopAndEpilogueVectorizer(
880       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
881       DominatorTree *DT, const TargetLibraryInfo *TLI,
882       const TargetTransformInfo *TTI, AssumptionCache *AC,
883       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
884       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
885       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
886       GeneratedRTChecks &Checks)
887       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
888                             EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
889                             Checks),
890         EPI(EPI) {}
891 
892   // Override this function to handle the more complex control flow around the
893   // three loops.
894   std::pair<BasicBlock *, Value *>
895   createVectorizedLoopSkeleton() final override {
896     return createEpilogueVectorizedLoopSkeleton();
897   }
898 
899   /// The interface for creating a vectorized skeleton using one of two
900   /// different strategies, each corresponding to one execution of the vplan
901   /// as described above.
902   virtual std::pair<BasicBlock *, Value *>
903   createEpilogueVectorizedLoopSkeleton() = 0;
904 
905   /// Holds and updates state information required to vectorize the main loop
906   /// and its epilogue in two separate passes. This setup helps us avoid
907   /// regenerating and recomputing runtime safety checks. It also helps us to
908   /// shorten the iteration-count-check path length for the cases where the
909   /// iteration count of the loop is so small that the main vector loop is
910   /// completely skipped.
911   EpilogueLoopVectorizationInfo &EPI;
912 };
913 
914 /// A specialized derived class of inner loop vectorizer that performs
915 /// vectorization of *main* loops in the process of vectorizing loops and their
916 /// epilogues.
917 class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
918 public:
919   EpilogueVectorizerMainLoop(
920       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
921       DominatorTree *DT, const TargetLibraryInfo *TLI,
922       const TargetTransformInfo *TTI, AssumptionCache *AC,
923       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
924       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
925       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
926       GeneratedRTChecks &Check)
927       : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
928                                        EPI, LVL, CM, BFI, PSI, Check) {}
929   /// Implements the interface for creating a vectorized skeleton using the
930   /// *main loop* strategy (i.e. the first pass of VPlan execution).
931   std::pair<BasicBlock *, Value *>
932   createEpilogueVectorizedLoopSkeleton() final override;
933 
934 protected:
935   /// Emits an iteration count bypass check once for the main loop (when \p
936   /// ForEpilogue is false) and once for the epilogue loop (when \p
937   /// ForEpilogue is true).
938   BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
939                                              bool ForEpilogue);
940   void printDebugTracesAtStart() override;
941   void printDebugTracesAtEnd() override;
942 };
943 
944 // A specialized derived class of inner loop vectorizer that performs
945 // vectorization of *epilogue* loops in the process of vectorizing loops and
946 // their epilogues.
947 class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
948 public:
949   EpilogueVectorizerEpilogueLoop(
950       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
951       DominatorTree *DT, const TargetLibraryInfo *TLI,
952       const TargetTransformInfo *TTI, AssumptionCache *AC,
953       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
954       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
955       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
956       GeneratedRTChecks &Checks)
957       : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
958                                        EPI, LVL, CM, BFI, PSI, Checks) {}
959   /// Implements the interface for creating a vectorized skeleton using the
960   /// *epilogue loop* strategy (i.e. the second pass of VPlan execution).
961   std::pair<BasicBlock *, Value *>
962   createEpilogueVectorizedLoopSkeleton() final override;
963 
964 protected:
965   /// Emits an iteration count bypass check after the main vector loop has
966   /// finished to see if there are any iterations left to execute by either
967   /// the vector epilogue or the scalar epilogue.
968   BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
969                                                       BasicBlock *Bypass,
970                                                       BasicBlock *Insert);
971   void printDebugTracesAtStart() override;
972   void printDebugTracesAtEnd() override;
973 };
974 } // end namespace llvm
975 
976 /// Look for a meaningful debug location on the instruction or its
977 /// operands.
978 static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
979   if (!I)
980     return I;
981 
982   DebugLoc Empty;
983   if (I->getDebugLoc() != Empty)
984     return I;
985 
986   for (Use &Op : I->operands()) {
987     if (Instruction *OpInst = dyn_cast<Instruction>(Op))
988       if (OpInst->getDebugLoc() != Empty)
989         return OpInst;
990   }
991 
992   return I;
993 }
994 
995 void InnerLoopVectorizer::setDebugLocFromInst(
996     const Value *V, Optional<IRBuilder<> *> CustomBuilder) {
997   IRBuilder<> *B = (CustomBuilder == None) ? &Builder : *CustomBuilder;
998   if (const Instruction *Inst = dyn_cast_or_null<Instruction>(V)) {
999     const DILocation *DIL = Inst->getDebugLoc();
1000 
1001     // When an FSDiscriminator is enabled, we don't need to add the multiply
1002     // factors to the discriminators.
1003     if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
1004         !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
1005       // FIXME: For scalable vectors, assume vscale=1.
1006       auto NewDIL =
1007           DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
1008       if (NewDIL)
1009         B->SetCurrentDebugLocation(NewDIL.getValue());
1010       else
1011         LLVM_DEBUG(dbgs()
1012                    << "Failed to create new discriminator: "
1013                    << DIL->getFilename() << " Line: " << DIL->getLine());
1014     } else
1015       B->SetCurrentDebugLocation(DIL);
1016   } else
1017     B->SetCurrentDebugLocation(DebugLoc());
1018 }
1019 
1020 /// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
1021 /// is passed, the message relates to that particular instruction.
1022 #ifndef NDEBUG
1023 static void debugVectorizationMessage(const StringRef Prefix,
1024                                       const StringRef DebugMsg,
1025                                       Instruction *I) {
1026   dbgs() << "LV: " << Prefix << DebugMsg;
1027   if (I != nullptr)
1028     dbgs() << " " << *I;
1029   else
1030     dbgs() << '.';
1031   dbgs() << '\n';
1032 }
1033 #endif
1034 
1035 /// Create an analysis remark that explains why vectorization failed
1036 ///
1037 /// \p PassName is the name of the pass (e.g. can be AlwaysPrint).  \p
1038 /// RemarkName is the identifier for the remark.  If \p I is passed it is an
1039 /// instruction that prevents vectorization.  Otherwise \p TheLoop is used for
1040 /// the location of the remark.  \return the remark object that can be
1041 /// streamed to.
1042 static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
1043     StringRef RemarkName, Loop *TheLoop, Instruction *I) {
1044   Value *CodeRegion = TheLoop->getHeader();
1045   DebugLoc DL = TheLoop->getStartLoc();
1046 
1047   if (I) {
1048     CodeRegion = I->getParent();
1049     // If there is no debug location attached to the instruction, fall back
1050     // to using the loop's.
1051     if (I->getDebugLoc())
1052       DL = I->getDebugLoc();
1053   }
1054 
1055   return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
1056 }
1057 
1058 namespace llvm {
1059 
1060 /// Return a value for Step multiplied by VF.
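/// For example, with Step = 2 and a scalable VF whose minimum is 4, this
/// returns the runtime value 8 * vscale; with a fixed VF of 4 it returns the
/// constant 8.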
1061 Value *createStepForVF(IRBuilder<> &B, Type *Ty, ElementCount VF,
1062                        int64_t Step) {
1063   assert(Ty->isIntegerTy() && "Expected an integer step");
1064   Constant *StepVal = ConstantInt::get(Ty, Step * VF.getKnownMinValue());
1065   return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
1066 }
1067 
1068 /// Return the runtime value for VF.
1069 Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF) {
1070   Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
1071   return VF.isScalable() ? B.CreateVScale(EC) : EC;
1072 }
1073 
1074 static Value *getRuntimeVFAsFloat(IRBuilder<> &B, Type *FTy, ElementCount VF) {
1075   assert(FTy->isFloatingPointTy() && "Expected floating point type!");
1076   Type *IntTy = IntegerType::get(FTy->getContext(), FTy->getScalarSizeInBits());
1077   Value *RuntimeVF = getRuntimeVF(B, IntTy, VF);
1078   return B.CreateUIToFP(RuntimeVF, FTy);
1079 }
1080 
1081 void reportVectorizationFailure(const StringRef DebugMsg,
1082                                 const StringRef OREMsg, const StringRef ORETag,
1083                                 OptimizationRemarkEmitter *ORE, Loop *TheLoop,
1084                                 Instruction *I) {
1085   LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
1086   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1087   ORE->emit(
1088       createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
1089       << "loop not vectorized: " << OREMsg);
1090 }
1091 
1092 void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
1093                              OptimizationRemarkEmitter *ORE, Loop *TheLoop,
1094                              Instruction *I) {
1095   LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
1096   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1097   ORE->emit(
1098       createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
1099       << Msg);
1100 }
1101 
1102 } // end namespace llvm
1103 
1104 #ifndef NDEBUG
1105 /// \return string containing a file name and a line # for the given loop.
1106 static std::string getDebugLocString(const Loop *L) {
1107   std::string Result;
1108   if (L) {
1109     raw_string_ostream OS(Result);
1110     if (const DebugLoc LoopDbgLoc = L->getStartLoc())
1111       LoopDbgLoc.print(OS);
1112     else
1113       // Just print the module name.
1114       OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
1115     OS.flush();
1116   }
1117   return Result;
1118 }
1119 #endif
1120 
1121 void InnerLoopVectorizer::addNewMetadata(Instruction *To,
1122                                          const Instruction *Orig) {
1123   // If the loop was versioned with memchecks, add the corresponding no-alias
1124   // metadata.
1125   if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
1126     LVer->annotateInstWithNoAlias(To, Orig);
1127 }
1128 
1129 void InnerLoopVectorizer::collectPoisonGeneratingRecipes(
1130     VPTransformState &State) {
1131 
1132   // Collect recipes in the backward slice of `Root` that may generate a poison
1133   // value that is used after vectorization.
1134   SmallPtrSet<VPRecipeBase *, 16> Visited;
1135   auto collectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
1136     SmallVector<VPRecipeBase *, 16> Worklist;
1137     Worklist.push_back(Root);
1138 
1139     // Traverse the backward slice of Root through its use-def chain.
1140     while (!Worklist.empty()) {
1141       VPRecipeBase *CurRec = Worklist.back();
1142       Worklist.pop_back();
1143 
1144       if (!Visited.insert(CurRec).second)
1145         continue;
1146 
1147       // Prune search if we find another recipe generating a widen memory
1148       // instruction. Widen memory instructions involved in address computation
1149       // will lead to gather/scatter instructions, which don't need to be
1150       // handled.
1151       if (isa<VPWidenMemoryInstructionRecipe>(CurRec) ||
1152           isa<VPInterleaveRecipe>(CurRec) ||
1153           isa<VPCanonicalIVPHIRecipe>(CurRec))
1154         continue;
1155 
1156       // This recipe contributes to the address computation of a widen
1157       // load/store. Collect recipe if its underlying instruction has
1158       // poison-generating flags.
1159       Instruction *Instr = CurRec->getUnderlyingInstr();
1160       if (Instr && Instr->hasPoisonGeneratingFlags())
1161         State.MayGeneratePoisonRecipes.insert(CurRec);
1162 
1163       // Add new definitions to the worklist.
1164       for (VPValue *Op : CurRec->operands())
1165         if (VPDef *OpDef = Op->getDef())
1166           Worklist.push_back(cast<VPRecipeBase>(OpDef));
1167     }
1168   });
1169 
1170   // Traverse all the recipes in the VPlan and collect the poison-generating
1171   // recipes in the backward slice starting at the address of a
1172   // VPWidenMemoryInstructionRecipe or a VPInterleaveRecipe.
1173   auto Iter = depth_first(
1174       VPBlockRecursiveTraversalWrapper<VPBlockBase *>(State.Plan->getEntry()));
1175   for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
1176     for (VPRecipeBase &Recipe : *VPBB) {
1177       if (auto *WidenRec = dyn_cast<VPWidenMemoryInstructionRecipe>(&Recipe)) {
1178         Instruction *UnderlyingInstr = WidenRec->getUnderlyingInstr();
1179         VPDef *AddrDef = WidenRec->getAddr()->getDef();
1180         if (AddrDef && WidenRec->isConsecutive() && UnderlyingInstr &&
1181             Legal->blockNeedsPredication(UnderlyingInstr->getParent()))
1182           collectPoisonGeneratingInstrsInBackwardSlice(
1183               cast<VPRecipeBase>(AddrDef));
1184       } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
1185         VPDef *AddrDef = InterleaveRec->getAddr()->getDef();
1186         if (AddrDef) {
1187           // Check if any member of the interleave group needs predication.
1188           const InterleaveGroup<Instruction> *InterGroup =
1189               InterleaveRec->getInterleaveGroup();
1190           bool NeedPredication = false;
1191           for (int I = 0, NumMembers = InterGroup->getNumMembers();
1192                I < NumMembers; ++I) {
1193             Instruction *Member = InterGroup->getMember(I);
1194             if (Member)
1195               NeedPredication |=
1196                   Legal->blockNeedsPredication(Member->getParent());
1197           }
1198 
1199           if (NeedPredication)
1200             collectPoisonGeneratingInstrsInBackwardSlice(
1201                 cast<VPRecipeBase>(AddrDef));
1202         }
1203       }
1204     }
1205   }
1206 }
1207 
1208 void InnerLoopVectorizer::addMetadata(Instruction *To,
1209                                       Instruction *From) {
1210   propagateMetadata(To, From);
1211   addNewMetadata(To, From);
1212 }
1213 
1214 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
1215                                       Instruction *From) {
1216   for (Value *V : To) {
1217     if (Instruction *I = dyn_cast<Instruction>(V))
1218       addMetadata(I, From);
1219   }
1220 }
1221 
1222 namespace llvm {
1223 
1224 // Hints for the loop vectorization cost model about how the scalar epilogue
1225 // loop should be lowered.
1226 enum ScalarEpilogueLowering {
1227 
1228   // The default: allowing scalar epilogues.
1229   CM_ScalarEpilogueAllowed,
1230 
1231   // Vectorization with OptForSize: don't allow epilogues.
1232   CM_ScalarEpilogueNotAllowedOptSize,
1233 
1234   // A special case of vectorization with OptForSize: loops with a very small
1235   // trip count are considered for vectorization under OptForSize, thereby
1236   // making sure the cost of their loop body is dominant, free of runtime
1237   // guards and scalar iteration overheads.
1238   CM_ScalarEpilogueNotAllowedLowTripLoop,
1239 
1240   // Loop hint predicate indicating an epilogue is undesired.
1241   CM_ScalarEpilogueNotNeededUsePredicate,
1242 
1243   // Directive indicating we must either tail fold or not vectorize.
1244   CM_ScalarEpilogueNotAllowedUsePredicate
1245 };
1246 
1247 /// ElementCountComparator creates a total ordering for ElementCount
1248 /// for the purposes of using it in a set structure.
1249 struct ElementCountComparator {
1250   bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
1251     return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
1252            std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
1253   }
1254 };
1255 using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;
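// A minimal sketch of the resulting order, assuming these candidate VFs: the
// comparator orders by the (isScalable, known minimum value) tuple, so fixed
// factors sort before scalable ones, e.g. { 2, 4, 8, vscale x 2, vscale x 4 }.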
1256 
1257 /// LoopVectorizationCostModel - estimates the expected speedups due to
1258 /// vectorization.
1259 /// In many cases vectorization is not profitable. This can happen for a
1260 /// number of reasons. In this class we mainly attempt to predict the expected
1261 /// speedups/slowdowns due to the supported instruction set. We use the
1262 /// TargetTransformInfo to query the different backends for the cost of
1263 /// different operations.
1264 class LoopVectorizationCostModel {
1265 public:
1266   LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
1267                              PredicatedScalarEvolution &PSE, LoopInfo *LI,
1268                              LoopVectorizationLegality *Legal,
1269                              const TargetTransformInfo &TTI,
1270                              const TargetLibraryInfo *TLI, DemandedBits *DB,
1271                              AssumptionCache *AC,
1272                              OptimizationRemarkEmitter *ORE, const Function *F,
1273                              const LoopVectorizeHints *Hints,
1274                              InterleavedAccessInfo &IAI)
1275       : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
1276         TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
1277         Hints(Hints), InterleaveInfo(IAI) {}
1278 
1279   /// \return An upper bound for the vectorization factors (both fixed and
1280   /// scalable). If the factors are 0, vectorization and interleaving should be
1281   /// avoided up front.
1282   FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);
1283 
1284   /// \return True if runtime checks are required for vectorization, and false
1285   /// otherwise.
1286   bool runtimeChecksRequired();
1287 
1288   /// \return The most profitable vectorization factor and the cost of that VF.
1289   /// This method checks every VF in \p CandidateVFs. If UserVF is non-zero,
1290   /// that vectorization factor is selected, provided vectorization is
1291   /// possible.
1292   VectorizationFactor
1293   selectVectorizationFactor(const ElementCountSet &CandidateVFs);
1294 
1295   VectorizationFactor
1296   selectEpilogueVectorizationFactor(const ElementCount MaxVF,
1297                                     const LoopVectorizationPlanner &LVP);
1298 
1299   /// Setup cost-based decisions for user vectorization factor.
1300   /// \return true if the UserVF is a feasible VF to be chosen.
1301   bool selectUserVectorizationFactor(ElementCount UserVF) {
1302     collectUniformsAndScalars(UserVF);
1303     collectInstsToScalarize(UserVF);
1304     return expectedCost(UserVF).first.isValid();
1305   }
1306 
1307   /// \return The size (in bits) of the smallest and widest types in the code
1308   /// that needs to be vectorized. We ignore values that remain scalar such as
1309   /// 64 bit loop indices.
1310   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1311 
1312   /// \return The desired interleave count.
1313   /// If interleave count has been specified by metadata it will be returned.
1314   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1315   /// are the selected vectorization factor and the cost of the selected VF.
1316   unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);
1317 
1318   /// A memory access instruction may be vectorized in more than one way.
1319   /// The form an instruction takes after vectorization depends on its cost.
1320   /// This function makes cost-based decisions for Load/Store instructions
1321   /// and collects them in a map. This decision map is used for building
1322   /// the lists of loop-uniform and loop-scalar instructions.
1323   /// The calculated cost is saved with the widening decision in order to
1324   /// avoid redundant calculations.
1325   void setCostBasedWideningDecision(ElementCount VF);
1326 
1327   /// A struct that represents some properties of the register usage
1328   /// of a loop.
1329   struct RegisterUsage {
1330     /// Holds the number of loop invariant values that are used in the loop.
1331     /// The key is ClassID of target-provided register class.
1332     SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
1333     /// Holds the maximum number of concurrent live intervals in the loop.
1334     /// The key is ClassID of target-provided register class.
1335     SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
1336   };
1337 
1338   /// \return Information about the register usage of the loop for the
1339   /// given vectorization factors.
1340   SmallVector<RegisterUsage, 8>
1341   calculateRegisterUsage(ArrayRef<ElementCount> VFs);
1342 
1343   /// Collect values we want to ignore in the cost model.
1344   void collectValuesToIgnore();
1345 
1346   /// Collect all element types in the loop for which widening is needed.
1347   void collectElementTypesForWidening();
1348 
1349   /// Split reductions into those that happen in the loop, and those that happen
1350   /// outside. In-loop reductions are collected into InLoopReductionChains.
1351   void collectInLoopReductions();
1352 
1353   /// Returns true if we should use strict in-order reductions for the given
1354   /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
1355   /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
1356   /// of FP operations.
1357   bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) {
1358     return !Hints->allowReordering() && RdxDesc.isOrdered();
1359   }
1360 
1361   /// \returns The smallest bitwidth each instruction can be represented with.
1362   /// The vector equivalents of these instructions should be truncated to this
1363   /// type.
1364   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1365     return MinBWs;
1366   }
1367 
1368   /// \returns True if it is more profitable to scalarize instruction \p I for
1369   /// vectorization factor \p VF.
1370   bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
1371     assert(VF.isVector() &&
1372            "Profitable to scalarize relevant only for VF > 1.");
1373 
1374     // Cost model is not run in the VPlan-native path - return conservative
1375     // result until this changes.
1376     if (EnableVPlanNativePath)
1377       return false;
1378 
1379     auto Scalars = InstsToScalarize.find(VF);
1380     assert(Scalars != InstsToScalarize.end() &&
1381            "VF not yet analyzed for scalarization profitability");
1382     return Scalars->second.find(I) != Scalars->second.end();
1383   }
1384 
1385   /// Returns true if \p I is known to be uniform after vectorization.
1386   bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
1387     if (VF.isScalar())
1388       return true;
1389 
1390     // Cost model is not run in the VPlan-native path - return conservative
1391     // result until this changes.
1392     if (EnableVPlanNativePath)
1393       return false;
1394 
1395     auto UniformsPerVF = Uniforms.find(VF);
1396     assert(UniformsPerVF != Uniforms.end() &&
1397            "VF not yet analyzed for uniformity");
1398     return UniformsPerVF->second.count(I);
1399   }
1400 
1401   /// Returns true if \p I is known to be scalar after vectorization.
1402   bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
1403     if (VF.isScalar())
1404       return true;
1405 
1406     // Cost model is not run in the VPlan-native path - return conservative
1407     // result until this changes.
1408     if (EnableVPlanNativePath)
1409       return false;
1410 
1411     auto ScalarsPerVF = Scalars.find(VF);
1412     assert(ScalarsPerVF != Scalars.end() &&
1413            "Scalar values are not calculated for VF");
1414     return ScalarsPerVF->second.count(I);
1415   }
1416 
1417   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1418   /// for vectorization factor \p VF.
1419   bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
1420     return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
1421            !isProfitableToScalarize(I, VF) &&
1422            !isScalarAfterVectorization(I, VF);
1423   }
1424 
1425   /// Decision that was taken during cost calculation for memory instruction.
1426   enum InstWidening {
1427     CM_Unknown,
1428     CM_Widen,         // For consecutive accesses with stride +1.
1429     CM_Widen_Reverse, // For consecutive accesses with stride -1.
1430     CM_Interleave,
1431     CM_GatherScatter,
1432     CM_Scalarize
1433   };
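  // Illustrative mapping, assuming simple access patterns: a unit-stride load
  // of A[i] is a candidate for CM_Widen, a load of A[N - i] for
  // CM_Widen_Reverse, members of an interleave group for CM_Interleave, an
  // indirect access like A[B[i]] for CM_GatherScatter, and accesses that are
  // cheaper as scalars for CM_Scalarize.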
1434 
1435   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1436   /// instruction \p I and vector width \p VF.
1437   void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
1438                            InstructionCost Cost) {
1439     assert(VF.isVector() && "Expected VF >=2");
1440     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1441   }
1442 
1443   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1444   /// interleaving group \p Grp and vector width \p VF.
1445   void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
1446                            ElementCount VF, InstWidening W,
1447                            InstructionCost Cost) {
1448     assert(VF.isVector() && "Expected VF >=2");
1449     // Broadcast this decision to all instructions inside the group,
1450     // but the cost will be assigned to one instruction only.
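    // For example (hypothetical group): for a group {A, B, C} with insert
    // position B, the decision W is recorded below for A, B and C, but the
    // full cost is attached to B only; A and C get a cost of 0 so the group's
    // cost is not counted multiple times.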
1451     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1452       if (auto *I = Grp->getMember(i)) {
1453         if (Grp->getInsertPos() == I)
1454           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1455         else
1456           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1457       }
1458     }
1459   }
1460 
1461   /// Return the cost model decision for the given instruction \p I and vector
1462   /// width \p VF. Return CM_Unknown if this instruction did not pass
1463   /// through the cost modeling.
1464   InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
1465     assert(VF.isVector() && "Expected VF to be a vector VF");
1466     // Cost model is not run in the VPlan-native path - return conservative
1467     // result until this changes.
1468     if (EnableVPlanNativePath)
1469       return CM_GatherScatter;
1470 
1471     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1472     auto Itr = WideningDecisions.find(InstOnVF);
1473     if (Itr == WideningDecisions.end())
1474       return CM_Unknown;
1475     return Itr->second.first;
1476   }
1477 
1478   /// Return the vectorization cost for the given instruction \p I and vector
1479   /// width \p VF.
1480   InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
1481     assert(VF.isVector() && "Expected VF >=2");
1482     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1483     assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1484            "The cost is not calculated");
1485     return WideningDecisions[InstOnVF].second;
1486   }
1487 
1488   /// Return True if instruction \p I is an optimizable truncate whose operand
1489   /// is an induction variable. Such a truncate will be removed by adding a new
1490   /// induction variable with the destination type.
1491   bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
1492     // If the instruction is not a truncate, return false.
1493     auto *Trunc = dyn_cast<TruncInst>(I);
1494     if (!Trunc)
1495       return false;
1496 
1497     // Get the source and destination types of the truncate.
1498     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1499     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1500 
1501     // If the truncate is free for the given types, return false. Replacing a
1502     // free truncate with an induction variable would add an induction variable
1503     // update instruction to each iteration of the loop. We exclude from this
1504     // check the primary induction variable since it will need an update
1505     // instruction regardless.
1506     Value *Op = Trunc->getOperand(0);
1507     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1508       return false;
1509 
1510     // If the truncated value is not an induction variable, return false.
1511     return Legal->isInductionPhi(Op);
1512   }
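  // A sketch of the pattern recognized above (hypothetical IR):
  //   %iv = phi i64 [ 0, %ph ], [ %iv.next, %loop ]  ; primary induction
  //   %t  = trunc i64 %iv to i32                     ; optimizable truncate
  // The truncate can be removed by creating a new i32 induction variable with
  // the destination type.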
1513 
1514   /// Collects the instructions to scalarize for each predicated instruction in
1515   /// the loop.
1516   void collectInstsToScalarize(ElementCount VF);
1517 
1518   /// Collect Uniform and Scalar values for the given \p VF.
1519   /// The sets depend on CM decision for Load/Store instructions
1520   /// that may be vectorized as interleave, gather-scatter or scalarized.
1521   void collectUniformsAndScalars(ElementCount VF) {
1522     // Do the analysis once.
1523     if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
1524       return;
1525     setCostBasedWideningDecision(VF);
1526     collectLoopUniforms(VF);
1527     collectLoopScalars(VF);
1528   }
1529 
1530   /// Returns true if the target machine supports masked store operation
1531   /// for the given \p DataType and kind of access to \p Ptr.
1532   bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const {
1533     return Legal->isConsecutivePtr(DataType, Ptr) &&
1534            TTI.isLegalMaskedStore(DataType, Alignment);
1535   }
1536 
1537   /// Returns true if the target machine supports masked load operation
1538   /// for the given \p DataType and kind of access to \p Ptr.
1539   bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const {
1540     return Legal->isConsecutivePtr(DataType, Ptr) &&
1541            TTI.isLegalMaskedLoad(DataType, Alignment);
1542   }
1543 
1544   /// Returns true if the target machine can represent \p V as a masked gather
1545   /// or scatter operation.
1546   bool isLegalGatherOrScatter(Value *V,
1547                               ElementCount VF = ElementCount::getFixed(1)) {
1548     bool LI = isa<LoadInst>(V);
1549     bool SI = isa<StoreInst>(V);
1550     if (!LI && !SI)
1551       return false;
1552     auto *Ty = getLoadStoreType(V);
1553     Align Align = getLoadStoreAlignment(V);
1554     if (VF.isVector())
1555       Ty = VectorType::get(Ty, VF);
1556     return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
1557            (SI && TTI.isLegalMaskedScatter(Ty, Align));
1558   }
1559 
1560   /// Returns true if the target machine supports all of the reduction
1561   /// variables found for the given VF.
1562   bool canVectorizeReductions(ElementCount VF) const {
1563     return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
1564       const RecurrenceDescriptor &RdxDesc = Reduction.second;
1565       return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
1566     }));
1567   }
1568 
1569   /// Returns true if \p I is an instruction that will be scalarized with
1570   /// predication when vectorizing \p I with vectorization factor \p VF. Such
1571   /// instructions include conditional stores and instructions that may divide
1572   /// by zero.
1573   bool isScalarWithPredication(Instruction *I, ElementCount VF) const;
1574 
1575   // Returns true if \p I is an instruction that will be predicated either
1576   // through scalar predication or masked load/store or masked gather/scatter.
1577   // \p VF is the vectorization factor that will be used to vectorize \p I.
1578   // Superset of instructions that return true for isScalarWithPredication.
1579   bool isPredicatedInst(Instruction *I, ElementCount VF,
1580                         bool IsKnownUniform = false) {
1581     // When we know the load is uniform and the original scalar loop was not
1582     // predicated we don't need to mark it as a predicated instruction. Any
1583     // vectorised blocks created when tail-folding are something artificial we
1584     // have introduced and we know there is always at least one active lane.
1585     // That's why we call Legal->blockNeedsPredication here because it doesn't
1586     // query tail-folding.
1587     if (IsKnownUniform && isa<LoadInst>(I) &&
1588         !Legal->blockNeedsPredication(I->getParent()))
1589       return false;
1590     if (!blockNeedsPredicationForAnyReason(I->getParent()))
1591       return false;
1592     // Loads and stores that need some form of masked operation are predicated
1593     // instructions.
1594     if (isa<LoadInst>(I) || isa<StoreInst>(I))
1595       return Legal->isMaskRequired(I);
1596     return isScalarWithPredication(I, VF);
1597   }
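  // For example, a store that only executes under a condition inside the loop
  // body, or a load/store that requires a mask when the tail is folded, is
  // reported as predicated here.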
1598 
1599   /// Returns true if \p I is a memory instruction with consecutive memory
1600   /// access that can be widened.
1601   bool
1602   memoryInstructionCanBeWidened(Instruction *I,
1603                                 ElementCount VF = ElementCount::getFixed(1));
1604 
1605   /// Returns true if \p I is a memory instruction in an interleaved-group
1606   /// of memory accesses that can be vectorized with wide vector loads/stores
1607   /// and shuffles.
1608   bool
1609   interleavedAccessCanBeWidened(Instruction *I,
1610                                 ElementCount VF = ElementCount::getFixed(1));
1611 
1612   /// Check if \p Instr belongs to any interleaved access group.
1613   bool isAccessInterleaved(Instruction *Instr) {
1614     return InterleaveInfo.isInterleaved(Instr);
1615   }
1616 
1617   /// Get the interleaved access group that \p Instr belongs to.
1618   const InterleaveGroup<Instruction> *
1619   getInterleavedAccessGroup(Instruction *Instr) {
1620     return InterleaveInfo.getInterleaveGroup(Instr);
1621   }
1622 
1623   /// Returns true if we're required to use a scalar epilogue for at least
1624   /// the final iteration of the original loop.
1625   bool requiresScalarEpilogue(ElementCount VF) const {
1626     if (!isScalarEpilogueAllowed())
1627       return false;
1628     // If we might exit from anywhere but the latch, we must run the exiting
1629     // iteration in scalar form.
1630     if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
1631       return true;
1632     return VF.isVector() && InterleaveInfo.requiresScalarEpilogue();
1633   }
1634 
1635   /// Returns true if a scalar epilogue is not allowed due to optsize or a
1636   /// loop hint annotation.
1637   bool isScalarEpilogueAllowed() const {
1638     return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1639   }
1640 
1641   /// Returns true if all loop blocks should be masked to fold tail loop.
1642   bool foldTailByMasking() const { return FoldTailByMasking; }
1643 
1644   /// Returns true if the instructions in this block require predication
1645   /// for any reason, e.g. because tail folding now requires a predicate
1646   /// or because the block in the original loop was predicated.
1647   bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const {
1648     return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1649   }
1650 
1651   /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
1652   /// nodes to the chain of instructions representing the reductions. Uses a
1653   /// MapVector to ensure deterministic iteration order.
1654   using ReductionChainMap =
1655       SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;
1656 
1657   /// Return the chain of instructions representing an inloop reduction.
1658   const ReductionChainMap &getInLoopReductionChains() const {
1659     return InLoopReductionChains;
1660   }
1661 
1662   /// Returns true if the Phi is part of an inloop reduction.
1663   bool isInLoopReduction(PHINode *Phi) const {
1664     return InLoopReductionChains.count(Phi);
1665   }
1666 
1667   /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1668   /// with factor VF.  Return the cost of the instruction, including
1669   /// scalarization overhead if it's needed.
1670   InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;
1671 
1672   /// Estimate cost of a call instruction CI if it were vectorized with factor
1673   /// VF. Return the cost of the instruction, including scalarization overhead
1674   /// if it's needed. The flag NeedToScalarize shows if the call needs to be
1675   /// scalarized, i.e. either a vector version isn't available or it is too
1676   /// expensive.
1677   InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
1678                                     bool &NeedToScalarize) const;
1679 
1680   /// Returns true if the per-lane cost of VectorizationFactor A is lower than
1681   /// that of B.
1682   bool isMoreProfitable(const VectorizationFactor &A,
1683                         const VectorizationFactor &B) const;
1684 
1685   /// Invalidates decisions already taken by the cost model.
1686   void invalidateCostModelingDecisions() {
1687     WideningDecisions.clear();
1688     Uniforms.clear();
1689     Scalars.clear();
1690   }
1691 
1692 private:
1693   unsigned NumPredStores = 0;
1694 
1695   /// \return An upper bound for the vectorization factors for both
1696   /// fixed and scalable vectorization, where the minimum-known number of
1697   /// elements is a power-of-2 larger than zero. If scalable vectorization is
1698   /// disabled or unsupported, then the scalable part will be equal to
1699   /// ElementCount::getScalable(0).
1700   FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
1701                                            ElementCount UserVF,
1702                                            bool FoldTailByMasking);
1703 
1704   /// \return the maximized element count based on the target's vector
1705   /// registers and the loop trip-count, but limited to a maximum safe VF.
1706   /// This is a helper function of computeFeasibleMaxVF.
1707   /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
1708   /// issue that occurred on one of the buildbots which cannot be reproduced
1709   /// without having access to the proprietary compiler (see comments on
1710   /// D98509). The issue is currently under investigation and this workaround
1711   /// will be removed as soon as possible.
1712   ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
1713                                        unsigned SmallestType,
1714                                        unsigned WidestType,
1715                                        const ElementCount &MaxSafeVF,
1716                                        bool FoldTailByMasking);
1717 
1718   /// \return the maximum legal scalable VF, based on the safe max number
1719   /// of elements.
1720   ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1721 
1722   /// The vectorization cost is a combination of the cost itself and a boolean
1723   /// indicating whether any of the contributing operations will actually
1724   /// operate on vector values after type legalization in the backend. If this
1725   /// latter value is false, then all operations will be scalarized (i.e. no
1726   /// vectorization has actually taken place).
1727   using VectorizationCostTy = std::pair<InstructionCost, bool>;
1728 
1729   /// Returns the expected execution cost. The unit of the cost does
1730   /// not matter because we use the 'cost' units to compare different
1731   /// vector widths. The cost that is returned is *not* normalized by
1732   /// the factor width. If \p Invalid is not nullptr, this function
1733   /// will add a pair(Instruction*, ElementCount) to \p Invalid for
1734   /// each instruction that has an Invalid cost for the given VF.
1735   using InstructionVFPair = std::pair<Instruction *, ElementCount>;
1736   VectorizationCostTy
1737   expectedCost(ElementCount VF,
1738                SmallVectorImpl<InstructionVFPair> *Invalid = nullptr);
1739 
1740   /// Returns the execution time cost of an instruction for a given vector
1741   /// width. Vector width of one means scalar.
1742   VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1743 
1744   /// The cost-computation logic from getInstructionCost which provides
1745   /// the vector type as an output parameter.
1746   InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
1747                                      Type *&VectorTy);
1748 
1749   /// Return the cost of instructions in an inloop reduction pattern, if I is
1750   /// part of that pattern.
1751   Optional<InstructionCost>
1752   getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy,
1753                           TTI::TargetCostKind CostKind);
1754 
1755   /// Calculate vectorization cost of memory instruction \p I.
1756   InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1757 
1758   /// The cost computation for scalarized memory instruction.
1759   InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1760 
1761   /// The cost computation for interleaving group of memory instructions.
1762   InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1763 
1764   /// The cost computation for Gather/Scatter instruction.
1765   InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1766 
1767   /// The cost computation for widening instruction \p I with consecutive
1768   /// memory access.
1769   InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1770 
1771   /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1772   /// Load: scalar load + broadcast.
1773   /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1774   /// element)
1775   InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1776 
1777   /// Estimate the overhead of scalarizing an instruction. This is a
1778   /// convenience wrapper for the type-based getScalarizationOverhead API.
1779   InstructionCost getScalarizationOverhead(Instruction *I,
1780                                            ElementCount VF) const;
1781 
1782   /// Returns whether the instruction is a load or store and will be emitted
1783   /// as a vector operation.
1784   bool isConsecutiveLoadOrStore(Instruction *I);
1785 
1786   /// Returns true if an artificially high cost for emulated masked memrefs
1787   /// should be used.
1788   bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF);
1789 
1790   /// Map of scalar integer values to the smallest bitwidth they can be legally
1791   /// represented as. The vector equivalents of these values should be truncated
1792   /// to this type.
1793   MapVector<Instruction *, uint64_t> MinBWs;
1794 
1795   /// A type representing the costs for instructions if they were to be
1796   /// scalarized rather than vectorized. The entries are Instruction-Cost
1797   /// pairs.
1798   using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1799 
1800   /// A set containing all BasicBlocks that are known to be present after
1801   /// vectorization as predicated blocks.
1802   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1803 
1804   /// Records whether it is allowed to have the original scalar loop execute at
1805   /// least once. This may be needed as a fallback loop in case runtime
1806   /// aliasing/dependence checks fail, or to handle the tail/remainder
1807   /// iterations when the trip count is unknown or is not a multiple of the VF,
1808   /// or as a peel-loop to handle gaps in interleave-groups.
1809   /// Under optsize and when the trip count is very small we don't allow any
1810   /// iterations to execute in the scalar loop.
1811   ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1812 
1813   /// All blocks of loop are to be masked to fold tail of scalar iterations.
1814   bool FoldTailByMasking = false;
1815 
1816   /// A map holding scalar costs for different vectorization factors. The
1817   /// presence of a cost for an instruction in the mapping indicates that the
1818   /// instruction will be scalarized when vectorizing with the associated
1819   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1820   DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1821 
1822   /// Holds the instructions known to be uniform after vectorization.
1823   /// The data is collected per VF.
1824   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1825 
1826   /// Holds the instructions known to be scalar after vectorization.
1827   /// The data is collected per VF.
1828   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1829 
1830   /// Holds the instructions (address computations) that are forced to be
1831   /// scalarized.
1832   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1833 
1834   /// PHINodes of the reductions that should be expanded in-loop along with
1835   /// their associated chains of reduction operations, in program order from top
1836   /// (PHI) to bottom.
1837   ReductionChainMap InLoopReductionChains;
1838 
1839   /// A Map of inloop reduction operations and their immediate chain operand.
1840   /// FIXME: This can be removed once reductions can be costed correctly in
1841   /// vplan. This was added to allow quick lookup to the inloop operations,
1842   /// without having to loop through InLoopReductionChains.
1843   DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1844 
1845   /// Returns the expected difference in cost from scalarizing the expression
1846   /// feeding a predicated instruction \p PredInst. The instructions to
1847   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1848   /// non-negative return value implies the expression will be scalarized.
1849   /// Currently, only single-use chains are considered for scalarization.
1850   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1851                               ElementCount VF);
1852 
1853   /// Collect the instructions that are uniform after vectorization. An
1854   /// instruction is uniform if we represent it with a single scalar value in
1855   /// the vectorized loop corresponding to each vector iteration. Examples of
1856   /// uniform instructions include pointer operands of consecutive or
1857   /// interleaved memory accesses. Note that although uniformity implies an
1858   /// instruction will be scalar, the reverse is not true. In general, a
1859   /// scalarized instruction will be represented by VF scalar values in the
1860   /// vectorized loop, each corresponding to an iteration of the original
1861   /// scalar loop.
1862   void collectLoopUniforms(ElementCount VF);
1863 
1864   /// Collect the instructions that are scalar after vectorization. An
1865   /// instruction is scalar if it is known to be uniform or will be scalarized
1866   /// during vectorization. collectLoopScalars should only add non-uniform nodes
1867   /// to the list if they are used by a load/store instruction that is marked as
1868   /// CM_Scalarize. Non-uniform scalarized instructions will be represented by
1869   /// VF values in the vectorized loop, each corresponding to an iteration of
1870   /// the original scalar loop.
1871   void collectLoopScalars(ElementCount VF);
1872 
1873   /// Keeps cost model vectorization decision and cost for instructions.
1874   /// Right now it is used for memory instructions only.
1875   using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1876                                 std::pair<InstWidening, InstructionCost>>;
1877 
1878   DecisionList WideningDecisions;
1879 
1880   /// Returns true if \p V is expected to be vectorized and it needs to be
1881   /// extracted.
1882   bool needsExtract(Value *V, ElementCount VF) const {
1883     Instruction *I = dyn_cast<Instruction>(V);
1884     if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1885         TheLoop->isLoopInvariant(I))
1886       return false;
1887 
1888     // Assume we can vectorize V (and hence we need extraction) if the
1889     // scalars are not computed yet. This can happen, because it is called
1890     // via getScalarizationOverhead from setCostBasedWideningDecision, before
1891     // the scalars are collected. That should be a safe assumption in most
1892     // cases, because we check if the operands have vectorizable types
1893     // beforehand in LoopVectorizationLegality.
1894     return Scalars.find(VF) == Scalars.end() ||
1895            !isScalarAfterVectorization(I, VF);
1896   };
1897 
1898   /// Returns a range containing only operands needing to be extracted.
1899   SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1900                                                    ElementCount VF) const {
1901     return SmallVector<Value *, 4>(make_filter_range(
1902         Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
1903   }
1904 
1905   /// Determines if we have the infrastructure to vectorize loop \p L and its
1906   /// epilogue, assuming the main loop is vectorized by \p VF.
1907   bool isCandidateForEpilogueVectorization(const Loop &L,
1908                                            const ElementCount VF) const;
1909 
1910   /// Returns true if epilogue vectorization is considered profitable, and
1911   /// false otherwise.
1912   /// \p VF is the vectorization factor chosen for the original loop.
1913   bool isEpilogueVectorizationProfitable(const ElementCount VF) const;
1914 
1915 public:
1916   /// The loop that we evaluate.
1917   Loop *TheLoop;
1918 
1919   /// Predicated scalar evolution analysis.
1920   PredicatedScalarEvolution &PSE;
1921 
1922   /// Loop Info analysis.
1923   LoopInfo *LI;
1924 
1925   /// Vectorization legality.
1926   LoopVectorizationLegality *Legal;
1927 
1928   /// Vector target information.
1929   const TargetTransformInfo &TTI;
1930 
1931   /// Target Library Info.
1932   const TargetLibraryInfo *TLI;
1933 
1934   /// Demanded bits analysis.
1935   DemandedBits *DB;
1936 
1937   /// Assumption cache.
1938   AssumptionCache *AC;
1939 
1940   /// Interface to emit optimization remarks.
1941   OptimizationRemarkEmitter *ORE;
1942 
1943   const Function *TheFunction;
1944 
1945   /// Loop Vectorize Hint.
1946   const LoopVectorizeHints *Hints;
1947 
1948   /// The interleave access information contains groups of interleaved accesses
1949   /// with the same stride and close to each other.
1950   InterleavedAccessInfo &InterleaveInfo;
1951 
1952   /// Values to ignore in the cost model.
1953   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1954 
1955   /// Values to ignore in the cost model when VF > 1.
1956   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1957 
1958   /// All element types found in the loop.
1959   SmallPtrSet<Type *, 16> ElementTypesInLoop;
1960 
1961   /// Profitable vector factors.
1962   SmallVector<VectorizationFactor, 8> ProfitableVFs;
1963 };
1964 } // end namespace llvm
1965 
1966 /// Helper struct to manage generating runtime checks for vectorization.
1967 ///
1968 /// The runtime checks are created up-front in temporary blocks, to allow
1969 /// better cost estimation, and are un-linked from the existing IR. After
1970 /// deciding to vectorize, the checks are moved back. If deciding not to
1971 /// vectorize, the temporary blocks are completely removed.
1972 class GeneratedRTChecks {
1973   /// Basic block which contains the generated SCEV checks, if any.
1974   BasicBlock *SCEVCheckBlock = nullptr;
1975 
1976   /// The value representing the result of the generated SCEV checks. If it is
1977   /// nullptr, either no SCEV checks have been generated or they have been used.
1978   Value *SCEVCheckCond = nullptr;
1979 
1980   /// Basic block which contains the generated memory runtime checks, if any.
1981   BasicBlock *MemCheckBlock = nullptr;
1982 
1983   /// The value representing the result of the generated memory runtime checks.
1984   /// If it is nullptr, either no memory runtime checks have been generated or
1985   /// they have been used.
1986   Value *MemRuntimeCheckCond = nullptr;
1987 
1988   DominatorTree *DT;
1989   LoopInfo *LI;
1990 
1991   SCEVExpander SCEVExp;
1992   SCEVExpander MemCheckExp;
1993 
1994 public:
1995   GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI,
1996                     const DataLayout &DL)
1997       : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"),
1998         MemCheckExp(SE, DL, "scev.check") {}
1999 
2000   /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
2001   /// accurately estimate the cost of the runtime checks. The blocks are
2002   /// un-linked from the IR and are added back during vector code generation. If
2003   /// there is no vector code generation, the check blocks are removed
2004   /// completely.
2005   void Create(Loop *L, const LoopAccessInfo &LAI,
2006               const SCEVUnionPredicate &UnionPred) {
2007 
2008     BasicBlock *LoopHeader = L->getHeader();
2009     BasicBlock *Preheader = L->getLoopPreheader();
2010 
2011     // Use SplitBlock to create blocks for SCEV & memory runtime checks to
2012     // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
2013     // may be used by SCEVExpander. The blocks will be un-linked from their
2014     // predecessors and removed from LI & DT at the end of the function.
2015     if (!UnionPred.isAlwaysTrue()) {
2016       SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
2017                                   nullptr, "vector.scevcheck");
2018 
2019       SCEVCheckCond = SCEVExp.expandCodeForPredicate(
2020           &UnionPred, SCEVCheckBlock->getTerminator());
2021     }
2022 
2023     const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
2024     if (RtPtrChecking.Need) {
2025       auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
2026       MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
2027                                  "vector.memcheck");
2028 
2029       MemRuntimeCheckCond =
2030           addRuntimeChecks(MemCheckBlock->getTerminator(), L,
2031                            RtPtrChecking.getChecks(), MemCheckExp);
2032       assert(MemRuntimeCheckCond &&
2033              "no RT checks generated although RtPtrChecking "
2034              "claimed checks are required");
2035     }
2036 
2037     if (!MemCheckBlock && !SCEVCheckBlock)
2038       return;
2039 
2040     // Unhook the temporary block with the checks, update various places
2041     // accordingly.
2042     if (SCEVCheckBlock)
2043       SCEVCheckBlock->replaceAllUsesWith(Preheader);
2044     if (MemCheckBlock)
2045       MemCheckBlock->replaceAllUsesWith(Preheader);
2046 
2047     if (SCEVCheckBlock) {
2048       SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
2049       new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
2050       Preheader->getTerminator()->eraseFromParent();
2051     }
2052     if (MemCheckBlock) {
2053       MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
2054       new UnreachableInst(Preheader->getContext(), MemCheckBlock);
2055       Preheader->getTerminator()->eraseFromParent();
2056     }
2057 
2058     DT->changeImmediateDominator(LoopHeader, Preheader);
2059     if (MemCheckBlock) {
2060       DT->eraseNode(MemCheckBlock);
2061       LI->removeBlock(MemCheckBlock);
2062     }
2063     if (SCEVCheckBlock) {
2064       DT->eraseNode(SCEVCheckBlock);
2065       LI->removeBlock(SCEVCheckBlock);
2066     }
2067   }
2068 
2069   /// Remove the created SCEV & memory runtime check blocks & instructions, if
2070   /// unused.
2071   ~GeneratedRTChecks() {
2072     SCEVExpanderCleaner SCEVCleaner(SCEVExp);
2073     SCEVExpanderCleaner MemCheckCleaner(MemCheckExp);
2074     if (!SCEVCheckCond)
2075       SCEVCleaner.markResultUsed();
2076 
2077     if (!MemRuntimeCheckCond)
2078       MemCheckCleaner.markResultUsed();
2079 
2080     if (MemRuntimeCheckCond) {
2081       auto &SE = *MemCheckExp.getSE();
2082       // Memory runtime check generation creates compares that use expanded
2083       // values. Remove them before running the SCEVExpanderCleaners.
2084       for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
2085         if (MemCheckExp.isInsertedInstruction(&I))
2086           continue;
2087         SE.forgetValue(&I);
2088         I.eraseFromParent();
2089       }
2090     }
2091     MemCheckCleaner.cleanup();
2092     SCEVCleaner.cleanup();
2093 
2094     if (SCEVCheckCond)
2095       SCEVCheckBlock->eraseFromParent();
2096     if (MemRuntimeCheckCond)
2097       MemCheckBlock->eraseFromParent();
2098   }
2099 
2100   /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and
2101   /// adjusts the branches to branch to the vector preheader or \p Bypass,
2102   /// depending on the generated condition.
2103   BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass,
2104                              BasicBlock *LoopVectorPreHeader,
2105                              BasicBlock *LoopExitBlock) {
2106     if (!SCEVCheckCond)
2107       return nullptr;
2108     if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond))
2109       if (C->isZero())
2110         return nullptr;
2111 
2112     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2113 
2114     BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock);
2115     // Create new preheader for vector loop.
2116     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2117       PL->addBasicBlockToLoop(SCEVCheckBlock, *LI);
2118 
2119     SCEVCheckBlock->getTerminator()->eraseFromParent();
2120     SCEVCheckBlock->moveBefore(LoopVectorPreHeader);
2121     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2122                                                 SCEVCheckBlock);
2123 
2124     DT->addNewBlock(SCEVCheckBlock, Pred);
2125     DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock);
2126 
2127     ReplaceInstWithInst(
2128         SCEVCheckBlock->getTerminator(),
2129         BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond));
2130     // Mark the check as used, to prevent it from being removed during cleanup.
2131     SCEVCheckCond = nullptr;
2132     return SCEVCheckBlock;
2133   }
2134 
2135   /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts
2136   /// the branches to branch to the vector preheader or \p Bypass, depending on
2137   /// the generated condition.
2138   BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass,
2139                                    BasicBlock *LoopVectorPreHeader) {
2140     // Check if we generated code that checks at runtime whether arrays overlap.
2141     if (!MemRuntimeCheckCond)
2142       return nullptr;
2143 
2144     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2145     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2146                                                 MemCheckBlock);
2147 
2148     DT->addNewBlock(MemCheckBlock, Pred);
2149     DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock);
2150     MemCheckBlock->moveBefore(LoopVectorPreHeader);
2151 
2152     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2153       PL->addBasicBlockToLoop(MemCheckBlock, *LI);
2154 
2155     ReplaceInstWithInst(
2156         MemCheckBlock->getTerminator(),
2157         BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond));
2158     MemCheckBlock->getTerminator()->setDebugLoc(
2159         Pred->getTerminator()->getDebugLoc());
2160 
2161     // Mark the check as used, to prevent it from being removed during cleanup.
2162     MemRuntimeCheckCond = nullptr;
2163     return MemCheckBlock;
2164   }
2165 };
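// A usage sketch, simplified relative to the actual call sites in this file:
//   GeneratedRTChecks Checks(*SE, DT, LI, DL);
//   Checks.Create(L, LAI, UnionPred);     // build the check blocks up-front
//   ... estimate cost and decide whether to vectorize ...
//   Checks.emitSCEVChecks(...);           // hook the blocks back into the CFG,
//   Checks.emitMemRuntimeChecks(...);     // or leave the conditions unused so
//                                         // the destructor removes the blocks.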
2166 
2167 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
2168 // vectorization. The loop needs to be annotated with #pragma omp simd
2169 // simdlen(#) or #pragma clang loop vectorize(enable) vectorize_width(#). If
2170 // the vector length information is not provided, vectorization is not
2171 // considered explicit. Interleave hints are not allowed either. These
2172 // limitations will be relaxed in the future.
2173 // Please note that we are currently forced to abuse the pragma 'clang loop
2174 // vectorize' semantics. This pragma provides *auto-vectorization hints*
2175 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
2176 // provides *explicit vectorization hints* (LV can bypass legal checks and
2177 // assume that vectorization is legal). However, both hints are implemented
2178 // using the same metadata (llvm.loop.vectorize, processed by
2179 // LoopVectorizeHints). This will be fixed in the future when the native IR
2180 // representation for pragma 'omp simd' is introduced.
2181 static bool isExplicitVecOuterLoop(Loop *OuterLp,
2182                                    OptimizationRemarkEmitter *ORE) {
2183   assert(!OuterLp->isInnermost() && "This is not an outer loop");
2184   LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
2185 
2186   // Only outer loops with an explicit vectorization hint are supported.
2187   // Unannotated outer loops are ignored.
2188   if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
2189     return false;
2190 
2191   Function *Fn = OuterLp->getHeader()->getParent();
2192   if (!Hints.allowVectorization(Fn, OuterLp,
2193                                 true /*VectorizeOnlyWhenForced*/)) {
2194     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
2195     return false;
2196   }
2197 
2198   if (Hints.getInterleave() > 1) {
2199     // TODO: Interleave support is future work.
2200     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
2201                          "outer loops.\n");
2202     Hints.emitRemarkWithHints();
2203     return false;
2204   }
2205 
2206   return true;
2207 }
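// For illustration, a source-level sketch of an outer loop this accepts
// (hypothetical code):
//   #pragma omp simd simdlen(4)
//   for (int i = 0; i < N; ++i)    // annotated outer loop
//     for (int j = 0; j < M; ++j)  // inner loop
//       A[i][j] += B[j];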
2208 
2209 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
2210                                   OptimizationRemarkEmitter *ORE,
2211                                   SmallVectorImpl<Loop *> &V) {
2212   // Collect inner loops and outer loops without irreducible control flow. For
2213   // now, only collect outer loops that have explicit vectorization hints. If we
2214   // are stress testing the VPlan H-CFG construction, we collect the outermost
2215   // loop of every loop nest.
2216   if (L.isInnermost() || VPlanBuildStressTest ||
2217       (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
2218     LoopBlocksRPO RPOT(&L);
2219     RPOT.perform(LI);
2220     if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
2221       V.push_back(&L);
2222       // TODO: Collect inner loops inside marked outer loops in case
2223       // vectorization fails for the outer loop. Do not invoke
2224       // 'containsIrreducibleCFG' again for inner loops when the outer loop is
2225       // already known to be reducible. We can use an inherited attribute for
2226       // that.
2227       return;
2228     }
2229   }
2230   for (Loop *InnerL : L)
2231     collectSupportedLoops(*InnerL, LI, ORE, V);
2232 }
2233 
2234 namespace {
2235 
2236 /// The LoopVectorize Pass.
2237 struct LoopVectorize : public FunctionPass {
2238   /// Pass identification, replacement for typeid
2239   static char ID;
2240 
2241   LoopVectorizePass Impl;
2242 
2243   explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
2244                          bool VectorizeOnlyWhenForced = false)
2245       : FunctionPass(ID),
2246         Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
2247     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
2248   }
2249 
2250   bool runOnFunction(Function &F) override {
2251     if (skipFunction(F))
2252       return false;
2253 
2254     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2255     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2256     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
2257     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2258     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
2259     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
2260     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
2261     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2262     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2263     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
2264     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
2265     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2266     auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
2267 
2268     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
2269         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
2270 
2271     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
2272                         GetLAA, *ORE, PSI).MadeAnyChange;
2273   }
2274 
2275   void getAnalysisUsage(AnalysisUsage &AU) const override {
2276     AU.addRequired<AssumptionCacheTracker>();
2277     AU.addRequired<BlockFrequencyInfoWrapperPass>();
2278     AU.addRequired<DominatorTreeWrapperPass>();
2279     AU.addRequired<LoopInfoWrapperPass>();
2280     AU.addRequired<ScalarEvolutionWrapperPass>();
2281     AU.addRequired<TargetTransformInfoWrapperPass>();
2282     AU.addRequired<AAResultsWrapperPass>();
2283     AU.addRequired<LoopAccessLegacyAnalysis>();
2284     AU.addRequired<DemandedBitsWrapperPass>();
2285     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2286     AU.addRequired<InjectTLIMappingsLegacy>();
2287 
2288     // We currently do not preserve loopinfo/dominator analyses with outer loop
2289     // vectorization. Until this is addressed, mark these analyses as preserved
2290     // only for non-VPlan-native path.
2291     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
2292     if (!EnableVPlanNativePath) {
2293       AU.addPreserved<LoopInfoWrapperPass>();
2294       AU.addPreserved<DominatorTreeWrapperPass>();
2295     }
2296 
2297     AU.addPreserved<BasicAAWrapperPass>();
2298     AU.addPreserved<GlobalsAAWrapperPass>();
2299     AU.addRequired<ProfileSummaryInfoWrapperPass>();
2300   }
2301 };
2302 
2303 } // end anonymous namespace
2304 
2305 //===----------------------------------------------------------------------===//
2306 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
2307 // LoopVectorizationCostModel and LoopVectorizationPlanner.
2308 //===----------------------------------------------------------------------===//
2309 
2310 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
2311   // We need to place the broadcast of invariant variables outside the loop,
2312   // but only if it's proven safe to do so. Otherwise, the broadcast will be
2313   // placed inside the vector loop body.
2314   Instruction *Instr = dyn_cast<Instruction>(V);
2315   bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
2316                      (!Instr ||
2317                       DT->dominates(Instr->getParent(), LoopVectorPreHeader));
2318   // Place the code for broadcasting invariant variables in the new preheader.
2319   IRBuilder<>::InsertPointGuard Guard(Builder);
2320   if (SafeToHoist)
2321     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2322 
2323   // Broadcast the scalar into all locations in the vector.
2324   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
2325 
2326   return Shuf;
2327 }
2328 
2329 /// This function adds
2330 /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
2331 /// to each vector element of Val. The sequence starts at \p StartIdx.
2332 /// \p BinOp is only relevant for FP induction variables.
2333 static Value *getStepVector(Value *Val, Value *StartIdx, Value *Step,
2334                             Instruction::BinaryOps BinOp, ElementCount VF,
2335                             IRBuilder<> &Builder) {
2336   assert(VF.isVector() && "only vector VFs are supported");
2337 
2338   // Create and check the types.
2339   auto *ValVTy = cast<VectorType>(Val->getType());
2340   ElementCount VLen = ValVTy->getElementCount();
2341 
2342   Type *STy = Val->getType()->getScalarType();
2343   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
2344          "Induction Step must be an integer or FP");
2345   assert(Step->getType() == STy && "Step has wrong type");
2346 
2347   SmallVector<Constant *, 8> Indices;
2348 
2349   // Create a vector of consecutive numbers from zero to VF-1.
2350   VectorType *InitVecValVTy = ValVTy;
2351   Type *InitVecValSTy = STy;
2352   if (STy->isFloatingPointTy()) {
2353     InitVecValSTy =
2354         IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
2355     InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
2356   }
2357   Value *InitVec = Builder.CreateStepVector(InitVecValVTy);
2358 
2359   // Splat the StartIdx
2360   Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx);
2361 
2362   if (STy->isIntegerTy()) {
2363     InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);
2364     Step = Builder.CreateVectorSplat(VLen, Step);
2365     assert(Step->getType() == Val->getType() && "Invalid step vec");
2366     // FIXME: The newly created binary instructions should contain nsw/nuw
2367     // flags, which can be found from the original scalar operations.
2368     Step = Builder.CreateMul(InitVec, Step);
2369     return Builder.CreateAdd(Val, Step, "induction");
2370   }
2371 
2372   // Floating point induction.
2373   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
2374          "Binary Opcode should be specified for FP induction");
2375   InitVec = Builder.CreateUIToFP(InitVec, ValVTy);
2376   InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat);
2377 
2378   Step = Builder.CreateVectorSplat(VLen, Step);
2379   Value *MulOp = Builder.CreateFMul(InitVec, Step);
2380   return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
2381 }
2382 
2383 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
2384     const InductionDescriptor &II, Value *Step, Value *Start,
2385     Instruction *EntryVal, VPValue *Def, VPTransformState &State) {
2386   IRBuilder<> &Builder = State.Builder;
2387   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
2388          "Expected either an induction phi-node or a truncate of it!");
2389 
2390   // Construct the initial value of the vector IV in the vector loop preheader
2391   auto CurrIP = Builder.saveIP();
2392   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2393   if (isa<TruncInst>(EntryVal)) {
2394     assert(Start->getType()->isIntegerTy() &&
2395            "Truncation requires an integer type");
2396     auto *TruncType = cast<IntegerType>(EntryVal->getType());
2397     Step = Builder.CreateTrunc(Step, TruncType);
2398     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
2399   }
2400 
2401   Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0);
2402   Value *SplatStart = Builder.CreateVectorSplat(State.VF, Start);
2403   Value *SteppedStart = getStepVector(
2404       SplatStart, Zero, Step, II.getInductionOpcode(), State.VF, State.Builder);
2405 
2406   // We create vector phi nodes for both integer and floating-point induction
2407   // variables. Here, we determine the kind of arithmetic we will perform.
2408   Instruction::BinaryOps AddOp;
2409   Instruction::BinaryOps MulOp;
2410   if (Step->getType()->isIntegerTy()) {
2411     AddOp = Instruction::Add;
2412     MulOp = Instruction::Mul;
2413   } else {
2414     AddOp = II.getInductionOpcode();
2415     MulOp = Instruction::FMul;
2416   }
2417 
2418   // Multiply the vectorization factor by the step using integer or
2419   // floating-point arithmetic as appropriate.
2420   Type *StepType = Step->getType();
2421   Value *RuntimeVF;
2422   if (Step->getType()->isFloatingPointTy())
2423     RuntimeVF = getRuntimeVFAsFloat(Builder, StepType, State.VF);
2424   else
2425     RuntimeVF = getRuntimeVF(Builder, StepType, State.VF);
2426   Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF);
2427 
2428   // Create a vector splat to use in the induction update.
2429   //
2430   // FIXME: If the step is non-constant, we create the vector splat with
2431   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
2432   //        handle a constant vector splat.
2433   Value *SplatVF = isa<Constant>(Mul)
2434                        ? ConstantVector::getSplat(State.VF, cast<Constant>(Mul))
2435                        : Builder.CreateVectorSplat(State.VF, Mul);
2436   Builder.restoreIP(CurrIP);
2437 
2438   // We may need to add the step a number of times, depending on the unroll
2439   // factor. The last of those goes into the PHI.
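  //
  // Illustrative sketch of the structure created below for an integer IV with
  // a fixed VF of 4 and UF = 2 (names are simplified, not actual output):
  //   %vec.ind      = phi <4 x i32> [ %stepped.start, %vector.ph ],
  //                                 [ %vec.ind.next, %vector.latch ]
  //   %step.add     = add <4 x i32> %vec.ind, %splat.vf    ; value for part 1
  //   %vec.ind.next = add <4 x i32> %step.add, %splat.vf   ; moved to the latch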
2440   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
2441                                     &*LoopVectorBody->getFirstInsertionPt());
2442   VecInd->setDebugLoc(EntryVal->getDebugLoc());
2443   Instruction *LastInduction = VecInd;
2444   for (unsigned Part = 0; Part < UF; ++Part) {
2445     State.set(Def, LastInduction, Part);
2446 
2447     if (isa<TruncInst>(EntryVal))
2448       addMetadata(LastInduction, EntryVal);
2449 
2450     LastInduction = cast<Instruction>(
2451         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"));
2452     LastInduction->setDebugLoc(EntryVal->getDebugLoc());
2453   }
2454 
2455   // Move the last step to the end of the latch block. This ensures consistent
2456   // placement of all induction updates.
2457   auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
2458   auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
2459   LastInduction->moveBefore(Br);
2460   LastInduction->setName("vec.ind.next");
2461 
2462   VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
2463   VecInd->addIncoming(LastInduction, LoopVectorLatch);
2464 }
2465 
2466 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
2467   return Cost->isScalarAfterVectorization(I, VF) ||
2468          Cost->isProfitableToScalarize(I, VF);
2469 }
2470 
2471 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
2472   if (shouldScalarizeInstruction(IV))
2473     return true;
2474   auto isScalarInst = [&](User *U) -> bool {
2475     auto *I = cast<Instruction>(U);
2476     return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
2477   };
2478   return llvm::any_of(IV->users(), isScalarInst);
2479 }
2480 
2481 /// Returns true if \p ID starts at 0 and has a step of 1.
2482 static bool isCanonicalID(const InductionDescriptor &ID) {
2483   if (!ID.getConstIntStepValue() || !ID.getConstIntStepValue()->isOne())
2484     return false;
2485   auto *StartC = dyn_cast<ConstantInt>(ID.getStartValue());
2486   return StartC && StartC->isZero();
2487 }
2488 
2489 void InnerLoopVectorizer::widenIntOrFpInduction(
2490     PHINode *IV, const InductionDescriptor &ID, Value *Start, TruncInst *Trunc,
2491     VPValue *Def, VPTransformState &State, Value *CanonicalIV) {
2492   IRBuilder<> &Builder = State.Builder;
2493   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
2494   assert(!State.VF.isZero() && "VF must be non-zero");
2495 
2496   // The value from the original loop to which we are mapping the new induction
2497   // variable.
2498   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
2499 
2500   auto &DL = EntryVal->getModule()->getDataLayout();
2501 
2502   // Generate code for the induction step. Note that induction steps are
2503   // required to be loop-invariant
2504   auto CreateStepValue = [&](const SCEV *Step) -> Value * {
2505     assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) &&
2506            "Induction step should be loop invariant");
2507     if (PSE.getSE()->isSCEVable(IV->getType())) {
2508       SCEVExpander Exp(*PSE.getSE(), DL, "induction");
2509       return Exp.expandCodeFor(Step, Step->getType(),
2510                                State.CFG.VectorPreHeader->getTerminator());
2511     }
2512     return cast<SCEVUnknown>(Step)->getValue();
2513   };
2514 
2515   // The scalar value to broadcast. This is derived from the canonical
2516   // induction variable. If a truncation type is given, truncate the canonical
2517   // induction variable and step. Otherwise, derive these values from the
2518   // induction descriptor.
2519   auto CreateScalarIV = [&](Value *&Step) -> Value * {
2520     Value *ScalarIV = CanonicalIV;
2521     Type *NeededType = IV->getType();
2522     if (!isCanonicalID(ID) || ScalarIV->getType() != NeededType) {
2523       ScalarIV =
2524           NeededType->isIntegerTy()
2525               ? Builder.CreateSExtOrTrunc(ScalarIV, NeededType)
2526               : Builder.CreateCast(Instruction::SIToFP, ScalarIV, NeededType);
2527       ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID,
2528                                       State.CFG.PrevBB);
2529       ScalarIV->setName("offset.idx");
2530     }
2531     if (Trunc) {
2532       auto *TruncType = cast<IntegerType>(Trunc->getType());
2533       assert(Step->getType()->isIntegerTy() &&
2534              "Truncation requires an integer step");
2535       ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
2536       Step = Builder.CreateTrunc(Step, TruncType);
2537     }
2538     return ScalarIV;
2539   };
2540 
  // Create the vector values from the scalar IV when we are not creating a
  // vector IV.
2543   auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) {
2544     Value *Broadcasted = getBroadcastInstrs(ScalarIV);
2545     for (unsigned Part = 0; Part < UF; ++Part) {
2546       Value *StartIdx;
2547       if (Step->getType()->isFloatingPointTy())
2548         StartIdx =
2549             getRuntimeVFAsFloat(Builder, Step->getType(), State.VF * Part);
2550       else
2551         StartIdx = getRuntimeVF(Builder, Step->getType(), State.VF * Part);
2552 
2553       Value *EntryPart =
2554           getStepVector(Broadcasted, StartIdx, Step, ID.getInductionOpcode(),
2555                         State.VF, State.Builder);
2556       State.set(Def, EntryPart, Part);
2557       if (Trunc)
2558         addMetadata(EntryPart, Trunc);
2559     }
2560   };
2561 
2562   // Fast-math-flags propagate from the original induction instruction.
2563   IRBuilder<>::FastMathFlagGuard FMFG(Builder);
2564   if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp()))
2565     Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags());
2566 
2567   // Now do the actual transformations, and start with creating the step value.
2568   Value *Step = CreateStepValue(ID.getStep());
2569   if (State.VF.isScalar()) {
2570     Value *ScalarIV = CreateScalarIV(Step);
2571     Type *ScalarTy = IntegerType::get(ScalarIV->getContext(),
2572                                       Step->getType()->getScalarSizeInBits());
2573 
2574     Instruction::BinaryOps IncOp = ID.getInductionOpcode();
2575     if (IncOp == Instruction::BinaryOpsEnd)
2576       IncOp = Instruction::Add;
2577     for (unsigned Part = 0; Part < UF; ++Part) {
2578       Value *StartIdx = ConstantInt::get(ScalarTy, Part);
2579       Instruction::BinaryOps MulOp = Instruction::Mul;
2580       if (Step->getType()->isFloatingPointTy()) {
2581         StartIdx = Builder.CreateUIToFP(StartIdx, Step->getType());
2582         MulOp = Instruction::FMul;
2583       }
2584 
2585       Value *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
2586       Value *EntryPart = Builder.CreateBinOp(IncOp, ScalarIV, Mul, "induction");
2587       State.set(Def, EntryPart, Part);
2588       if (Trunc) {
2589         assert(!Step->getType()->isFloatingPointTy() &&
2590                "fp inductions shouldn't be truncated");
2591         addMetadata(EntryPart, Trunc);
2592       }
2593     }
2594     return;
2595   }
2596 
2597   // Determine if we want a scalar version of the induction variable. This is
2598   // true if the induction variable itself is not widened, or if it has at
2599   // least one user in the loop that is not widened.
2600   auto NeedsScalarIV = needsScalarInduction(EntryVal);
2601   if (!NeedsScalarIV) {
2602     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, State);
2603     return;
2604   }
2605 
2606   // Try to create a new independent vector induction variable. If we can't
2607   // create the phi node, we will splat the scalar induction variable in each
2608   // loop iteration.
2609   if (!shouldScalarizeInstruction(EntryVal)) {
2610     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, State);
2611     Value *ScalarIV = CreateScalarIV(Step);
2612     // Create scalar steps that can be used by instructions we will later
2613     // scalarize. Note that the addition of the scalar steps will not increase
2614     // the number of instructions in the loop in the common case prior to
2615     // InstCombine. We will be trading one vector extract for each scalar step.
2616     buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, State);
2617     return;
2618   }
2619 
  // All IV users are scalar instructions, so only emit a scalar IV, not a
  // vectorized IV, except when tail-folding: in that case the splat IV feeds
  // the predicate used by the masked loads/stores.
2623   Value *ScalarIV = CreateScalarIV(Step);
2624   if (!Cost->isScalarEpilogueAllowed())
2625     CreateSplatIV(ScalarIV, Step);
2626   buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, State);
2627 }
2628 
2629 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
2630                                            Instruction *EntryVal,
2631                                            const InductionDescriptor &ID,
2632                                            VPValue *Def,
2633                                            VPTransformState &State) {
2634   IRBuilder<> &Builder = State.Builder;
2635   // We shouldn't have to build scalar steps if we aren't vectorizing.
2636   assert(State.VF.isVector() && "VF should be greater than one");
2637   // Get the value type and ensure it and the step have the same integer type.
2638   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
2639   assert(ScalarIVTy == Step->getType() &&
2640          "Val and Step should have the same type");
2641 
2642   // We build scalar steps for both integer and floating-point induction
2643   // variables. Here, we determine the kind of arithmetic we will perform.
2644   Instruction::BinaryOps AddOp;
2645   Instruction::BinaryOps MulOp;
2646   if (ScalarIVTy->isIntegerTy()) {
2647     AddOp = Instruction::Add;
2648     MulOp = Instruction::Mul;
2649   } else {
2650     AddOp = ID.getInductionOpcode();
2651     MulOp = Instruction::FMul;
2652   }
2653 
2654   // Determine the number of scalars we need to generate for each unroll
2655   // iteration. If EntryVal is uniform, we only need to generate the first
2656   // lane. Otherwise, we generate all VF values.
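  // For example (illustrative), with a fixed VF of 4, UF = 1, an integer step
  // of 2 and a non-uniform EntryVal, the loop below produces the scalar values
  // ScalarIV + 0*2, ScalarIV + 1*2, ScalarIV + 2*2 and ScalarIV + 3*2, one per
  // lane of part 0.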
2657   bool IsUniform =
2658       Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), State.VF);
2659   unsigned Lanes = IsUniform ? 1 : State.VF.getKnownMinValue();
2660   // Compute the scalar steps and save the results in State.
2661   Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
2662                                      ScalarIVTy->getScalarSizeInBits());
2663   Type *VecIVTy = nullptr;
2664   Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr;
2665   if (!IsUniform && State.VF.isScalable()) {
2666     VecIVTy = VectorType::get(ScalarIVTy, State.VF);
2667     UnitStepVec =
2668         Builder.CreateStepVector(VectorType::get(IntStepTy, State.VF));
2669     SplatStep = Builder.CreateVectorSplat(State.VF, Step);
2670     SplatIV = Builder.CreateVectorSplat(State.VF, ScalarIV);
2671   }
2672 
2673   for (unsigned Part = 0; Part < State.UF; ++Part) {
2674     Value *StartIdx0 = createStepForVF(Builder, IntStepTy, State.VF, Part);
2675 
2676     if (!IsUniform && State.VF.isScalable()) {
2677       auto *SplatStartIdx = Builder.CreateVectorSplat(State.VF, StartIdx0);
2678       auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec);
2679       if (ScalarIVTy->isFloatingPointTy())
2680         InitVec = Builder.CreateSIToFP(InitVec, VecIVTy);
2681       auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep);
2682       auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul);
2683       State.set(Def, Add, Part);
      // It's also useful to record the per-lane values for the known minimum
      // number of elements, which is done below. This improves code quality
      // when extracting the first element, for example.
2687     }
2688 
2689     if (ScalarIVTy->isFloatingPointTy())
2690       StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy);
2691 
2692     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2693       Value *StartIdx = Builder.CreateBinOp(
2694           AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane));
2695       // The step returned by `createStepForVF` is a runtime-evaluated value
2696       // when VF is scalable. Otherwise, it should be folded into a Constant.
2697       assert((State.VF.isScalable() || isa<Constant>(StartIdx)) &&
2698              "Expected StartIdx to be folded to a constant when VF is not "
2699              "scalable");
2700       auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
2701       auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul);
2702       State.set(Def, Add, VPIteration(Part, Lane));
2703     }
2704   }
2705 }
2706 
2707 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def,
2708                                                     const VPIteration &Instance,
2709                                                     VPTransformState &State) {
2710   Value *ScalarInst = State.get(Def, Instance);
2711   Value *VectorValue = State.get(Def, Instance.Part);
2712   VectorValue = Builder.CreateInsertElement(
2713       VectorValue, ScalarInst,
2714       Instance.Lane.getAsRuntimeExpr(State.Builder, VF));
2715   State.set(Def, VectorValue, Instance.Part);
2716 }
2717 
2718 // Return whether we allow using masked interleave-groups (for dealing with
2719 // strided loads/stores that reside in predicated blocks, or for dealing
2720 // with gaps).
2721 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2722   // If an override option has been passed in for interleaved accesses, use it.
2723   if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2724     return EnableMaskedInterleavedMemAccesses;
2725 
2726   return TTI.enableMaskedInterleavedAccessVectorization();
2727 }
2728 
2729 // Try to vectorize the interleave group that \p Instr belongs to.
2730 //
2731 // E.g. Translate following interleaved load group (factor = 3):
2732 //   for (i = 0; i < N; i+=3) {
2733 //     R = Pic[i];             // Member of index 0
2734 //     G = Pic[i+1];           // Member of index 1
2735 //     B = Pic[i+2];           // Member of index 2
2736 //     ... // do something to R, G, B
2737 //   }
2738 // To:
2739 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2740 //   %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9>   ; R elements
2741 //   %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10>  ; G elements
2742 //   %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11>  ; B elements
2743 //
2744 // Or translate following interleaved store group (factor = 3):
2745 //   for (i = 0; i < N; i+=3) {
2746 //     ... do something to R, G, B
2747 //     Pic[i]   = R;           // Member of index 0
2748 //     Pic[i+1] = G;           // Member of index 1
2749 //     Pic[i+2] = B;           // Member of index 2
2750 //   }
2751 // To:
2752 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2753 //   %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
2754 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2755 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2756 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2757 void InnerLoopVectorizer::vectorizeInterleaveGroup(
2758     const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
2759     VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
2760     VPValue *BlockInMask) {
2761   Instruction *Instr = Group->getInsertPos();
2762   const DataLayout &DL = Instr->getModule()->getDataLayout();
2763 
2764   // Prepare for the vector type of the interleaved load/store.
2765   Type *ScalarTy = getLoadStoreType(Instr);
2766   unsigned InterleaveFactor = Group->getFactor();
2767   assert(!VF.isScalable() && "scalable vectors not yet supported.");
2768   auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);
2769 
2770   // Prepare for the new pointers.
2771   SmallVector<Value *, 2> AddrParts;
2772   unsigned Index = Group->getIndex(Instr);
2773 
2774   // TODO: extend the masked interleaved-group support to reversed access.
2775   assert((!BlockInMask || !Group->isReverse()) &&
2776          "Reversed masked interleave-group not supported.");
2777 
2778   // If the group is reverse, adjust the index to refer to the last vector lane
2779   // instead of the first. We adjust the index from the first vector lane,
2780   // rather than directly getting the pointer for lane VF - 1, because the
2781   // pointer operand of the interleaved access is supposed to be uniform. For
2782   // uniform instructions, we're only required to generate a value for the
2783   // first vector lane in each unroll iteration.
2784   if (Group->isReverse())
2785     Index += (VF.getKnownMinValue() - 1) * Group->getFactor();
2786 
2787   for (unsigned Part = 0; Part < UF; Part++) {
2788     Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
2789     setDebugLocFromInst(AddrPart);
2790 
    // Note that the current instruction may belong to any member index, so the
    // address must be adjusted to point at the member of index 0.
    //
    // E.g.  a = A[i+1];     // Member of index 1 (current instruction)
    //       b = A[i];       // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
    //
    // E.g.  A[i+1] = a;     // Member of index 1
    //       A[i]   = b;     // Member of index 0
    //       A[i+2] = c;     // Member of index 2 (current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].
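    //
    // For example (illustrative), if the insert position is the member at
    // index 2, the adjustment below emits roughly:
    //   %adjusted.ptr = getelementptr i32, i32* %addr.part, i32 -2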
2802 
2803     bool InBounds = false;
2804     if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
2805       InBounds = gep->isInBounds();
2806     AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2807     cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2808 
2809     // Cast to the vector pointer type.
2810     unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
2811     Type *PtrTy = VecTy->getPointerTo(AddressSpace);
2812     AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
2813   }
2814 
2815   setDebugLocFromInst(Instr);
2816   Value *PoisonVec = PoisonValue::get(VecTy);
2817 
2818   Value *MaskForGaps = nullptr;
2819   if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
2820     MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2821     assert(MaskForGaps && "Mask for Gaps is required but it is null");
2822   }
2823 
2824   // Vectorize the interleaved load group.
2825   if (isa<LoadInst>(Instr)) {
2826     // For each unroll part, create a wide load for the group.
2827     SmallVector<Value *, 2> NewLoads;
2828     for (unsigned Part = 0; Part < UF; Part++) {
2829       Instruction *NewLoad;
2830       if (BlockInMask || MaskForGaps) {
2831         assert(useMaskedInterleavedAccesses(*TTI) &&
2832                "masked interleaved groups are not allowed.");
2833         Value *GroupMask = MaskForGaps;
2834         if (BlockInMask) {
2835           Value *BlockInMaskPart = State.get(BlockInMask, Part);
2836           Value *ShuffledMask = Builder.CreateShuffleVector(
2837               BlockInMaskPart,
2838               createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2839               "interleaved.mask");
2840           GroupMask = MaskForGaps
2841                           ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2842                                                 MaskForGaps)
2843                           : ShuffledMask;
2844         }
2845         NewLoad =
2846             Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(),
2847                                      GroupMask, PoisonVec, "wide.masked.vec");
2848       }
2849       else
2850         NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
2851                                             Group->getAlign(), "wide.vec");
2852       Group->addMetadata(NewLoad);
2853       NewLoads.push_back(NewLoad);
2854     }
2855 
2856     // For each member in the group, shuffle out the appropriate data from the
2857     // wide loads.
2858     unsigned J = 0;
2859     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2860       Instruction *Member = Group->getMember(I);
2861 
2862       // Skip the gaps in the group.
2863       if (!Member)
2864         continue;
2865 
2866       auto StrideMask =
2867           createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
2868       for (unsigned Part = 0; Part < UF; Part++) {
2869         Value *StridedVec = Builder.CreateShuffleVector(
2870             NewLoads[Part], StrideMask, "strided.vec");
2871 
        // If this member has a different type, cast the result to that type.
2873         if (Member->getType() != ScalarTy) {
2874           assert(!VF.isScalable() && "VF is assumed to be non scalable.");
2875           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2876           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2877         }
2878 
2879         if (Group->isReverse())
2880           StridedVec = Builder.CreateVectorReverse(StridedVec, "reverse");
2881 
2882         State.set(VPDefs[J], StridedVec, Part);
2883       }
2884       ++J;
2885     }
2886     return;
2887   }
2888 
  // The sub-vector type for the current instruction.
2890   auto *SubVT = VectorType::get(ScalarTy, VF);
2891 
2892   // Vectorize the interleaved store group.
2893   MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2894   assert((!MaskForGaps || useMaskedInterleavedAccesses(*TTI)) &&
2895          "masked interleaved groups are not allowed.");
2896   assert((!MaskForGaps || !VF.isScalable()) &&
2897          "masking gaps for scalable vectors is not yet supported.");
2898   for (unsigned Part = 0; Part < UF; Part++) {
2899     // Collect the stored vector from each member.
2900     SmallVector<Value *, 4> StoredVecs;
2901     for (unsigned i = 0; i < InterleaveFactor; i++) {
      assert((Group->getMember(i) || MaskForGaps) &&
             "Failed to get a member from an interleaved store group");
2904       Instruction *Member = Group->getMember(i);
2905 
2906       // Skip the gaps in the group.
2907       if (!Member) {
        Value *Poison = PoisonValue::get(SubVT);
        StoredVecs.push_back(Poison);
2910         continue;
2911       }
2912 
2913       Value *StoredVec = State.get(StoredValues[i], Part);
2914 
2915       if (Group->isReverse())
2916         StoredVec = Builder.CreateVectorReverse(StoredVec, "reverse");
2917 
      // If this member has a different type, cast it to the unified type.
2920       if (StoredVec->getType() != SubVT)
2921         StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2922 
2923       StoredVecs.push_back(StoredVec);
2924     }
2925 
2926     // Concatenate all vectors into a wide vector.
2927     Value *WideVec = concatenateVectors(Builder, StoredVecs);
2928 
2929     // Interleave the elements in the wide vector.
2930     Value *IVec = Builder.CreateShuffleVector(
2931         WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor),
2932         "interleaved.vec");
2933 
2934     Instruction *NewStoreInstr;
2935     if (BlockInMask || MaskForGaps) {
2936       Value *GroupMask = MaskForGaps;
2937       if (BlockInMask) {
2938         Value *BlockInMaskPart = State.get(BlockInMask, Part);
2939         Value *ShuffledMask = Builder.CreateShuffleVector(
2940             BlockInMaskPart,
2941             createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2942             "interleaved.mask");
2943         GroupMask = MaskForGaps ? Builder.CreateBinOp(Instruction::And,
2944                                                       ShuffledMask, MaskForGaps)
2945                                 : ShuffledMask;
2946       }
2947       NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part],
2948                                                 Group->getAlign(), GroupMask);
2949     } else
2950       NewStoreInstr =
2951           Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());
2952 
2953     Group->addMetadata(NewStoreInstr);
2954   }
2955 }
2956 
2957 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
2958                                                VPReplicateRecipe *RepRecipe,
2959                                                const VPIteration &Instance,
2960                                                bool IfPredicateInstr,
2961                                                VPTransformState &State) {
2962   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
2963 
2964   // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for
2965   // the first lane and part.
2966   if (isa<NoAliasScopeDeclInst>(Instr))
2967     if (!Instance.isFirstIteration())
2968       return;
2969 
2970   setDebugLocFromInst(Instr);
2971 
  // Does this instruction return a value?
2973   bool IsVoidRetTy = Instr->getType()->isVoidTy();
2974 
2975   Instruction *Cloned = Instr->clone();
2976   if (!IsVoidRetTy)
2977     Cloned->setName(Instr->getName() + ".cloned");
2978 
  // If the scalarized instruction contributes to the address computation of a
  // widened masked load/store which was in a basic block that needed
  // predication and is not predicated after vectorization, we can't propagate
  // poison-generating flags (nuw/nsw, exact, inbounds, etc.). The scalarized
  // instruction could otherwise feed a poison value to the base address of the
  // widened load/store.
2985   if (State.MayGeneratePoisonRecipes.contains(RepRecipe))
2986     Cloned->dropPoisonGeneratingFlags();
2987 
2988   State.Builder.SetInsertPoint(Builder.GetInsertBlock(),
2989                                Builder.GetInsertPoint());
2990   // Replace the operands of the cloned instructions with their scalar
2991   // equivalents in the new loop.
2992   for (auto &I : enumerate(RepRecipe->operands())) {
2993     auto InputInstance = Instance;
2994     VPValue *Operand = I.value();
2995     if (State.Plan->isUniformAfterVectorization(Operand))
2996       InputInstance.Lane = VPLane::getFirstLane();
2997     Cloned->setOperand(I.index(), State.get(Operand, InputInstance));
2998   }
2999   addNewMetadata(Cloned, Instr);
3000 
3001   // Place the cloned scalar in the new loop.
3002   Builder.Insert(Cloned);
3003 
3004   State.set(RepRecipe, Cloned, Instance);
3005 
  // If we just cloned a new assumption, add it to the assumption cache.
3007   if (auto *II = dyn_cast<AssumeInst>(Cloned))
3008     AC->registerAssumption(II);
3009 
3010   // End if-block.
3011   if (IfPredicateInstr)
3012     PredicatedInstructions.push_back(Cloned);
3013 }
3014 
3015 void InnerLoopVectorizer::createHeaderBranch(Loop *L) {
3016   BasicBlock *Header = L->getHeader();
3017   assert(!L->getLoopLatch() && "loop should not have a latch at this point");
3018 
3019   IRBuilder<> B(Header->getTerminator());
3020   Instruction *OldInst =
3021       getDebugLocFromInstOrOperands(Legal->getPrimaryInduction());
3022   setDebugLocFromInst(OldInst, &B);
3023 
  // Connect the header to the exit block and back to itself, replacing the
  // old terminator.
3026   B.CreateCondBr(B.getTrue(), L->getUniqueExitBlock(), Header);
3027 
3028   // Now we have two terminators. Remove the old one from the block.
3029   Header->getTerminator()->eraseFromParent();
3030 }
3031 
3032 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
3033   if (TripCount)
3034     return TripCount;
3035 
3036   assert(L && "Create Trip Count for null loop.");
3037   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3038   // Find the loop boundaries.
3039   ScalarEvolution *SE = PSE.getSE();
3040   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
3041   assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
3042          "Invalid loop count");
3043 
3044   Type *IdxTy = Legal->getWidestInductionType();
3045   assert(IdxTy && "No type for induction");
3046 
  // The exit count might have type i64 while the phi is i32. This can happen
  // if we have an induction variable that is sign-extended before the compare.
  // The only way we can get a backedge-taken count in that situation is if the
  // induction variable was signed and therefore will not overflow, in which
  // case truncation is legal.
3052   if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
3053       IdxTy->getPrimitiveSizeInBits())
3054     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
3055   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
3056 
  // Get the total trip count from the backedge-taken count by adding 1.
3058   const SCEV *ExitCount = SE->getAddExpr(
3059       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
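  // For example, a loop "for (i = 0; i < n; i++)" with n == 10 has a
  // backedge-taken count of 9 and therefore a trip count of 10.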
3060 
3061   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
3062 
3063   // Expand the trip count and place the new instructions in the preheader.
3064   // Notice that the pre-header does not change, only the loop body.
3065   SCEVExpander Exp(*SE, DL, "induction");
3066 
3067   // Count holds the overall loop count (N).
3068   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
3069                                 L->getLoopPreheader()->getTerminator());
3070 
3071   if (TripCount->getType()->isPointerTy())
3072     TripCount =
3073         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
3074                                     L->getLoopPreheader()->getTerminator());
3075 
3076   return TripCount;
3077 }
3078 
3079 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
3080   if (VectorTripCount)
3081     return VectorTripCount;
3082 
3083   Value *TC = getOrCreateTripCount(L);
3084   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3085 
3086   Type *Ty = TC->getType();
3087   // This is where we can make the step a runtime constant.
3088   Value *Step = createStepForVF(Builder, Ty, VF, UF);
3089 
3090   // If the tail is to be folded by masking, round the number of iterations N
3091   // up to a multiple of Step instead of rounding down. This is done by first
3092   // adding Step-1 and then rounding down. Note that it's ok if this addition
3093   // overflows: the vector induction variable will eventually wrap to zero given
3094   // that it starts at zero and its Step is a power of two; the loop will then
3095   // exit, with the last early-exit vector comparison also producing all-true.
3096   if (Cost->foldTailByMasking()) {
3097     assert(isPowerOf2_32(VF.getKnownMinValue() * UF) &&
3098            "VF*UF must be a power of 2 when folding tail by masking");
3099     Value *NumLanes = getRuntimeVF(Builder, Ty, VF * UF);
3100     TC = Builder.CreateAdd(
3101         TC, Builder.CreateSub(NumLanes, ConstantInt::get(Ty, 1)), "n.rnd.up");
3102   }
3103 
3104   // Now we need to generate the expression for the part of the loop that the
3105   // vectorized body will execute. This is equal to N - (N % Step) if scalar
3106   // iterations are not required for correctness, or N - Step, otherwise. Step
3107   // is equal to the vectorization factor (number of SIMD elements) times the
3108   // unroll factor (number of SIMD instructions).
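  // For example (fixed VF of 4, UF = 1, no tail folding): with N = 10, Step is
  // 4, N % Step is 2 and the vector trip count becomes 8, leaving two scalar
  // iterations for the remainder loop.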
3109   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
3110 
3111   // There are cases where we *must* run at least one iteration in the remainder
3112   // loop.  See the cost model for when this can happen.  If the step evenly
3113   // divides the trip count, we set the remainder to be equal to the step. If
3114   // the step does not evenly divide the trip count, no adjustment is necessary
3115   // since there will already be scalar iterations. Note that the minimum
3116   // iterations check ensures that N >= Step.
3117   if (Cost->requiresScalarEpilogue(VF)) {
3118     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
3119     R = Builder.CreateSelect(IsZero, Step, R);
3120   }
3121 
3122   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
3123 
3124   return VectorTripCount;
3125 }
3126 
3127 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
3128                                                    const DataLayout &DL) {
  // Verify that V is a vector with the same number of elements as DstVTy.
3130   auto *DstFVTy = cast<FixedVectorType>(DstVTy);
3131   unsigned VF = DstFVTy->getNumElements();
3132   auto *SrcVecTy = cast<FixedVectorType>(V->getType());
  assert((VF == SrcVecTy->getNumElements()) &&
         "Vector dimensions do not match");
3134   Type *SrcElemTy = SrcVecTy->getElementType();
3135   Type *DstElemTy = DstFVTy->getElementType();
3136   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
3137          "Vector elements must have same size");
3138 
3139   // Do a direct cast if element types are castable.
3140   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
3141     return Builder.CreateBitOrPointerCast(V, DstFVTy);
3142   }
  // V cannot be cast directly to the desired vector type. This may happen
  // when V is a floating-point vector but DstVTy is a vector of pointers, or
  // vice versa. Handle this with a two-step cast through an intermediate
  // integer type, i.e. Ptr <-> Int <-> Float.
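  // For example (illustrative sketch), casting <4 x float> to <4 x i8*> on a
  // target with 32-bit pointers is done as:
  //   %v.int = bitcast <4 x float> %v to <4 x i32>
  //   %res   = inttoptr <4 x i32> %v.int to <4 x i8*>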
3147   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
3148          "Only one type should be a pointer type");
3149   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
3150          "Only one type should be a floating point type");
3151   Type *IntTy =
3152       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
3153   auto *VecIntTy = FixedVectorType::get(IntTy, VF);
3154   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
3155   return Builder.CreateBitOrPointerCast(CastVal, DstFVTy);
3156 }
3157 
3158 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
3159                                                          BasicBlock *Bypass) {
3160   Value *Count = getOrCreateTripCount(L);
  // Reuse the existing vector loop preheader for the TC checks.
  // Note that a new preheader block is generated for the vector loop.
3163   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
3164   IRBuilder<> Builder(TCCheckBlock->getTerminator());
3165 
3166   // Generate code to check if the loop's trip count is less than VF * UF, or
3167   // equal to it in case a scalar epilogue is required; this implies that the
3168   // vector trip count is zero. This check also covers the case where adding one
3169   // to the backedge-taken count overflowed leading to an incorrect trip count
3170   // of zero. In this case we will also jump to the scalar loop.
3171   auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE
3172                                             : ICmpInst::ICMP_ULT;
3173 
3174   // If tail is to be folded, vector loop takes care of all iterations.
3175   Value *CheckMinIters = Builder.getFalse();
3176   if (!Cost->foldTailByMasking()) {
3177     Value *Step = createStepForVF(Builder, Count->getType(), VF, UF);
3178     CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
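    // For example (illustrative), with a fixed VF of 4 and UF = 2 this
    // compares the trip count against VF * UF = 8, roughly:
    //   %min.iters.check = icmp ult i64 %count, 8
    // (ult here; ule when a scalar epilogue is required). Trip counts failing
    // the check take the branch created below to the scalar loop.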
3179   }
3180   // Create new preheader for vector loop.
3181   LoopVectorPreHeader =
3182       SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
3183                  "vector.ph");
3184 
3185   assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
3186                                DT->getNode(Bypass)->getIDom()) &&
3187          "TC check is expected to dominate Bypass");
3188 
3189   // Update dominator for Bypass & LoopExit (if needed).
3190   DT->changeImmediateDominator(Bypass, TCCheckBlock);
3191   if (!Cost->requiresScalarEpilogue(VF))
3192     // If there is an epilogue which must run, there's no edge from the
    // middle block to exit blocks and thus no need to update the immediate
3194     // dominator of the exit blocks.
3195     DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
3196 
3197   ReplaceInstWithInst(
3198       TCCheckBlock->getTerminator(),
3199       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
3200   LoopBypassBlocks.push_back(TCCheckBlock);
3201 }
3202 
BasicBlock *InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
  BasicBlock *const SCEVCheckBlock =
3206       RTChecks.emitSCEVChecks(L, Bypass, LoopVectorPreHeader, LoopExitBlock);
3207   if (!SCEVCheckBlock)
3208     return nullptr;
3209 
3210   assert(!(SCEVCheckBlock->getParent()->hasOptSize() ||
3211            (OptForSizeBasedOnProfile &&
3212             Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) &&
3213          "Cannot SCEV check stride or overflow when optimizing for size");
3214 
3215 
3216   // Update dominator only if this is first RT check.
3217   if (LoopBypassBlocks.empty()) {
3218     DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
3219     if (!Cost->requiresScalarEpilogue(VF))
3220       // If there is an epilogue which must run, there's no edge from the
      // middle block to exit blocks and thus no need to update the immediate
3222       // dominator of the exit blocks.
3223       DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
3224   }
3225 
3226   LoopBypassBlocks.push_back(SCEVCheckBlock);
3227   AddedSafetyChecks = true;
3228   return SCEVCheckBlock;
3229 }
3230 
3231 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L,
3232                                                       BasicBlock *Bypass) {
3233   // VPlan-native path does not do any analysis for runtime checks currently.
3234   if (EnableVPlanNativePath)
3235     return nullptr;
3236 
3237   BasicBlock *const MemCheckBlock =
3238       RTChecks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader);
3239 
  // Check whether we generated code that checks at runtime if arrays overlap.
  // We put the checks into a separate block to make the more common case of
  // few elements faster.
3243   if (!MemCheckBlock)
3244     return nullptr;
3245 
3246   if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
3247     assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
3248            "Cannot emit memory checks when optimizing for size, unless forced "
3249            "to vectorize.");
3250     ORE->emit([&]() {
3251       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
3252                                         L->getStartLoc(), L->getHeader())
3253              << "Code-size may be reduced by not forcing "
3254                 "vectorization, or by source-code modifications "
3255                 "eliminating the need for runtime checks "
3256                 "(e.g., adding 'restrict').";
3257     });
3258   }
3259 
3260   LoopBypassBlocks.push_back(MemCheckBlock);
3261 
3262   AddedSafetyChecks = true;
3263 
3264   // We currently don't use LoopVersioning for the actual loop cloning but we
3265   // still use it to add the noalias metadata.
3266   LVer = std::make_unique<LoopVersioning>(
3267       *Legal->getLAI(),
3268       Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
3269       DT, PSE.getSE());
3270   LVer->prepareNoAliasMetadata();
3271   return MemCheckBlock;
3272 }
3273 
3274 Value *InnerLoopVectorizer::emitTransformedIndex(
3275     IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
3276     const InductionDescriptor &ID, BasicBlock *VectorHeader) const {
3277 
3278   SCEVExpander Exp(*SE, DL, "induction");
3279   auto Step = ID.getStep();
3280   auto StartValue = ID.getStartValue();
3281   assert(Index->getType()->getScalarType() == Step->getType() &&
3282          "Index scalar type does not match StepValue type");
3283 
  // Note: the IR at this point is broken. We cannot use SE to create any new
  // SCEV and then expand it, hoping that SCEV's simplification will give us a
  // more optimal code. Unfortunately, attempting to do so on invalid IR may
  // lead to various SCEV crashes. So all we can do is use the builder and rely
  // on InstCombine for future simplifications. Here we handle only some
  // trivial cases.
3290   auto CreateAdd = [&B](Value *X, Value *Y) {
3291     assert(X->getType() == Y->getType() && "Types don't match!");
3292     if (auto *CX = dyn_cast<ConstantInt>(X))
3293       if (CX->isZero())
3294         return Y;
3295     if (auto *CY = dyn_cast<ConstantInt>(Y))
3296       if (CY->isZero())
3297         return X;
3298     return B.CreateAdd(X, Y);
3299   };
3300 
3301   // We allow X to be a vector type, in which case Y will potentially be
3302   // splatted into a vector with the same element count.
3303   auto CreateMul = [&B](Value *X, Value *Y) {
3304     assert(X->getType()->getScalarType() == Y->getType() &&
3305            "Types don't match!");
3306     if (auto *CX = dyn_cast<ConstantInt>(X))
3307       if (CX->isOne())
3308         return Y;
3309     if (auto *CY = dyn_cast<ConstantInt>(Y))
3310       if (CY->isOne())
3311         return X;
3312     VectorType *XVTy = dyn_cast<VectorType>(X->getType());
3313     if (XVTy && !isa<VectorType>(Y->getType()))
3314       Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
3315     return B.CreateMul(X, Y);
3316   };
3317 
3318   // Get a suitable insert point for SCEV expansion. For blocks in the vector
3319   // loop, choose the end of the vector loop header (=VectorHeader), because
3320   // the DomTree is not kept up-to-date for additional blocks generated in the
3321   // vector loop. By using the header as insertion point, we guarantee that the
3322   // expanded instructions dominate all their uses.
3323   auto GetInsertPoint = [this, &B, VectorHeader]() {
3324     BasicBlock *InsertBB = B.GetInsertPoint()->getParent();
3325     if (InsertBB != LoopVectorBody &&
3326         LI->getLoopFor(VectorHeader) == LI->getLoopFor(InsertBB))
3327       return VectorHeader->getTerminator();
3328     return &*B.GetInsertPoint();
3329   };
3330 
3331   switch (ID.getKind()) {
3332   case InductionDescriptor::IK_IntInduction: {
3333     assert(!isa<VectorType>(Index->getType()) &&
3334            "Vector indices not supported for integer inductions yet");
3335     assert(Index->getType() == StartValue->getType() &&
3336            "Index type does not match StartValue type");
3337     if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
3338       return B.CreateSub(StartValue, Index);
3339     auto *Offset = CreateMul(
3340         Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()));
3341     return CreateAdd(StartValue, Offset);
3342   }
3343   case InductionDescriptor::IK_PtrInduction: {
3344     assert(isa<SCEVConstant>(Step) &&
3345            "Expected constant step for pointer induction");
3346     return B.CreateGEP(
3347         ID.getElementType(), StartValue,
3348         CreateMul(Index,
3349                   Exp.expandCodeFor(Step, Index->getType()->getScalarType(),
3350                                     GetInsertPoint())));
3351   }
3352   case InductionDescriptor::IK_FpInduction: {
3353     assert(!isa<VectorType>(Index->getType()) &&
3354            "Vector indices not supported for FP inductions yet");
3355     assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
3356     auto InductionBinOp = ID.getInductionBinOp();
3357     assert(InductionBinOp &&
3358            (InductionBinOp->getOpcode() == Instruction::FAdd ||
3359             InductionBinOp->getOpcode() == Instruction::FSub) &&
3360            "Original bin op should be defined for FP induction");
3361 
3362     Value *StepValue = cast<SCEVUnknown>(Step)->getValue();
3363     Value *MulExp = B.CreateFMul(StepValue, Index);
3364     return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
3365                          "induction");
3366   }
3367   case InductionDescriptor::IK_NoInduction:
3368     return nullptr;
3369   }
3370   llvm_unreachable("invalid enum");
3371 }
3372 
3373 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
3374   LoopScalarBody = OrigLoop->getHeader();
3375   LoopVectorPreHeader = OrigLoop->getLoopPreheader();
3376   assert(LoopVectorPreHeader && "Invalid loop structure");
3377   LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr
3378   assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) &&
3379          "multiple exit loop without required epilogue?");
3380 
3381   LoopMiddleBlock =
3382       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3383                  LI, nullptr, Twine(Prefix) + "middle.block");
3384   LoopScalarPreHeader =
3385       SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
3386                  nullptr, Twine(Prefix) + "scalar.ph");
3387 
3388   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3389 
3390   // Set up the middle block terminator.  Two cases:
3391   // 1) If we know that we must execute the scalar epilogue, emit an
3392   //    unconditional branch.
3393   // 2) Otherwise, we must have a single unique exit block (due to how we
  //    implement the multiple exit case).  In this case, set up a conditional
3395   //    branch from the middle block to the loop scalar preheader, and the
3396   //    exit block.  completeLoopSkeleton will update the condition to use an
3397   //    iteration check, if required to decide whether to execute the remainder.
3398   BranchInst *BrInst = Cost->requiresScalarEpilogue(VF) ?
3399     BranchInst::Create(LoopScalarPreHeader) :
3400     BranchInst::Create(LoopExitBlock, LoopScalarPreHeader,
3401                        Builder.getTrue());
3402   BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3403   ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
3404 
  // We intentionally don't let SplitBlock update LoopInfo since
  // LoopVectorBody should belong to a different loop than LoopVectorPreHeader.
  // LoopVectorBody is explicitly added to the correct place a few lines later.
3408   LoopVectorBody =
3409       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3410                  nullptr, nullptr, Twine(Prefix) + "vector.body");
3411 
3412   // Update dominator for loop exit.
3413   if (!Cost->requiresScalarEpilogue(VF))
3414     // If there is an epilogue which must run, there's no edge from the
    // middle block to exit blocks and thus no need to update the immediate
3416     // dominator of the exit blocks.
3417     DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
3418 
3419   // Create and register the new vector loop.
3420   Loop *Lp = LI->AllocateLoop();
3421   Loop *ParentLoop = OrigLoop->getParentLoop();
3422 
3423   // Insert the new loop into the loop nest and register the new basic blocks
3424   // before calling any utilities such as SCEV that require valid LoopInfo.
3425   if (ParentLoop) {
3426     ParentLoop->addChildLoop(Lp);
3427   } else {
3428     LI->addTopLevelLoop(Lp);
3429   }
3430   Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
3431   return Lp;
3432 }
3433 
3434 void InnerLoopVectorizer::createInductionResumeValues(
3435     Loop *L, std::pair<BasicBlock *, Value *> AdditionalBypass) {
3436   assert(((AdditionalBypass.first && AdditionalBypass.second) ||
3437           (!AdditionalBypass.first && !AdditionalBypass.second)) &&
3438          "Inconsistent information about additional bypass.");
3439 
3440   Value *VectorTripCount = getOrCreateVectorTripCount(L);
3441   assert(VectorTripCount && L && "Expected valid arguments");
3442   // We are going to resume the execution of the scalar loop.
3443   // Go over all of the induction variables that we found and fix the
3444   // PHIs that are left in the scalar version of the loop.
3445   // The starting values of PHI nodes depend on the counter of the last
3446   // iteration in the vectorized loop.
3447   // If we come from a bypass edge then we need to start from the original
3448   // start value.
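  // For example, for an induction "j = 7 + 2 * i" and a vector trip count of
  // 8, the resume value computed below for j is 7 + 2 * 8 = 23 (the primary
  // induction simply resumes at the vector trip count itself).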
3449   Instruction *OldInduction = Legal->getPrimaryInduction();
3450   for (auto &InductionEntry : Legal->getInductionVars()) {
3451     PHINode *OrigPhi = InductionEntry.first;
3452     InductionDescriptor II = InductionEntry.second;
3453 
    // Create phi nodes to merge from the backedge-taken check block.
3455     PHINode *BCResumeVal =
3456         PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
3457                         LoopScalarPreHeader->getTerminator());
3458     // Copy original phi DL over to the new one.
3459     BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
3460     Value *&EndValue = IVEndValues[OrigPhi];
3461     Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
3462     if (OrigPhi == OldInduction) {
3463       // We know what the end value is.
3464       EndValue = VectorTripCount;
3465     } else {
3466       IRBuilder<> B(L->getLoopPreheader()->getTerminator());
3467 
3468       // Fast-math-flags propagate from the original induction instruction.
3469       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3470         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3471 
3472       Type *StepType = II.getStep()->getType();
3473       Instruction::CastOps CastOp =
3474           CastInst::getCastOpcode(VectorTripCount, true, StepType, true);
3475       Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd");
3476       const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout();
3477       EndValue =
3478           emitTransformedIndex(B, CRD, PSE.getSE(), DL, II, LoopVectorBody);
3479       EndValue->setName("ind.end");
3480 
3481       // Compute the end value for the additional bypass (if applicable).
3482       if (AdditionalBypass.first) {
3483         B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt()));
3484         CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true,
3485                                          StepType, true);
3486         CRD =
3487             B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd");
3488         EndValueFromAdditionalBypass =
3489             emitTransformedIndex(B, CRD, PSE.getSE(), DL, II, LoopVectorBody);
3490         EndValueFromAdditionalBypass->setName("ind.end");
3491       }
3492     }
3493     // The new PHI merges the original incoming value, in case of a bypass,
3494     // or the value at the end of the vectorized loop.
3495     BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);
3496 
3497     // Fix the scalar body counter (PHI node).
3498     // The old induction's phi node in the scalar body needs the truncated
3499     // value.
3500     for (BasicBlock *BB : LoopBypassBlocks)
3501       BCResumeVal->addIncoming(II.getStartValue(), BB);
3502 
3503     if (AdditionalBypass.first)
3504       BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first,
3505                                             EndValueFromAdditionalBypass);
3506 
3507     OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
3508   }
3509 }
3510 
3511 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L,
3512                                                       MDNode *OrigLoopID) {
3513   assert(L && "Expected valid loop.");
3514 
3515   // The trip counts should be cached by now.
3516   Value *Count = getOrCreateTripCount(L);
3517   Value *VectorTripCount = getOrCreateVectorTripCount(L);
3518 
3519   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3520 
3521   // Add a check in the middle block to see if we have completed
3522   // all of the iterations in the first vector loop.  Three cases:
3523   // 1) If we require a scalar epilogue, there is no conditional branch as
3524   //    we unconditionally branch to the scalar preheader.  Do nothing.
3525   // 2) If (N - N%VF) == N, then we *don't* need to run the remainder.
3526   //    Thus if tail is to be folded, we know we don't need to run the
3527   //    remainder and we can use the previous value for the condition (true).
3528   // 3) Otherwise, construct a runtime check.
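  // For example, with N = 10 and a vector trip count of 8 the comparison below
  // is "10 == 8", which is false, so the scalar loop runs the remaining two
  // iterations.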
3529   if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) {
3530     Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
3531                                         Count, VectorTripCount, "cmp.n",
3532                                         LoopMiddleBlock->getTerminator());
3533 
3534     // Here we use the same DebugLoc as the scalar loop latch terminator instead
3535     // of the corresponding compare because they may have ended up with
3536     // different line numbers and we want to avoid awkward line stepping while
    // debugging. E.g., if the compare has a line number inside the loop.
3538     CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3539     cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN);
3540   }
3541 
3542   // Get ready to start creating new instructions into the vectorized body.
3543   assert(LoopVectorPreHeader == L->getLoopPreheader() &&
3544          "Inconsistent vector loop preheader");
3545   Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
3546 
3547 #ifdef EXPENSIVE_CHECKS
3548   assert(DT->verify(DominatorTree::VerificationLevel::Fast));
3549   LI->verify(*DT);
3550 #endif
3551 
3552   return LoopVectorPreHeader;
3553 }
3554 
3555 std::pair<BasicBlock *, Value *>
3556 InnerLoopVectorizer::createVectorizedLoopSkeleton() {
3557   /*
3558    In this function we generate a new loop. The new loop will contain
3559    the vectorized instructions while the old loop will continue to run the
3560    scalar remainder.
3561 
3562        [ ] <-- loop iteration number check.
3563     /   |
3564    /    v
3565   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
3566   |  /  |
3567   | /   v
3568   ||   [ ]     <-- vector pre header.
3569   |/    |
3570   |     v
3571   |    [  ] \
3572   |    [  ]_|   <-- vector loop.
3573   |     |
3574   |     v
3575   \   -[ ]   <--- middle-block.
3576    \/   |
3577    /\   v
3578    | ->[ ]     <--- new preheader.
3579    |    |
3580  (opt)  v      <-- edge from middle to exit iff epilogue is not required.
3581    |   [ ] \
3582    |   [ ]_|   <-- old scalar loop to handle remainder (scalar epilogue).
3583     \   |
3584      \  v
3585       >[ ]     <-- exit block(s).
3586    ...
3587    */
3588 
3589   // Get the metadata of the original loop before it gets modified.
3590   MDNode *OrigLoopID = OrigLoop->getLoopID();
3591 
3592   // Workaround!  Compute the trip count of the original loop and cache it
3593   // before we start modifying the CFG.  This code has a systemic problem
3594   // wherein it tries to run analysis over partially constructed IR; this is
3595   // wrong, and not simply for SCEV.  The trip count of the original loop
3596   // simply happens to be prone to hitting this in practice.  In theory, we
3597   // can hit the same issue for any SCEV, or ValueTracking query done during
3598   // mutation.  See PR49900.
3599   getOrCreateTripCount(OrigLoop);
3600 
3601   // Create an empty vector loop, and prepare basic blocks for the runtime
3602   // checks.
3603   Loop *Lp = createVectorLoopSkeleton("");
3604 
3605   // Now, compare the new count to zero. If it is zero skip the vector loop and
3606   // jump to the scalar loop. This check also covers the case where the
3607   // backedge-taken count is uint##_max: adding one to it will overflow leading
3608   // to an incorrect trip count of zero. In this (rare) case we will also jump
3609   // to the scalar loop.
3610   emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader);
3611 
3612   // Generate the code to check any assumptions that we've made for SCEV
3613   // expressions.
3614   emitSCEVChecks(Lp, LoopScalarPreHeader);
3615 
3616   // Generate the code that checks in runtime if arrays overlap. We put the
3617   // checks into a separate block to make the more common case of few elements
3618   // faster.
3619   emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
3620 
3621   createHeaderBranch(Lp);
3622 
3623   // Emit phis for the new starting index of the scalar loop.
3624   createInductionResumeValues(Lp);
3625 
3626   return {completeLoopSkeleton(Lp, OrigLoopID), nullptr};
3627 }
3628 
3629 // Fix up external users of the induction variable. At this point, we are
3630 // in LCSSA form, with all external PHIs that use the IV having one input value,
3631 // coming from the remainder loop. We need those PHIs to also have a correct
3632 // value for the IV when arriving directly from the middle block.
3633 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3634                                        const InductionDescriptor &II,
3635                                        Value *CountRoundDown, Value *EndValue,
3636                                        BasicBlock *MiddleBlock) {
3637   // There are two kinds of external IV usages - those that use the value
3638   // computed in the last iteration (the PHI) and those that use the penultimate
3639   // value (the value that feeds into the phi from the loop latch).
3640   // We allow both, but they, obviously, have different values.
3641 
3642   assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");
3643 
3644   DenseMap<Value *, Value *> MissingVals;
3645 
3646   // An external user of the last iteration's value should see the value that
3647   // the remainder loop uses to initialize its own IV.
3648   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3649   for (User *U : PostInc->users()) {
3650     Instruction *UI = cast<Instruction>(U);
3651     if (!OrigLoop->contains(UI)) {
3652       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3653       MissingVals[UI] = EndValue;
3654     }
3655   }
3656 
  // An external user of the penultimate value needs to see EndValue - Step.
3658   // The simplest way to get this is to recompute it from the constituent SCEVs,
3659   // that is Start + (Step * (CRD - 1)).
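  // For example (illustrative numbers), with Start = 0, Step = 2 and
  // CountRoundDown = 8 the escaping value is 0 + 2 * (8 - 1) = 14, i.e. one
  // Step behind the EndValue of 16.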
3660   for (User *U : OrigPhi->users()) {
3661     auto *UI = cast<Instruction>(U);
3662     if (!OrigLoop->contains(UI)) {
3663       const DataLayout &DL =
3664           OrigLoop->getHeader()->getModule()->getDataLayout();
3665       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3666 
3667       IRBuilder<> B(MiddleBlock->getTerminator());
3668 
3669       // Fast-math-flags propagate from the original induction instruction.
3670       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3671         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3672 
3673       Value *CountMinusOne = B.CreateSub(
3674           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3675       Value *CMO =
3676           !II.getStep()->getType()->isIntegerTy()
3677               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3678                              II.getStep()->getType())
3679               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3680       CMO->setName("cast.cmo");
3681       Value *Escape =
3682           emitTransformedIndex(B, CMO, PSE.getSE(), DL, II, LoopVectorBody);
3683       Escape->setName("ind.escape");
3684       MissingVals[UI] = Escape;
3685     }
3686   }
3687 
3688   for (auto &I : MissingVals) {
3689     PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
3691     // that is %IV2 = phi [...], [ %IV1, %latch ]
3692     // In this case, if IV1 has an external use, we need to avoid adding both
3693     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3694     // don't already have an incoming value for the middle block.
3695     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3696       PHI->addIncoming(I.second, MiddleBlock);
3697   }
3698 }
3699 
3700 namespace {
3701 
3702 struct CSEDenseMapInfo {
3703   static bool canHandle(const Instruction *I) {
3704     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3705            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3706   }
3707 
3708   static inline Instruction *getEmptyKey() {
3709     return DenseMapInfo<Instruction *>::getEmptyKey();
3710   }
3711 
3712   static inline Instruction *getTombstoneKey() {
3713     return DenseMapInfo<Instruction *>::getTombstoneKey();
3714   }
3715 
3716   static unsigned getHashValue(const Instruction *I) {
3717     assert(canHandle(I) && "Unknown instruction!");
3718     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3719                                                            I->value_op_end()));
3720   }
3721 
3722   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3723     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3724         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3725       return LHS == RHS;
3726     return LHS->isIdenticalTo(RHS);
3727   }
3728 };
3729 
3730 } // end anonymous namespace
3731 
/// Perform CSE of induction variable instructions.
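/// For example (illustrative), two identical 'extractelement' instructions in
/// \p BB are folded into one, with the second one's uses rewritten to the
/// first before it is erased.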
3733 static void cse(BasicBlock *BB) {
3734   // Perform simple cse.
3735   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3736   for (Instruction &In : llvm::make_early_inc_range(*BB)) {
3737     if (!CSEDenseMapInfo::canHandle(&In))
3738       continue;
3739 
3740     // Check if we can replace this instruction with any of the
3741     // visited instructions.
3742     if (Instruction *V = CSEMap.lookup(&In)) {
3743       In.replaceAllUsesWith(V);
3744       In.eraseFromParent();
3745       continue;
3746     }
3747 
3748     CSEMap[&In] = &In;
3749   }
3750 }
3751 
3752 InstructionCost
3753 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF,
3754                                               bool &NeedToScalarize) const {
3755   Function *F = CI->getCalledFunction();
3756   Type *ScalarRetTy = CI->getType();
3757   SmallVector<Type *, 4> Tys, ScalarTys;
3758   for (auto &ArgOp : CI->args())
3759     ScalarTys.push_back(ArgOp->getType());
3760 
3761   // Estimate cost of scalarized vector call. The source operands are assumed
3762   // to be vectors, so we need to extract individual elements from there,
3763   // execute VF scalar calls, and then gather the result into the vector return
3764   // value.
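  // For example (illustrative numbers only): with VF = 4, a scalar call cost
  // of 10 and a scalarization overhead of 6, the scalarized estimate computed
  // below is 4 * 10 + 6 = 46.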
3765   InstructionCost ScalarCallCost =
3766       TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput);
3767   if (VF.isScalar())
3768     return ScalarCallCost;
3769 
3770   // Compute corresponding vector type for return value and arguments.
3771   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3772   for (Type *ScalarTy : ScalarTys)
3773     Tys.push_back(ToVectorTy(ScalarTy, VF));
3774 
3775   // Compute costs of unpacking argument values for the scalar calls and
3776   // packing the return values to a vector.
3777   InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
3778 
3779   InstructionCost Cost =
3780       ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
3781 
3782   // If we can't emit a vector call for this function, then the currently found
3783   // cost is the cost we need to return.
3784   NeedToScalarize = true;
3785   VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
3786   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3787 
3788   if (!TLI || CI->isNoBuiltin() || !VecFunc)
3789     return Cost;
3790 
3791   // If the corresponding vector cost is cheaper, return its cost.
3792   InstructionCost VectorCallCost =
3793       TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput);
3794   if (VectorCallCost < Cost) {
3795     NeedToScalarize = false;
3796     Cost = VectorCallCost;
3797   }
3798   return Cost;
3799 }
3800 
3801 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) {
3802   if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy()))
3803     return Elt;
3804   return VectorType::get(Elt, VF);
3805 }
3806 
3807 InstructionCost
3808 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
3809                                                    ElementCount VF) const {
3810   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3811   assert(ID && "Expected intrinsic call!");
3812   Type *RetTy = MaybeVectorizeType(CI->getType(), VF);
3813   FastMathFlags FMF;
3814   if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
3815     FMF = FPMO->getFastMathFlags();
3816 
3817   SmallVector<const Value *> Arguments(CI->args());
3818   FunctionType *FTy = CI->getCalledFunction()->getFunctionType();
3819   SmallVector<Type *> ParamTys;
3820   std::transform(FTy->param_begin(), FTy->param_end(),
3821                  std::back_inserter(ParamTys),
3822                  [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); });
3823 
3824   IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
3825                                     dyn_cast<IntrinsicInst>(CI));
3826   return TTI.getIntrinsicInstrCost(CostAttrs,
3827                                    TargetTransformInfo::TCK_RecipThroughput);
3828 }
3829 
3830 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3831   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3832   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3833   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3834 }
3835 
3836 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3837   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3838   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3839   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3840 }
3841 
3842 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) {
3843   // For every instruction `I` in MinBWs, truncate the operands, create a
3844   // truncated version of `I` and reextend its result. InstCombine runs
3845   // later and will remove any ext/trunc pairs.
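  // For example (illustrative IR): if MinBWs records that an i32 add only
  // needs 8 bits, '%a = add <4 x i32> %x, %y' is rewritten as
  //   %x.tr = trunc <4 x i32> %x to <4 x i8>
  //   %y.tr = trunc <4 x i32> %y to <4 x i8>
  //   %a.tr = add <4 x i8> %x.tr, %y.tr
  //   %a.ext = zext <4 x i8> %a.tr to <4 x i32>
  // and uses of %a are replaced with %a.ext.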
3846   SmallPtrSet<Value *, 4> Erased;
3847   for (const auto &KV : Cost->getMinimalBitwidths()) {
3848     // If the value wasn't vectorized, we must maintain the original scalar
3849     // type. The absence of the value from State indicates that it
3850     // wasn't vectorized.
3851     // FIXME: Should not rely on getVPValue at this point.
3852     VPValue *Def = State.Plan->getVPValue(KV.first, true);
3853     if (!State.hasAnyVectorValue(Def))
3854       continue;
3855     for (unsigned Part = 0; Part < UF; ++Part) {
3856       Value *I = State.get(Def, Part);
3857       if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
3858         continue;
3859       Type *OriginalTy = I->getType();
3860       Type *ScalarTruncatedTy =
3861           IntegerType::get(OriginalTy->getContext(), KV.second);
3862       auto *TruncatedTy = VectorType::get(
3863           ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount());
3864       if (TruncatedTy == OriginalTy)
3865         continue;
3866 
3867       IRBuilder<> B(cast<Instruction>(I));
3868       auto ShrinkOperand = [&](Value *V) -> Value * {
3869         if (auto *ZI = dyn_cast<ZExtInst>(V))
3870           if (ZI->getSrcTy() == TruncatedTy)
3871             return ZI->getOperand(0);
3872         return B.CreateZExtOrTrunc(V, TruncatedTy);
3873       };
3874 
3875       // The actual instruction modification depends on the instruction type,
3876       // unfortunately.
3877       Value *NewI = nullptr;
3878       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3879         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3880                              ShrinkOperand(BO->getOperand(1)));
3881 
3882         // Any wrapping introduced by shrinking this operation shouldn't be
3883         // considered undefined behavior. So, we can't unconditionally copy
3884         // arithmetic wrapping flags to NewI.
3885         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3886       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3887         NewI =
3888             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3889                          ShrinkOperand(CI->getOperand(1)));
3890       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3891         NewI = B.CreateSelect(SI->getCondition(),
3892                               ShrinkOperand(SI->getTrueValue()),
3893                               ShrinkOperand(SI->getFalseValue()));
3894       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3895         switch (CI->getOpcode()) {
3896         default:
3897           llvm_unreachable("Unhandled cast!");
3898         case Instruction::Trunc:
3899           NewI = ShrinkOperand(CI->getOperand(0));
3900           break;
3901         case Instruction::SExt:
3902           NewI = B.CreateSExtOrTrunc(
3903               CI->getOperand(0),
3904               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3905           break;
3906         case Instruction::ZExt:
3907           NewI = B.CreateZExtOrTrunc(
3908               CI->getOperand(0),
3909               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3910           break;
3911         }
3912       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
3913         auto Elements0 =
3914             cast<VectorType>(SI->getOperand(0)->getType())->getElementCount();
3915         auto *O0 = B.CreateZExtOrTrunc(
3916             SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
3917         auto Elements1 =
3918             cast<VectorType>(SI->getOperand(1)->getType())->getElementCount();
3919         auto *O1 = B.CreateZExtOrTrunc(
3920             SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
3921 
3922         NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
3923       } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
3924         // Don't do anything with the operands, just extend the result.
3925         continue;
3926       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3927         auto Elements =
3928             cast<VectorType>(IE->getOperand(0)->getType())->getElementCount();
3929         auto *O0 = B.CreateZExtOrTrunc(
3930             IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3931         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3932         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3933       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3934         auto Elements =
3935             cast<VectorType>(EE->getOperand(0)->getType())->getElementCount();
3936         auto *O0 = B.CreateZExtOrTrunc(
3937             EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3938         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3939       } else {
3940         // If we don't know what to do, be conservative and don't do anything.
3941         continue;
3942       }
3943 
3944       // Lastly, extend the result.
3945       NewI->takeName(cast<Instruction>(I));
3946       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3947       I->replaceAllUsesWith(Res);
3948       cast<Instruction>(I)->eraseFromParent();
3949       Erased.insert(I);
3950       State.reset(Def, Res, Part);
3951     }
3952   }
3953 
3954   // We'll have created a bunch of ZExts that are now parentless. Clean up.
3955   for (const auto &KV : Cost->getMinimalBitwidths()) {
3956     // If the value wasn't vectorized, we must maintain the original scalar
3957     // type. The absence of the value from State indicates that it
3958     // wasn't vectorized.
3959     // FIXME: Should not rely on getVPValue at this point.
3960     VPValue *Def = State.Plan->getVPValue(KV.first, true);
3961     if (!State.hasAnyVectorValue(Def))
3962       continue;
3963     for (unsigned Part = 0; Part < UF; ++Part) {
3964       Value *I = State.get(Def, Part);
3965       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3966       if (Inst && Inst->use_empty()) {
3967         Value *NewI = Inst->getOperand(0);
3968         Inst->eraseFromParent();
3969         State.reset(Def, NewI, Part);
3970       }
3971     }
3972   }
3973 }
3974 
3975 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
3976   // Insert truncates and extends for any truncated instructions as hints to
3977   // InstCombine.
3978   if (VF.isVector())
3979     truncateToMinimalBitwidths(State);
3980 
3981   // Fix widened non-induction PHIs by setting up the PHI operands.
3982   if (OrigPHIsToFix.size()) {
3983     assert(EnableVPlanNativePath &&
3984            "Unexpected non-induction PHIs for fixup in non VPlan-native path");
3985     fixNonInductionPHIs(State);
3986   }
3987 
3988   // At this point every instruction in the original loop is widened to a
3989   // vector form. Now we need to fix the recurrences in the loop. These PHI
3990   // nodes are currently empty because we did not want to introduce cycles.
3991   // This is the second stage of vectorizing recurrences.
3992   fixCrossIterationPHIs(State);
3993 
3994   // Forget the original basic block.
3995   PSE.getSE()->forgetLoop(OrigLoop);
3996 
3997   // If we inserted an edge from the middle block to the unique exit block,
3998   // update uses outside the loop (phis) to account for the newly inserted
3999   // edge.
4000   if (!Cost->requiresScalarEpilogue(VF)) {
4001     // Fix-up external users of the induction variables.
4002     for (auto &Entry : Legal->getInductionVars())
4003       fixupIVUsers(Entry.first, Entry.second,
4004                    getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
4005                    IVEndValues[Entry.first], LoopMiddleBlock);
4006 
4007     fixLCSSAPHIs(State);
4008   }
4009 
4010   for (Instruction *PI : PredicatedInstructions)
4011     sinkScalarOperands(&*PI);
4012 
4013   // Remove redundant induction instructions.
4014   cse(LoopVectorBody);
4015 
4016   // Set/update profile weights for the vector and remainder loops as original
4017   // loop iterations are now distributed among them. Note that original loop
4018   // represented by LoopScalarBody becomes remainder loop after vectorization.
4019   //
  // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
  // end up getting a slightly roughened result but that should be OK since
4022   // profile is not inherently precise anyway. Note also possible bypass of
4023   // vector code caused by legality checks is ignored, assigning all the weight
4024   // to the vector loop, optimistically.
4025   //
  // For scalable vectorization we can't know at compile time how many
  // iterations of the loop are handled in one vector iteration, so instead
  // assume a pessimistic vscale of '1'.
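  // For example (illustrative), with VF = 4 and UF = 2 the weights are scaled
  // as if the original loop had been unrolled by a factor of 8, i.e. an
  // original trip count of 80 becomes roughly 10 vector iterations.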
4029   setProfileInfoAfterUnrolling(
4030       LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody),
4031       LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF);
4032 }
4033 
4034 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
4035   // In order to support recurrences we need to be able to vectorize Phi nodes.
4036   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4037   // stage #2: We now need to fix the recurrences by adding incoming edges to
4038   // the currently empty PHI nodes. At this point every instruction in the
4039   // original loop is widened to a vector form so we can use them to construct
4040   // the incoming edges.
4041   VPBasicBlock *Header = State.Plan->getEntry()->getEntryBasicBlock();
4042   for (VPRecipeBase &R : Header->phis()) {
4043     if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R))
4044       fixReduction(ReductionPhi, State);
4045     else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))
4046       fixFirstOrderRecurrence(FOR, State);
4047   }
4048 }
4049 
4050 void InnerLoopVectorizer::fixFirstOrderRecurrence(
4051     VPFirstOrderRecurrencePHIRecipe *PhiR, VPTransformState &State) {
4052   // This is the second phase of vectorizing first-order recurrences. An
4053   // overview of the transformation is described below. Suppose we have the
4054   // following loop.
4055   //
4056   //   for (int i = 0; i < n; ++i)
4057   //     b[i] = a[i] - a[i - 1];
4058   //
4059   // There is a first-order recurrence on "a". For this loop, the shorthand
4060   // scalar IR looks like:
4061   //
4062   //   scalar.ph:
4063   //     s_init = a[-1]
4064   //     br scalar.body
4065   //
4066   //   scalar.body:
4067   //     i = phi [0, scalar.ph], [i+1, scalar.body]
4068   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
4069   //     s2 = a[i]
4070   //     b[i] = s2 - s1
4071   //     br cond, scalar.body, ...
4072   //
  // In this example, s1 is a recurrence because its value depends on the
4074   // previous iteration. In the first phase of vectorization, we created a
4075   // vector phi v1 for s1. We now complete the vectorization and produce the
4076   // shorthand vector IR shown below (for VF = 4, UF = 1).
4077   //
4078   //   vector.ph:
4079   //     v_init = vector(..., ..., ..., a[-1])
4080   //     br vector.body
4081   //
4082   //   vector.body
4083   //     i = phi [0, vector.ph], [i+4, vector.body]
4084   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
4085   //     v2 = a[i, i+1, i+2, i+3];
4086   //     v3 = vector(v1(3), v2(0, 1, 2))
4087   //     b[i, i+1, i+2, i+3] = v2 - v3
4088   //     br cond, vector.body, middle.block
4089   //
4090   //   middle.block:
4091   //     x = v2(3)
4092   //     br scalar.ph
4093   //
4094   //   scalar.ph:
4095   //     s_init = phi [x, middle.block], [a[-1], otherwise]
4096   //     br scalar.body
4097   //
  // After execution of the vector loop completes, we extract the next value of
4099   // the recurrence (x) to use as the initial value in the scalar loop.
4100 
4101   // Extract the last vector element in the middle block. This will be the
4102   // initial value for the recurrence when jumping to the scalar loop.
4103   VPValue *PreviousDef = PhiR->getBackedgeValue();
4104   Value *Incoming = State.get(PreviousDef, UF - 1);
4105   auto *ExtractForScalar = Incoming;
4106   auto *IdxTy = Builder.getInt32Ty();
4107   if (VF.isVector()) {
4108     auto *One = ConstantInt::get(IdxTy, 1);
4109     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4110     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4111     auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
4112     ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx,
4113                                                     "vector.recur.extract");
4114   }
4115   // Extract the second last element in the middle block if the
4116   // Phi is used outside the loop. We need to extract the phi itself
4117   // and not the last element (the phi update in the current iteration). This
4118   // will be the value when jumping to the exit block from the LoopMiddleBlock,
4119   // when the scalar loop is not run at all.
4120   Value *ExtractForPhiUsedOutsideLoop = nullptr;
4121   if (VF.isVector()) {
4122     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4123     auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2));
4124     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
4125         Incoming, Idx, "vector.recur.extract.for.phi");
4126   } else if (UF > 1)
4127     // When loop is unrolled without vectorizing, initialize
4128     // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value
4129     // of `Incoming`. This is analogous to the vectorized case above: extracting
4130     // the second last element when VF > 1.
4131     ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);
4132 
4133   // Fix the initial value of the original recurrence in the scalar loop.
4134   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
4135   PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue());
4136   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
4137   auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue();
4138   for (auto *BB : predecessors(LoopScalarPreHeader)) {
4139     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
4140     Start->addIncoming(Incoming, BB);
4141   }
4142 
4143   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
4144   Phi->setName("scalar.recur");
4145 
4146   // Finally, fix users of the recurrence outside the loop. The users will need
4147   // either the last value of the scalar recurrence or the last value of the
4148   // vector recurrence we extracted in the middle block. Since the loop is in
4149   // LCSSA form, we just need to find all the phi nodes for the original scalar
4150   // recurrence in the exit block, and then add an edge for the middle block.
4151   // Note that LCSSA does not imply single entry when the original scalar loop
4152   // had multiple exiting edges (as we always run the last iteration in the
  // scalar epilogue); in that case, there is no edge from middle to exit and
  // thus no phis which need to be updated.
4155   if (!Cost->requiresScalarEpilogue(VF))
4156     for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4157       if (llvm::is_contained(LCSSAPhi.incoming_values(), Phi))
4158         LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
4159 }
4160 
4161 void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR,
4162                                        VPTransformState &State) {
4163   PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
  // Get its reduction variable descriptor.
4165   assert(Legal->isReductionVariable(OrigPhi) &&
4166          "Unable to find the reduction variable");
4167   const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
4168 
4169   RecurKind RK = RdxDesc.getRecurrenceKind();
4170   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
4171   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
4172   setDebugLocFromInst(ReductionStartValue);
4173 
4174   VPValue *LoopExitInstDef = PhiR->getBackedgeValue();
4175   // This is the vector-clone of the value that leaves the loop.
4176   Type *VecTy = State.get(LoopExitInstDef, 0)->getType();
4177 
  // Wrap flags are in general invalid after vectorization; clear them.
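  // (E.g., reassociating a vectorized add reduction can overflow an
  // intermediate per-lane sum even when the original scalar sum does not.)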
4179   clearReductionWrapFlags(RdxDesc, State);
4180 
4181   // Before each round, move the insertion point right between
4182   // the PHIs and the values we are going to write.
4183   // This allows us to write both PHINodes and the extractelement
4184   // instructions.
4185   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4186 
4187   setDebugLocFromInst(LoopExitInst);
4188 
4189   Type *PhiTy = OrigPhi->getType();
4190   // If tail is folded by masking, the vector value to leave the loop should be
4191   // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
4192   // instead of the former. For an inloop reduction the reduction will already
4193   // be predicated, and does not need to be handled here.
4194   if (Cost->foldTailByMasking() && !PhiR->isInLoop()) {
4195     for (unsigned Part = 0; Part < UF; ++Part) {
4196       Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
4197       Value *Sel = nullptr;
4198       for (User *U : VecLoopExitInst->users()) {
4199         if (isa<SelectInst>(U)) {
4200           assert(!Sel && "Reduction exit feeding two selects");
4201           Sel = U;
4202         } else
4203           assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
4204       }
4205       assert(Sel && "Reduction exit feeds no select");
4206       State.reset(LoopExitInstDef, Sel, Part);
4207 
4208       // If the target can create a predicated operator for the reduction at no
4209       // extra cost in the loop (for example a predicated vadd), it can be
4210       // cheaper for the select to remain in the loop than be sunk out of it,
4211       // and so use the select value for the phi instead of the old
4212       // LoopExitValue.
4213       if (PreferPredicatedReductionSelect ||
4214           TTI->preferPredicatedReductionSelect(
4215               RdxDesc.getOpcode(), PhiTy,
4216               TargetTransformInfo::ReductionFlags())) {
4217         auto *VecRdxPhi =
4218             cast<PHINode>(State.get(PhiR, Part));
4219         VecRdxPhi->setIncomingValueForBlock(
4220             LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel);
4221       }
4222     }
4223   }
4224 
4225   // If the vector reduction can be performed in a smaller type, we truncate
4226   // then extend the loop exit value to enable InstCombine to evaluate the
4227   // entire expression in the smaller type.
4228   if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) {
4229     assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!");
4230     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
4231     Builder.SetInsertPoint(
4232         LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
4233     VectorParts RdxParts(UF);
4234     for (unsigned Part = 0; Part < UF; ++Part) {
4235       RdxParts[Part] = State.get(LoopExitInstDef, Part);
4236       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4237       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
4238                                         : Builder.CreateZExt(Trunc, VecTy);
4239       for (User *U : llvm::make_early_inc_range(RdxParts[Part]->users()))
4240         if (U != Trunc) {
4241           U->replaceUsesOfWith(RdxParts[Part], Extnd);
4242           RdxParts[Part] = Extnd;
4243         }
4244     }
4245     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4246     for (unsigned Part = 0; Part < UF; ++Part) {
4247       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4248       State.reset(LoopExitInstDef, RdxParts[Part], Part);
4249     }
4250   }
4251 
4252   // Reduce all of the unrolled parts into a single vector.
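  // For example (illustrative), with UF = 2 and an integer add reduction the
  // two unrolled parts are combined with a single 'bin.rdx' add; the combined
  // vector is then reduced to a scalar by createTargetReduction().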
4253   Value *ReducedPartRdx = State.get(LoopExitInstDef, 0);
4254   unsigned Op = RecurrenceDescriptor::getOpcode(RK);
4255 
4256   // The middle block terminator has already been assigned a DebugLoc here (the
4257   // OrigLoop's single latch terminator). We want the whole middle block to
4258   // appear to execute on this line because: (a) it is all compiler generated,
4259   // (b) these instructions are always executed after evaluating the latch
4260   // conditional branch, and (c) other passes may add new predecessors which
4261   // terminate on this line. This is the easiest way to ensure we don't
4262   // accidentally cause an extra step back into the loop while debugging.
4263   setDebugLocFromInst(LoopMiddleBlock->getTerminator());
4264   if (PhiR->isOrdered())
4265     ReducedPartRdx = State.get(LoopExitInstDef, UF - 1);
4266   else {
4267     // Floating-point operations should have some FMF to enable the reduction.
4268     IRBuilderBase::FastMathFlagGuard FMFG(Builder);
4269     Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
4270     for (unsigned Part = 1; Part < UF; ++Part) {
4271       Value *RdxPart = State.get(LoopExitInstDef, Part);
4272       if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
4273         ReducedPartRdx = Builder.CreateBinOp(
4274             (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
4275       } else if (RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK))
4276         ReducedPartRdx = createSelectCmpOp(Builder, ReductionStartValue, RK,
4277                                            ReducedPartRdx, RdxPart);
4278       else
4279         ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
4280     }
4281   }
4282 
4283   // Create the reduction after the loop. Note that inloop reductions create the
4284   // target reduction in the loop using a Reduction recipe.
4285   if (VF.isVector() && !PhiR->isInLoop()) {
4286     ReducedPartRdx =
4287         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, OrigPhi);
4288     // If the reduction can be performed in a smaller type, we need to extend
4289     // the reduction to the wider type before we branch to the original loop.
4290     if (PhiTy != RdxDesc.getRecurrenceType())
4291       ReducedPartRdx = RdxDesc.isSigned()
4292                            ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
4293                            : Builder.CreateZExt(ReducedPartRdx, PhiTy);
4294   }
4295 
4296   // Create a phi node that merges control-flow from the backedge-taken check
4297   // block and the middle block.
4298   PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx",
4299                                         LoopScalarPreHeader->getTerminator());
4300   for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
4301     BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
4302   BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
4303 
4304   // Now, we need to fix the users of the reduction variable
4305   // inside and outside of the scalar remainder loop.
4306 
4307   // We know that the loop is in LCSSA form. We need to update the PHI nodes
4308   // in the exit blocks.  See comment on analogous loop in
  // fixFirstOrderRecurrence for a more complete explanation of the logic.
4310   if (!Cost->requiresScalarEpilogue(VF))
4311     for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4312       if (llvm::is_contained(LCSSAPhi.incoming_values(), LoopExitInst))
4313         LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
4314 
4315   // Fix the scalar loop reduction variable with the incoming reduction sum
4316   // from the vector body and from the backedge value.
4317   int IncomingEdgeBlockIdx =
4318       OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4319   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4320   // Pick the other block.
4321   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4322   OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4323   OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4324 }
4325 
4326 void InnerLoopVectorizer::clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
4327                                                   VPTransformState &State) {
4328   RecurKind RK = RdxDesc.getRecurrenceKind();
4329   if (RK != RecurKind::Add && RK != RecurKind::Mul)
4330     return;
4331 
4332   Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
4333   assert(LoopExitInstr && "null loop exit instruction");
4334   SmallVector<Instruction *, 8> Worklist;
4335   SmallPtrSet<Instruction *, 8> Visited;
4336   Worklist.push_back(LoopExitInstr);
4337   Visited.insert(LoopExitInstr);
4338 
4339   while (!Worklist.empty()) {
4340     Instruction *Cur = Worklist.pop_back_val();
4341     if (isa<OverflowingBinaryOperator>(Cur))
4342       for (unsigned Part = 0; Part < UF; ++Part) {
4343         // FIXME: Should not rely on getVPValue at this point.
4344         Value *V = State.get(State.Plan->getVPValue(Cur, true), Part);
4345         cast<Instruction>(V)->dropPoisonGeneratingFlags();
4346       }
4347 
4348     for (User *U : Cur->users()) {
4349       Instruction *UI = cast<Instruction>(U);
4350       if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
4351           Visited.insert(UI).second)
4352         Worklist.push_back(UI);
4353     }
4354   }
4355 }
4356 
4357 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) {
4358   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4359     if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1)
4360       // Some phis were already hand updated by the reduction and recurrence
4361       // code above, leave them alone.
4362       continue;
4363 
4364     auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
4365     // Non-instruction incoming values will have only one value.
4366 
4367     VPLane Lane = VPLane::getFirstLane();
4368     if (isa<Instruction>(IncomingValue) &&
4369         !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue),
4370                                            VF))
4371       Lane = VPLane::getLastLaneForVF(VF);
4372 
4373     // Can be a loop invariant incoming value or the last scalar value to be
4374     // extracted from the vectorized loop.
4375     // FIXME: Should not rely on getVPValue at this point.
4376     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4377     Value *lastIncomingValue =
4378         OrigLoop->isLoopInvariant(IncomingValue)
4379             ? IncomingValue
4380             : State.get(State.Plan->getVPValue(IncomingValue, true),
4381                         VPIteration(UF - 1, Lane));
4382     LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
4383   }
4384 }
4385 
4386 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4387   // The basic block and loop containing the predicated instruction.
4388   auto *PredBB = PredInst->getParent();
4389   auto *VectorLoop = LI->getLoopFor(PredBB);
4390 
4391   // Initialize a worklist with the operands of the predicated instruction.
4392   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4393 
4394   // Holds instructions that we need to analyze again. An instruction may be
4395   // reanalyzed if we don't yet know if we can sink it or not.
4396   SmallVector<Instruction *, 8> InstsToReanalyze;
4397 
4398   // Returns true if a given use occurs in the predicated block. Phi nodes use
4399   // their operands in their corresponding predecessor blocks.
4400   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4401     auto *I = cast<Instruction>(U.getUser());
4402     BasicBlock *BB = I->getParent();
4403     if (auto *Phi = dyn_cast<PHINode>(I))
4404       BB = Phi->getIncomingBlock(
4405           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4406     return BB == PredBB;
4407   };
4408 
4409   // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
4411   // operands are then added to the worklist. The algorithm ends after one pass
4412   // through the worklist doesn't sink a single instruction.
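  // Illustrative example: if PredBB contains a predicated '%d = sdiv i32 %a,
  // %z' and '%a = add i32 %x, %y' is only used by that sdiv, the add is sunk
  // into PredBB on the first pass and its own operands are reconsidered on
  // the next one.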
4413   bool Changed;
4414   do {
4415     // Add the instructions that need to be reanalyzed to the worklist, and
4416     // reset the changed indicator.
4417     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4418     InstsToReanalyze.clear();
4419     Changed = false;
4420 
4421     while (!Worklist.empty()) {
4422       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4423 
4424       // We can't sink an instruction if it is a phi node, is not in the loop,
4425       // or may have side effects.
4426       if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) ||
4427           I->mayHaveSideEffects())
4428         continue;
4429 
4430       // If the instruction is already in PredBB, check if we can sink its
4431       // operands. In that case, VPlan's sinkScalarOperands() succeeded in
4432       // sinking the scalar instruction I, hence it appears in PredBB; but it
4433       // may have failed to sink I's operands (recursively), which we try
4434       // (again) here.
4435       if (I->getParent() == PredBB) {
4436         Worklist.insert(I->op_begin(), I->op_end());
4437         continue;
4438       }
4439 
4440       // It's legal to sink the instruction if all its uses occur in the
4441       // predicated block. Otherwise, there's nothing to do yet, and we may
4442       // need to reanalyze the instruction.
4443       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4444         InstsToReanalyze.push_back(I);
4445         continue;
4446       }
4447 
      // Move the instruction to the beginning of the predicated block, and add
      // its operands to the worklist.
4450       I->moveBefore(&*PredBB->getFirstInsertionPt());
4451       Worklist.insert(I->op_begin(), I->op_end());
4452 
4453       // The sinking may have enabled other instructions to be sunk, so we will
4454       // need to iterate.
4455       Changed = true;
4456     }
4457   } while (Changed);
4458 }
4459 
4460 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
4461   for (PHINode *OrigPhi : OrigPHIsToFix) {
4462     VPWidenPHIRecipe *VPPhi =
4463         cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi));
4464     PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
4465     // Make sure the builder has a valid insert point.
4466     Builder.SetInsertPoint(NewPhi);
4467     for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) {
4468       VPValue *Inc = VPPhi->getIncomingValue(i);
4469       VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i);
4470       NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]);
4471     }
4472   }
4473 }
4474 
4475 bool InnerLoopVectorizer::useOrderedReductions(
4476     const RecurrenceDescriptor &RdxDesc) {
4477   return Cost->useOrderedReductions(RdxDesc);
4478 }
4479 
4480 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
4481                                               VPWidenPHIRecipe *PhiR,
4482                                               VPTransformState &State) {
4483   PHINode *P = cast<PHINode>(PN);
4484   if (EnableVPlanNativePath) {
4485     // Currently we enter here in the VPlan-native path for non-induction
4486     // PHIs where all control flow is uniform. We simply widen these PHIs.
4487     // Create a vector phi with no operands - the vector phi operands will be
4488     // set at the end of vector code generation.
4489     Type *VecTy = (State.VF.isScalar())
4490                       ? PN->getType()
4491                       : VectorType::get(PN->getType(), State.VF);
4492     Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4493     State.set(PhiR, VecPhi, 0);
4494     OrigPHIsToFix.push_back(P);
4495 
4496     return;
4497   }
4498 
4499   assert(PN->getParent() == OrigLoop->getHeader() &&
4500          "Non-header phis should have been handled elsewhere");
4501 
4502   // In order to support recurrences we need to be able to vectorize Phi nodes.
4503   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4504   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4505   // this value when we vectorize all of the instructions that use the PHI.
4506 
4507   assert(!Legal->isReductionVariable(P) &&
4508          "reductions should be handled elsewhere");
4509 
4510   setDebugLocFromInst(P);
4511 
4512   // This PHINode must be an induction variable.
4513   // Make sure that we know about it.
4514   assert(Legal->getInductionVars().count(P) && "Not an induction variable");
4515 
4516   InductionDescriptor II = Legal->getInductionVars().lookup(P);
4517   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4518 
4519   auto *IVR = PhiR->getParent()->getPlan()->getCanonicalIV();
4520   PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, 0));
4521 
4522   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4523   // which can be found from the original scalar operations.
4524   switch (II.getKind()) {
4525   case InductionDescriptor::IK_NoInduction:
4526     llvm_unreachable("Unknown induction");
4527   case InductionDescriptor::IK_IntInduction:
4528   case InductionDescriptor::IK_FpInduction:
4529     llvm_unreachable("Integer/fp induction is handled elsewhere.");
4530   case InductionDescriptor::IK_PtrInduction: {
4531     // Handle the pointer induction variable case.
4532     assert(P->getType()->isPointerTy() && "Unexpected type.");
4533 
4534     if (Cost->isScalarAfterVectorization(P, State.VF)) {
4535       // This is the normalized GEP that starts counting at zero.
4536       Value *PtrInd =
4537           Builder.CreateSExtOrTrunc(CanonicalIV, II.getStep()->getType());
4538       // Determine the number of scalars we need to generate for each unroll
4539       // iteration. If the instruction is uniform, we only need to generate the
4540       // first lane. Otherwise, we generate all VF values.
4541       bool IsUniform = Cost->isUniformAfterVectorization(P, State.VF);
4542       assert((IsUniform || !State.VF.isScalable()) &&
4543              "Cannot scalarize a scalable VF");
4544       unsigned Lanes = IsUniform ? 1 : State.VF.getFixedValue();
4545 
4546       for (unsigned Part = 0; Part < UF; ++Part) {
4547         Value *PartStart =
4548             createStepForVF(Builder, PtrInd->getType(), VF, Part);
4549 
4550         for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4551           Value *Idx = Builder.CreateAdd(
4552               PartStart, ConstantInt::get(PtrInd->getType(), Lane));
4553           Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4554           Value *SclrGep = emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(),
4555                                                 DL, II, State.CFG.PrevBB);
4556           SclrGep->setName("next.gep");
4557           State.set(PhiR, SclrGep, VPIteration(Part, Lane));
4558         }
4559       }
4560       return;
4561     }
4562     assert(isa<SCEVConstant>(II.getStep()) &&
4563            "Induction step not a SCEV constant!");
4564     Type *PhiType = II.getStep()->getType();
4565 
4566     // Build a pointer phi
4567     Value *ScalarStartValue = PhiR->getStartValue()->getLiveInIRValue();
4568     Type *ScStValueType = ScalarStartValue->getType();
4569     PHINode *NewPointerPhi =
4570         PHINode::Create(ScStValueType, 2, "pointer.phi", CanonicalIV);
4571     NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader);
4572 
4573     // A pointer induction, performed by using a gep
4574     BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
4575     Instruction *InductionLoc = LoopLatch->getTerminator();
4576     const SCEV *ScalarStep = II.getStep();
4577     SCEVExpander Exp(*PSE.getSE(), DL, "induction");
4578     Value *ScalarStepValue =
4579         Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc);
4580     Value *RuntimeVF = getRuntimeVF(Builder, PhiType, VF);
4581     Value *NumUnrolledElems =
4582         Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF));
4583     Value *InductionGEP = GetElementPtrInst::Create(
4584         II.getElementType(), NewPointerPhi,
4585         Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind",
4586         InductionLoc);
4587     NewPointerPhi->addIncoming(InductionGEP, LoopLatch);
4588 
4589     // Create UF many actual address geps that use the pointer
4590     // phi as base and a vectorized version of the step value
4591     // (<step*0, ..., step*N>) as offset.
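    // For example (illustrative), with VF = 4, UF = 2 and a unit step this
    // yields lane offsets <0,1,2,3> for part 0 and <4,5,6,7> for part 1, each
    // scaled by the step and added to the pointer phi through a GEP.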
4592     for (unsigned Part = 0; Part < State.UF; ++Part) {
4593       Type *VecPhiType = VectorType::get(PhiType, State.VF);
4594       Value *StartOffsetScalar =
4595           Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part));
4596       Value *StartOffset =
4597           Builder.CreateVectorSplat(State.VF, StartOffsetScalar);
4598       // Create a vector of consecutive numbers from zero to VF.
4599       StartOffset =
4600           Builder.CreateAdd(StartOffset, Builder.CreateStepVector(VecPhiType));
4601 
4602       Value *GEP = Builder.CreateGEP(
4603           II.getElementType(), NewPointerPhi,
4604           Builder.CreateMul(
4605               StartOffset, Builder.CreateVectorSplat(State.VF, ScalarStepValue),
4606               "vector.gep"));
4607       State.set(PhiR, GEP, Part);
4608     }
4609   }
4610   }
4611 }
4612 
4613 /// A helper function for checking whether an integer division-related
4614 /// instruction may divide by zero (in which case it must be predicated if
4615 /// executed conditionally in the scalar code).
4616 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
/// Non-zero divisors that are not compile-time constants will not be
4618 /// converted into multiplication, so we will still end up scalarizing
4619 /// the division, but can do so w/o predication.
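/// For example (illustrative), 'udiv i32 %x, %n' with a non-constant divisor
/// %n may divide by zero and returns true here, while 'udiv i32 %x, 7'
/// returns false.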
4620 static bool mayDivideByZero(Instruction &I) {
4621   assert((I.getOpcode() == Instruction::UDiv ||
4622           I.getOpcode() == Instruction::SDiv ||
4623           I.getOpcode() == Instruction::URem ||
4624           I.getOpcode() == Instruction::SRem) &&
4625          "Unexpected instruction");
4626   Value *Divisor = I.getOperand(1);
4627   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4628   return !CInt || CInt->isZero();
4629 }
4630 
4631 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def,
4632                                                VPUser &ArgOperands,
4633                                                VPTransformState &State) {
4634   assert(!isa<DbgInfoIntrinsic>(I) &&
4635          "DbgInfoIntrinsic should have been dropped during VPlan construction");
4636   setDebugLocFromInst(&I);
4637 
4638   Module *M = I.getParent()->getParent()->getParent();
4639   auto *CI = cast<CallInst>(&I);
4640 
4641   SmallVector<Type *, 4> Tys;
4642   for (Value *ArgOperand : CI->args())
4643     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue()));
4644 
4645   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4646 
  // The flag below shows whether we use an intrinsic or a usual call for the
  // vectorized version of the instruction.
  // Is it beneficial to perform the intrinsic call compared to the lib call?
4650   bool NeedToScalarize = false;
4651   InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
4652   InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0;
4653   bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
4654   assert((UseVectorIntrinsic || !NeedToScalarize) &&
4655          "Instruction should be scalarized elsewhere.");
4656   assert((IntrinsicCost.isValid() || CallCost.isValid()) &&
4657          "Either the intrinsic cost or vector call cost must be valid");
4658 
4659   for (unsigned Part = 0; Part < UF; ++Part) {
4660     SmallVector<Type *, 2> TysForDecl = {CI->getType()};
4661     SmallVector<Value *, 4> Args;
4662     for (auto &I : enumerate(ArgOperands.operands())) {
4663       // Some intrinsics have a scalar argument - don't replace it with a
4664       // vector.
4665       Value *Arg;
4666       if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
4667         Arg = State.get(I.value(), Part);
4668       else {
4669         Arg = State.get(I.value(), VPIteration(0, 0));
4670         if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index()))
4671           TysForDecl.push_back(Arg->getType());
4672       }
4673       Args.push_back(Arg);
4674     }
4675 
4676     Function *VectorF;
4677     if (UseVectorIntrinsic) {
4678       // Use vector version of the intrinsic.
4679       if (VF.isVector())
4680         TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
4681       VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4682       assert(VectorF && "Can't retrieve vector intrinsic.");
4683     } else {
4684       // Use vector version of the function call.
4685       const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
4686 #ifndef NDEBUG
4687       assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
4688              "Can't create vector function.");
4689 #endif
      VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
    }
    SmallVector<OperandBundleDef, 1> OpBundles;
    CI->getOperandBundlesAsDefs(OpBundles);
    CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);

    if (isa<FPMathOperator>(V))
      V->copyFastMathFlags(CI);

    State.set(Def, V, Part);
    addMetadata(V, &I);
4701   }
4702 }
4703 
4704 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
4705   // We should not collect Scalars more than once per VF. Right now, this
4706   // function is called from collectUniformsAndScalars(), which already does
4707   // this check. Collecting Scalars for VF=1 does not make any sense.
4708   assert(VF.isVector() && Scalars.find(VF) == Scalars.end() &&
4709          "This function should not be visited twice for the same VF");
4710 
4711   SmallSetVector<Instruction *, 8> Worklist;
4712 
4713   // These sets are used to seed the analysis with pointers used by memory
4714   // accesses that will remain scalar.
4715   SmallSetVector<Instruction *, 8> ScalarPtrs;
4716   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
4717   auto *Latch = TheLoop->getLoopLatch();
4718 
4719   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
4720   // The pointer operands of loads and stores will be scalar as long as the
4721   // memory access is not a gather or scatter operation. The value operand of a
4722   // store will remain scalar if the store is scalarized.
4723   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
4724     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
4725     assert(WideningDecision != CM_Unknown &&
4726            "Widening decision should be ready at this moment");
4727     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
4728       if (Ptr == Store->getValueOperand())
4729         return WideningDecision == CM_Scalarize;
4730     assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
4731            "Ptr is neither a value or pointer operand");
4732     return WideningDecision != CM_GatherScatter;
4733   };
4734 
4735   // A helper that returns true if the given value is a bitcast or
4736   // getelementptr instruction contained in the loop.
4737   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
4738     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
4739             isa<GetElementPtrInst>(V)) &&
4740            !TheLoop->isLoopInvariant(V);
4741   };
4742 
4743   // A helper that evaluates a memory access's use of a pointer. If the use will
4744   // be a scalar use and the pointer is only used by memory accesses, we place
4745   // the pointer in ScalarPtrs. Otherwise, the pointer is placed in
4746   // PossibleNonScalarPtrs.
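  // For example, a getelementptr used both as a load address and by a
  // non-memory instruction is placed in PossibleNonScalarPtrs, because the
  // non-memory user may still require a vector of pointers.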
4747   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
4748     // We only care about bitcast and getelementptr instructions contained in
4749     // the loop.
4750     if (!isLoopVaryingBitCastOrGEP(Ptr))
4751       return;
4752 
4753     // If the pointer has already been identified as scalar (e.g., if it was
4754     // also identified as uniform), there's nothing to do.
4755     auto *I = cast<Instruction>(Ptr);
4756     if (Worklist.count(I))
4757       return;
4758 
4759     // If the use of the pointer will be a scalar use, and all users of the
4760     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
4761     // place the pointer in PossibleNonScalarPtrs.
4762     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
4763           return isa<LoadInst>(U) || isa<StoreInst>(U);
4764         }))
4765       ScalarPtrs.insert(I);
4766     else
4767       PossibleNonScalarPtrs.insert(I);
4768   };
4769 
  // We seed the scalars analysis with three classes of instructions: (1)
  // instructions marked uniform-after-vectorization, (2) bitcast,
  // getelementptr and (pointer) phi instructions used by memory accesses
  // requiring a scalar use, and (3) instructions that have been forced to
  // remain scalar for this VF.
4774   //
4775   // (1) Add to the worklist all instructions that have been identified as
4776   // uniform-after-vectorization.
4777   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
4778 
4779   // (2) Add to the worklist all bitcast and getelementptr instructions used by
4780   // memory accesses requiring a scalar use. The pointer operands of loads and
  // stores will be scalar as long as the memory access is not a gather or
4782   // scatter operation. The value operand of a store will remain scalar if the
4783   // store is scalarized.
4784   for (auto *BB : TheLoop->blocks())
4785     for (auto &I : *BB) {
4786       if (auto *Load = dyn_cast<LoadInst>(&I)) {
4787         evaluatePtrUse(Load, Load->getPointerOperand());
4788       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
4789         evaluatePtrUse(Store, Store->getPointerOperand());
4790         evaluatePtrUse(Store, Store->getValueOperand());
4791       }
4792     }
4793   for (auto *I : ScalarPtrs)
4794     if (!PossibleNonScalarPtrs.count(I)) {
4795       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
4796       Worklist.insert(I);
4797     }
4798 
4799   // Insert the forced scalars.
4800   // FIXME: Currently widenPHIInstruction() often creates a dead vector
4801   // induction variable when the PHI user is scalarized.
4802   auto ForcedScalar = ForcedScalars.find(VF);
4803   if (ForcedScalar != ForcedScalars.end())
4804     for (auto *I : ForcedScalar->second)
4805       Worklist.insert(I);
4806 
4807   // Expand the worklist by looking through any bitcasts and getelementptr
4808   // instructions we've already identified as scalar. This is similar to the
4809   // expansion step in collectLoopUniforms(); however, here we're only
4810   // expanding to include additional bitcasts and getelementptr instructions.
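  // For example, if a getelementptr already known to be scalar is itself
  // computed from another loop-varying getelementptr whose only other users
  // are scalar memory accesses, that source getelementptr is added as well.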
4811   unsigned Idx = 0;
4812   while (Idx != Worklist.size()) {
4813     Instruction *Dst = Worklist[Idx++];
4814     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
4815       continue;
4816     auto *Src = cast<Instruction>(Dst->getOperand(0));
4817     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
4818           auto *J = cast<Instruction>(U);
4819           return !TheLoop->contains(J) || Worklist.count(J) ||
4820                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
4821                   isScalarUse(J, Src));
4822         })) {
4823       Worklist.insert(Src);
4824       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
4825     }
4826   }
4827 
4828   // An induction variable will remain scalar if all users of the induction
4829   // variable and induction variable update remain scalar.
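  // For example, an induction variable used only by its own update and by
  // getelementptrs already identified as scalar will stay scalar, whereas
  // one with an in-loop user that will be widened is not added here.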
4830   for (auto &Induction : Legal->getInductionVars()) {
4831     auto *Ind = Induction.first;
4832     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4833 
4834     // If tail-folding is applied, the primary induction variable will be used
4835     // to feed a vector compare.
4836     if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
4837       continue;
4838 
4839     // Returns true if \p Indvar is a pointer induction that is used directly by
4840     // load/store instruction \p I.
4841     auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar,
4842                                               Instruction *I) {
4843       return Induction.second.getKind() ==
4844                  InductionDescriptor::IK_PtrInduction &&
4845              (isa<LoadInst>(I) || isa<StoreInst>(I)) &&
4846              Indvar == getLoadStorePointerOperand(I) && isScalarUse(I, Indvar);
4847     };
4848 
4849     // Determine if all users of the induction variable are scalar after
4850     // vectorization.
4851     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4852       auto *I = cast<Instruction>(U);
4853       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
4854              IsDirectLoadStoreFromPtrIndvar(Ind, I);
4855     });
4856     if (!ScalarInd)
4857       continue;
4858 
4859     // Determine if all users of the induction variable update instruction are
4860     // scalar after vectorization.
4861     auto ScalarIndUpdate =
4862         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4863           auto *I = cast<Instruction>(U);
4864           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
4865                  IsDirectLoadStoreFromPtrIndvar(IndUpdate, I);
4866         });
4867     if (!ScalarIndUpdate)
4868       continue;
4869 
4870     // The induction variable and its update instruction will remain scalar.
4871     Worklist.insert(Ind);
4872     Worklist.insert(IndUpdate);
4873     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
4874     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
4875                       << "\n");
4876   }
4877 
4878   Scalars[VF].insert(Worklist.begin(), Worklist.end());
4879 }
4880 
4881 bool LoopVectorizationCostModel::isScalarWithPredication(
4882     Instruction *I, ElementCount VF) const {
4883   if (!blockNeedsPredicationForAnyReason(I->getParent()))
4884     return false;
  switch (I->getOpcode()) {
4886   default:
4887     break;
4888   case Instruction::Load:
4889   case Instruction::Store: {
4890     if (!Legal->isMaskRequired(I))
4891       return false;
4892     auto *Ptr = getLoadStorePointerOperand(I);
4893     auto *Ty = getLoadStoreType(I);
4894     Type *VTy = Ty;
4895     if (VF.isVector())
4896       VTy = VectorType::get(Ty, VF);
4897     const Align Alignment = getLoadStoreAlignment(I);
4898     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
4899                                 TTI.isLegalMaskedGather(VTy, Alignment))
4900                             : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
4901                                 TTI.isLegalMaskedScatter(VTy, Alignment));
4902   }
4903   case Instruction::UDiv:
4904   case Instruction::SDiv:
4905   case Instruction::SRem:
4906   case Instruction::URem:
4907     return mayDivideByZero(*I);
4908   }
4909   return false;
4910 }
4911 
4912 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
4913     Instruction *I, ElementCount VF) {
4914   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
4915   assert(getWideningDecision(I, VF) == CM_Unknown &&
4916          "Decision should not be set yet.");
4917   auto *Group = getInterleavedAccessGroup(I);
4918   assert(Group && "Must have a group.");
4919 
  // If the instruction's allocated size doesn't equal its type size, it
  // requires padding and will be scalarized.
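  // For instance, a member type such as i1 or x86_fp80, whose allocation
  // size includes padding beyond its type size, is not layout-compatible
  // with a tightly packed vector and is handled by scalarization instead.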
4922   auto &DL = I->getModule()->getDataLayout();
4923   auto *ScalarTy = getLoadStoreType(I);
4924   if (hasIrregularType(ScalarTy, DL))
4925     return false;
4926 
4927   // Check if masking is required.
4928   // A Group may need masking for one of two reasons: it resides in a block that
4929   // needs predication, or it was decided to use masking to deal with gaps
4930   // (either a gap at the end of a load-access that may result in a speculative
4931   // load, or any gaps in a store-access).
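  // For example, a store group with factor 3 where only members 0 and 2 are
  // present would clobber the untouched middle element without a mask, and a
  // load group with a trailing gap could speculatively read past the end of
  // the underlying object.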
4932   bool PredicatedAccessRequiresMasking =
4933       blockNeedsPredicationForAnyReason(I->getParent()) &&
4934       Legal->isMaskRequired(I);
4935   bool LoadAccessWithGapsRequiresEpilogMasking =
4936       isa<LoadInst>(I) && Group->requiresScalarEpilogue() &&
4937       !isScalarEpilogueAllowed();
4938   bool StoreAccessWithGapsRequiresMasking =
4939       isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor());
4940   if (!PredicatedAccessRequiresMasking &&
4941       !LoadAccessWithGapsRequiresEpilogMasking &&
4942       !StoreAccessWithGapsRequiresMasking)
4943     return true;
4944 
4945   // If masked interleaving is required, we expect that the user/target had
4946   // enabled it, because otherwise it either wouldn't have been created or
4947   // it should have been invalidated by the CostModel.
4948   assert(useMaskedInterleavedAccesses(TTI) &&
4949          "Masked interleave-groups for predicated accesses are not enabled.");
4950 
4951   if (Group->isReverse())
4952     return false;
4953 
4954   auto *Ty = getLoadStoreType(I);
4955   const Align Alignment = getLoadStoreAlignment(I);
4956   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
4957                           : TTI.isLegalMaskedStore(Ty, Alignment);
4958 }
4959 
4960 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
4961     Instruction *I, ElementCount VF) {
4962   // Get and ensure we have a valid memory instruction.
4963   assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction");
4964 
4965   auto *Ptr = getLoadStorePointerOperand(I);
4966   auto *ScalarTy = getLoadStoreType(I);
4967 
4968   // In order to be widened, the pointer should be consecutive, first of all.
4969   if (!Legal->isConsecutivePtr(ScalarTy, Ptr))
4970     return false;
4971 
4972   // If the instruction is a store located in a predicated block, it will be
4973   // scalarized.
4974   if (isScalarWithPredication(I, VF))
4975     return false;
4976 
  // If the instruction's allocated size doesn't equal its type size, it
  // requires padding and will be scalarized.
4979   auto &DL = I->getModule()->getDataLayout();
4980   if (hasIrregularType(ScalarTy, DL))
4981     return false;
4982 
4983   return true;
4984 }
4985 
4986 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
4987   // We should not collect Uniforms more than once per VF. Right now,
4988   // this function is called from collectUniformsAndScalars(), which
4989   // already does this check. Collecting Uniforms for VF=1 does not make any
4990   // sense.
4991 
4992   assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
4993          "This function should not be visited twice for the same VF");
4994 
  // Initialize an empty entry for this VF so that, even if no uniform values
  // are found, Uniforms.count(VF) will return 1 and we will not analyze this
  // VF again.
4997   Uniforms[VF].clear();
4998 
4999   // We now know that the loop is vectorizable!
5000   // Collect instructions inside the loop that will remain uniform after
5001   // vectorization.
5002 
5003   // Global values, params and instructions outside of current loop are out of
5004   // scope.
5005   auto isOutOfScope = [&](Value *V) -> bool {
5006     Instruction *I = dyn_cast<Instruction>(V);
5007     return (!I || !TheLoop->contains(I));
5008   };
5009 
5010   // Worklist containing uniform instructions demanding lane 0.
5011   SetVector<Instruction *> Worklist;
5012   BasicBlock *Latch = TheLoop->getLoopLatch();
5013 
5014   // Add uniform instructions demanding lane 0 to the worklist. Instructions
5015   // that are scalar with predication must not be considered uniform after
5016   // vectorization, because that would create an erroneous replicating region
5017   // where only a single instance out of VF should be formed.
  // TODO: optimize such rare cases if they are found to be important; see
  // PR40816.
5019   auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
5020     if (isOutOfScope(I)) {
5021       LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
5022                         << *I << "\n");
5023       return;
5024     }
5025     if (isScalarWithPredication(I, VF)) {
5026       LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
5027                         << *I << "\n");
5028       return;
5029     }
5030     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
5031     Worklist.insert(I);
5032   };
5033 
5034   // Start with the conditional branch. If the branch condition is an
5035   // instruction contained in the loop that is only used by the branch, it is
5036   // uniform.
5037   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
5038   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
5039     addToWorklistIfAllowed(Cmp);
5040 
5041   auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
5042     InstWidening WideningDecision = getWideningDecision(I, VF);
5043     assert(WideningDecision != CM_Unknown &&
5044            "Widening decision should be ready at this moment");
5045 
5046     // A uniform memory op is itself uniform.  We exclude uniform stores
5047     // here as they demand the last lane, not the first one.
5048     if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) {
5049       assert(WideningDecision == CM_Scalarize);
5050       return true;
5051     }
5052 
5053     return (WideningDecision == CM_Widen ||
5054             WideningDecision == CM_Widen_Reverse ||
5055             WideningDecision == CM_Interleave);
5056   };
5057 
5058 
5059   // Returns true if Ptr is the pointer operand of a memory access instruction
5060   // I, and I is known to not require scalarization.
5061   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
5062     return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
5063   };
5064 
5065   // Holds a list of values which are known to have at least one uniform use.
5066   // Note that there may be other uses which aren't uniform.  A "uniform use"
5067   // here is something which only demands lane 0 of the unrolled iterations;
5068   // it does not imply that all lanes produce the same value (e.g. this is not
5069   // the usual meaning of uniform)
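  // For example, the address of a widened consecutive load is a uniform use:
  // only its lane-0 value is needed to form the wide load, even though the
  // address differs per lane.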
5070   SetVector<Value *> HasUniformUse;
5071 
5072   // Scan the loop for instructions which are either a) known to have only
5073   // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
5074   for (auto *BB : TheLoop->blocks())
5075     for (auto &I : *BB) {
5076       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) {
5077         switch (II->getIntrinsicID()) {
5078         case Intrinsic::sideeffect:
5079         case Intrinsic::experimental_noalias_scope_decl:
5080         case Intrinsic::assume:
5081         case Intrinsic::lifetime_start:
5082         case Intrinsic::lifetime_end:
5083           if (TheLoop->hasLoopInvariantOperands(&I))
5084             addToWorklistIfAllowed(&I);
5085           break;
5086         default:
5087           break;
5088         }
5089       }
5090 
5091       // ExtractValue instructions must be uniform, because the operands are
5092       // known to be loop-invariant.
5093       if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) {
5094         assert(isOutOfScope(EVI->getAggregateOperand()) &&
5095                "Expected aggregate value to be loop invariant");
5096         addToWorklistIfAllowed(EVI);
5097         continue;
5098       }
5099 
5100       // If there's no pointer operand, there's nothing to do.
5101       auto *Ptr = getLoadStorePointerOperand(&I);
5102       if (!Ptr)
5103         continue;
5104 
5105       // A uniform memory op is itself uniform.  We exclude uniform stores
5106       // here as they demand the last lane, not the first one.
5107       if (isa<LoadInst>(I) && Legal->isUniformMemOp(I))
5108         addToWorklistIfAllowed(&I);
5109 
5110       if (isUniformDecision(&I, VF)) {
5111         assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check");
5112         HasUniformUse.insert(Ptr);
5113       }
5114     }
5115 
5116   // Add to the worklist any operands which have *only* uniform (e.g. lane 0
5117   // demanding) users.  Since loops are assumed to be in LCSSA form, this
5118   // disallows uses outside the loop as well.
5119   for (auto *V : HasUniformUse) {
5120     if (isOutOfScope(V))
5121       continue;
5122     auto *I = cast<Instruction>(V);
5123     auto UsersAreMemAccesses =
5124       llvm::all_of(I->users(), [&](User *U) -> bool {
5125         return isVectorizedMemAccessUse(cast<Instruction>(U), V);
5126       });
5127     if (UsersAreMemAccesses)
5128       addToWorklistIfAllowed(I);
5129   }
5130 
  // Expand Worklist in topological order: whenever a new instruction
  // is added, its users should already be inside Worklist. This ensures
  // that a uniform instruction will only be used by uniform instructions.
5134   unsigned idx = 0;
5135   while (idx != Worklist.size()) {
5136     Instruction *I = Worklist[idx++];
5137 
5138     for (auto OV : I->operand_values()) {
5139       // isOutOfScope operands cannot be uniform instructions.
5140       if (isOutOfScope(OV))
5141         continue;
      // First-order recurrence phis should typically be considered
      // non-uniform.
5144       auto *OP = dyn_cast<PHINode>(OV);
5145       if (OP && Legal->isFirstOrderRecurrence(OP))
5146         continue;
5147       // If all the users of the operand are uniform, then add the
5148       // operand into the uniform worklist.
5149       auto *OI = cast<Instruction>(OV);
5150       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
5151             auto *J = cast<Instruction>(U);
5152             return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
5153           }))
5154         addToWorklistIfAllowed(OI);
5155     }
5156   }
5157 
5158   // For an instruction to be added into Worklist above, all its users inside
5159   // the loop should also be in Worklist. However, this condition cannot be
5160   // true for phi nodes that form a cyclic dependence. We must process phi
5161   // nodes separately. An induction variable will remain uniform if all users
5162   // of the induction variable and induction variable update remain uniform.
5163   // The code below handles both pointer and non-pointer induction variables.
5164   for (auto &Induction : Legal->getInductionVars()) {
5165     auto *Ind = Induction.first;
5166     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5167 
5168     // Determine if all users of the induction variable are uniform after
5169     // vectorization.
5170     auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5171       auto *I = cast<Instruction>(U);
5172       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
5173              isVectorizedMemAccessUse(I, Ind);
5174     });
5175     if (!UniformInd)
5176       continue;
5177 
5178     // Determine if all users of the induction variable update instruction are
5179     // uniform after vectorization.
5180     auto UniformIndUpdate =
5181         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5182           auto *I = cast<Instruction>(U);
5183           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
5184                  isVectorizedMemAccessUse(I, IndUpdate);
5185         });
5186     if (!UniformIndUpdate)
5187       continue;
5188 
5189     // The induction variable and its update instruction will remain uniform.
5190     addToWorklistIfAllowed(Ind);
5191     addToWorklistIfAllowed(IndUpdate);
5192   }
5193 
5194   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
5195 }
5196 
5197 bool LoopVectorizationCostModel::runtimeChecksRequired() {
5198   LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
5199 
5200   if (Legal->getRuntimePointerChecking()->Need) {
5201     reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
5202         "runtime pointer checks needed. Enable vectorization of this "
5203         "loop with '#pragma clang loop vectorize(enable)' when "
5204         "compiling with -Os/-Oz",
5205         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5206     return true;
5207   }
5208 
5209   if (!PSE.getUnionPredicate().getPredicates().empty()) {
5210     reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
5211         "runtime SCEV checks needed. Enable vectorization of this "
5212         "loop with '#pragma clang loop vectorize(enable)' when "
5213         "compiling with -Os/-Oz",
5214         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5215     return true;
5216   }
5217 
5218   // FIXME: Avoid specializing for stride==1 instead of bailing out.
5219   if (!Legal->getLAI()->getSymbolicStrides().empty()) {
5220     reportVectorizationFailure("Runtime stride check for small trip count",
5221         "runtime stride == 1 checks needed. Enable vectorization of "
5222         "this loop without such check by compiling with -Os/-Oz",
5223         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5224     return true;
5225   }
5226 
5227   return false;
5228 }
5229 
5230 ElementCount
5231 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
5232   if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors)
5233     return ElementCount::getScalable(0);
5234 
5235   if (Hints->isScalableVectorizationDisabled()) {
5236     reportVectorizationInfo("Scalable vectorization is explicitly disabled",
5237                             "ScalableVectorizationDisabled", ORE, TheLoop);
5238     return ElementCount::getScalable(0);
5239   }
5240 
5241   LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n");
5242 
5243   auto MaxScalableVF = ElementCount::getScalable(
5244       std::numeric_limits<ElementCount::ScalarTy>::max());
5245 
5246   // Test that the loop-vectorizer can legalize all operations for this MaxVF.
5247   // FIXME: While for scalable vectors this is currently sufficient, this should
5248   // be replaced by a more detailed mechanism that filters out specific VFs,
5249   // instead of invalidating vectorization for a whole set of VFs based on the
5250   // MaxVF.
5251 
5252   // Disable scalable vectorization if the loop contains unsupported reductions.
5253   if (!canVectorizeReductions(MaxScalableVF)) {
5254     reportVectorizationInfo(
5255         "Scalable vectorization not supported for the reduction "
5256         "operations found in this loop.",
5257         "ScalableVFUnfeasible", ORE, TheLoop);
5258     return ElementCount::getScalable(0);
5259   }
5260 
5261   // Disable scalable vectorization if the loop contains any instructions
5262   // with element types not supported for scalable vectors.
5263   if (any_of(ElementTypesInLoop, [&](Type *Ty) {
5264         return !Ty->isVoidTy() &&
5265                !this->TTI.isElementTypeLegalForScalableVector(Ty);
5266       })) {
5267     reportVectorizationInfo("Scalable vectorization is not supported "
5268                             "for all element types found in this loop.",
5269                             "ScalableVFUnfeasible", ORE, TheLoop);
5270     return ElementCount::getScalable(0);
5271   }
5272 
5273   if (Legal->isSafeForAnyVectorWidth())
5274     return MaxScalableVF;
5275 
5276   // Limit MaxScalableVF by the maximum safe dependence distance.
5277   Optional<unsigned> MaxVScale = TTI.getMaxVScale();
5278   if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange))
5279     MaxVScale =
5280         TheFunction->getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax();
5281   MaxScalableVF = ElementCount::getScalable(
5282       MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);
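  // For example (illustrative numbers), MaxSafeElements = 32 with a maximum
  // vscale of 16 limits the result to a VF of vscale x 2.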
5283   if (!MaxScalableVF)
5284     reportVectorizationInfo(
5285         "Max legal vector width too small, scalable vectorization "
5286         "unfeasible.",
5287         "ScalableVFUnfeasible", ORE, TheLoop);
5288 
5289   return MaxScalableVF;
5290 }
5291 
5292 FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF(
5293     unsigned ConstTripCount, ElementCount UserVF, bool FoldTailByMasking) {
5294   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
5295   unsigned SmallestType, WidestType;
5296   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
5297 
  // Get the maximum safe dependence distance in bits computed by LAA.
  // It is computed as MaxVF * sizeOf(type) * 8, where type is taken from
  // the memory access that is most restrictive (involved in the smallest
  // dependence distance).
5302   unsigned MaxSafeElements =
5303       PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType);
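  // For example (illustrative numbers), a max safe vector width of 256 bits
  // with a widest type of 32 bits gives MaxSafeElements =
  // PowerOf2Floor(256 / 32) = 8.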
5304 
5305   auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements);
5306   auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements);
5307 
5308   LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF
5309                     << ".\n");
5310   LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF
5311                     << ".\n");
5312 
5313   // First analyze the UserVF, fall back if the UserVF should be ignored.
5314   if (UserVF) {
5315     auto MaxSafeUserVF =
5316         UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;
5317 
5318     if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) {
5319       // If `VF=vscale x N` is safe, then so is `VF=N`
5320       if (UserVF.isScalable())
5321         return FixedScalableVFPair(
5322             ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF);
5323       else
5324         return UserVF;
5325     }
5326 
5327     assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));
5328 
5329     // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it
5330     // is better to ignore the hint and let the compiler choose a suitable VF.
5331     if (!UserVF.isScalable()) {
5332       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5333                         << " is unsafe, clamping to max safe VF="
5334                         << MaxSafeFixedVF << ".\n");
5335       ORE->emit([&]() {
5336         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5337                                           TheLoop->getStartLoc(),
5338                                           TheLoop->getHeader())
5339                << "User-specified vectorization factor "
5340                << ore::NV("UserVectorizationFactor", UserVF)
5341                << " is unsafe, clamping to maximum safe vectorization factor "
5342                << ore::NV("VectorizationFactor", MaxSafeFixedVF);
5343       });
5344       return MaxSafeFixedVF;
5345     }
5346 
5347     if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) {
5348       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5349                         << " is ignored because scalable vectors are not "
5350                            "available.\n");
5351       ORE->emit([&]() {
5352         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5353                                           TheLoop->getStartLoc(),
5354                                           TheLoop->getHeader())
5355                << "User-specified vectorization factor "
5356                << ore::NV("UserVectorizationFactor", UserVF)
5357                << " is ignored because the target does not support scalable "
5358                   "vectors. The compiler will pick a more suitable value.";
5359       });
5360     } else {
5361       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5362                         << " is unsafe. Ignoring scalable UserVF.\n");
5363       ORE->emit([&]() {
5364         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5365                                           TheLoop->getStartLoc(),
5366                                           TheLoop->getHeader())
5367                << "User-specified vectorization factor "
5368                << ore::NV("UserVectorizationFactor", UserVF)
5369                << " is unsafe. Ignoring the hint to let the compiler pick a "
5370                   "more suitable value.";
5371       });
5372     }
5373   }
5374 
5375   LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
5376                     << " / " << WidestType << " bits.\n");
5377 
5378   FixedScalableVFPair Result(ElementCount::getFixed(1),
5379                              ElementCount::getScalable(0));
5380   if (auto MaxVF =
5381           getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
5382                                   MaxSafeFixedVF, FoldTailByMasking))
5383     Result.FixedVF = MaxVF;
5384 
5385   if (auto MaxVF =
5386           getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
5387                                   MaxSafeScalableVF, FoldTailByMasking))
5388     if (MaxVF.isScalable()) {
5389       Result.ScalableVF = MaxVF;
5390       LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
5391                         << "\n");
5392     }
5393 
5394   return Result;
5395 }
5396 
5397 FixedScalableVFPair
5398 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
5399   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to insert the check anyway, since it is still
    // likely to be dynamically uniform if the target can skip it.
5402     reportVectorizationFailure(
5403         "Not inserting runtime ptr check for divergent target",
5404         "runtime pointer checks needed. Not enabled for divergent target",
5405         "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
5406     return FixedScalableVFPair::getNone();
5407   }
5408 
5409   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5410   LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
5411   if (TC == 1) {
5412     reportVectorizationFailure("Single iteration (non) loop",
5413         "loop trip count is one, irrelevant for vectorization",
5414         "SingleIterationLoop", ORE, TheLoop);
5415     return FixedScalableVFPair::getNone();
5416   }
5417 
5418   switch (ScalarEpilogueStatus) {
5419   case CM_ScalarEpilogueAllowed:
5420     return computeFeasibleMaxVF(TC, UserVF, false);
5421   case CM_ScalarEpilogueNotAllowedUsePredicate:
5422     LLVM_FALLTHROUGH;
5423   case CM_ScalarEpilogueNotNeededUsePredicate:
5424     LLVM_DEBUG(
5425         dbgs() << "LV: vector predicate hint/switch found.\n"
5426                << "LV: Not allowing scalar epilogue, creating predicated "
5427                << "vector loop.\n");
5428     break;
5429   case CM_ScalarEpilogueNotAllowedLowTripLoop:
5430     // fallthrough as a special case of OptForSize
5431   case CM_ScalarEpilogueNotAllowedOptSize:
5432     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
5433       LLVM_DEBUG(
5434           dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5435     else
5436       LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5437                         << "count.\n");
5438 
5439     // Bail if runtime checks are required, which are not good when optimising
5440     // for size.
5441     if (runtimeChecksRequired())
5442       return FixedScalableVFPair::getNone();
5443 
5444     break;
5445   }
5446 
5447   // The only loops we can vectorize without a scalar epilogue, are loops with
5448   // a bottom-test and a single exiting block. We'd have to handle the fact
5449   // that not every instruction executes on the last iteration.  This will
5450   // require a lane mask which varies through the vector loop body.  (TODO)
5451   if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5452     // If there was a tail-folding hint/switch, but we can't fold the tail by
5453     // masking, fallback to a vectorization with a scalar epilogue.
5454     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5455       LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5456                            "scalar epilogue instead.\n");
5457       ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5458       return computeFeasibleMaxVF(TC, UserVF, false);
5459     }
5460     return FixedScalableVFPair::getNone();
5461   }
5462 
  // Now try tail folding.
5464 
5465   // Invalidate interleave groups that require an epilogue if we can't mask
5466   // the interleave-group.
5467   if (!useMaskedInterleavedAccesses(TTI)) {
5468     assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5469            "No decisions should have been taken at this point");
    // Note: There is no need to invalidate any cost modeling decisions here,
    // as none were taken so far.
5472     InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5473   }
5474 
5475   FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF, true);
5476   // Avoid tail folding if the trip count is known to be a multiple of any VF
5477   // we chose.
5478   // FIXME: The condition below pessimises the case for fixed-width vectors,
5479   // when scalable VFs are also candidates for vectorization.
5480   if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) {
5481     ElementCount MaxFixedVF = MaxFactors.FixedVF;
5482     assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) &&
5483            "MaxFixedVF must be a power of 2");
5484     unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC
5485                                    : MaxFixedVF.getFixedValue();
5486     ScalarEvolution *SE = PSE.getSE();
5487     const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
5488     const SCEV *ExitCount = SE->getAddExpr(
5489         BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
5490     const SCEV *Rem = SE->getURemExpr(
5491         SE->applyLoopGuards(ExitCount, TheLoop),
5492         SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
5493     if (Rem->isZero()) {
5494       // Accept MaxFixedVF if we do not have a tail.
5495       LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
5496       return MaxFactors;
5497     }
5498   }
5499 
  // For scalable vectors, don't use tail folding when optimizing for code
  // size or for low trip counts. We only permit it if the user has
  // explicitly requested it.
5503   if (ScalarEpilogueStatus != CM_ScalarEpilogueNotNeededUsePredicate &&
5504       ScalarEpilogueStatus != CM_ScalarEpilogueNotAllowedUsePredicate &&
5505       MaxFactors.ScalableVF.isVector())
5506     MaxFactors.ScalableVF = ElementCount::getScalable(0);
5507 
5508   // If we don't know the precise trip count, or if the trip count that we
5509   // found modulo the vectorization factor is not zero, try to fold the tail
5510   // by masking.
5511   // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
5512   if (Legal->prepareToFoldTailByMasking()) {
5513     FoldTailByMasking = true;
5514     return MaxFactors;
5515   }
5516 
5517   // If there was a tail-folding hint/switch, but we can't fold the tail by
5518   // masking, fallback to a vectorization with a scalar epilogue.
5519   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5520     LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5521                          "scalar epilogue instead.\n");
5522     ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5523     return MaxFactors;
5524   }
5525 
5526   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
5527     LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
5528     return FixedScalableVFPair::getNone();
5529   }
5530 
5531   if (TC == 0) {
5532     reportVectorizationFailure(
5533         "Unable to calculate the loop count due to complex control flow",
5534         "unable to calculate the loop count due to complex control flow",
5535         "UnknownLoopCountComplexCFG", ORE, TheLoop);
5536     return FixedScalableVFPair::getNone();
5537   }
5538 
5539   reportVectorizationFailure(
5540       "Cannot optimize for size and vectorize at the same time.",
5541       "cannot optimize for size and vectorize at the same time. "
5542       "Enable vectorization of this loop with '#pragma clang loop "
5543       "vectorize(enable)' when compiling with -Os/-Oz",
5544       "NoTailLoopWithOptForSize", ORE, TheLoop);
5545   return FixedScalableVFPair::getNone();
5546 }
5547 
5548 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
5549     unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType,
5550     const ElementCount &MaxSafeVF, bool FoldTailByMasking) {
5551   bool ComputeScalableMaxVF = MaxSafeVF.isScalable();
5552   TypeSize WidestRegister = TTI.getRegisterBitWidth(
5553       ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
5554                            : TargetTransformInfo::RGK_FixedWidthVector);
5555 
5556   // Convenience function to return the minimum of two ElementCounts.
5557   auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) {
5558     assert((LHS.isScalable() == RHS.isScalable()) &&
5559            "Scalable flags must match");
5560     return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS;
5561   };
5562 
  // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
  // Note that both WidestRegister and WidestType may not be powers of 2.
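  // For example (illustrative numbers), a 128-bit widest register with a
  // 32-bit widest type gives an element count of 4 (or vscale x 4 when
  // computing the scalable maximum), before clamping to MaxSafeVF below.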
5565   auto MaxVectorElementCount = ElementCount::get(
5566       PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType),
5567       ComputeScalableMaxVF);
5568   MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
5569   LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
5570                     << (MaxVectorElementCount * WidestType) << " bits.\n");
5571 
5572   if (!MaxVectorElementCount) {
5573     LLVM_DEBUG(dbgs() << "LV: The target has no "
5574                       << (ComputeScalableMaxVF ? "scalable" : "fixed")
5575                       << " vector registers.\n");
5576     return ElementCount::getFixed(1);
5577   }
5578 
5579   const auto TripCountEC = ElementCount::getFixed(ConstTripCount);
5580   if (ConstTripCount &&
5581       ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) &&
5582       (!FoldTailByMasking || isPowerOf2_32(ConstTripCount))) {
5583     // If loop trip count (TC) is known at compile time there is no point in
5584     // choosing VF greater than TC (as done in the loop below). Select maximum
5585     // power of two which doesn't exceed TC.
5586     // If MaxVectorElementCount is scalable, we only fall back on a fixed VF
5587     // when the TC is less than or equal to the known number of lanes.
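    // For example (illustrative numbers), a constant trip count of 12 with a
    // maximum element count of 16 clamps the VF to PowerOf2Floor(12) = 8.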
5588     auto ClampedConstTripCount = PowerOf2Floor(ConstTripCount);
5589     LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to maximum power of two not "
5590                          "exceeding the constant trip count: "
5591                       << ClampedConstTripCount << "\n");
5592     return ElementCount::getFixed(ClampedConstTripCount);
5593   }
5594 
5595   ElementCount MaxVF = MaxVectorElementCount;
5596   if (TTI.shouldMaximizeVectorBandwidth() ||
5597       (MaximizeBandwidth && isScalarEpilogueAllowed())) {
5598     auto MaxVectorElementCountMaxBW = ElementCount::get(
5599         PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType),
5600         ComputeScalableMaxVF);
5601     MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
5602 
5603     // Collect all viable vectorization factors larger than the default MaxVF
5604     // (i.e. MaxVectorElementCount).
5605     SmallVector<ElementCount, 8> VFs;
5606     for (ElementCount VS = MaxVectorElementCount * 2;
5607          ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2)
5608       VFs.push_back(VS);
5609 
5610     // For each VF calculate its register usage.
5611     auto RUs = calculateRegisterUsage(VFs);
5612 
5613     // Select the largest VF which doesn't require more registers than existing
5614     // ones.
5615     for (int i = RUs.size() - 1; i >= 0; --i) {
5616       bool Selected = true;
5617       for (auto &pair : RUs[i].MaxLocalUsers) {
5618         unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5619         if (pair.second > TargetNumRegisters)
5620           Selected = false;
5621       }
5622       if (Selected) {
5623         MaxVF = VFs[i];
5624         break;
5625       }
5626     }
5627     if (ElementCount MinVF =
5628             TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) {
5629       if (ElementCount::isKnownLT(MaxVF, MinVF)) {
5630         LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
5631                           << ") with target's minimum: " << MinVF << '\n');
5632         MaxVF = MinVF;
5633       }
5634     }
5635   }
5636   return MaxVF;
5637 }
5638 
5639 bool LoopVectorizationCostModel::isMoreProfitable(
5640     const VectorizationFactor &A, const VectorizationFactor &B) const {
5641   InstructionCost CostA = A.Cost;
5642   InstructionCost CostB = B.Cost;
5643 
5644   unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop);
5645 
5646   if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking &&
5647       MaxTripCount) {
5648     // If we are folding the tail and the trip count is a known (possibly small)
5649     // constant, the trip count will be rounded up to an integer number of
5650     // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF),
5651     // which we compare directly. When not folding the tail, the total cost will
5652     // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is
5653     // approximated with the per-lane cost below instead of using the tripcount
5654     // as here.
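    // For example (illustrative numbers), with MaxTripCount = 10, a VF=4
    // factor costing 8 totals 8 * ceil(10 / 4) = 24, while a VF=8 factor
    // costing 14 totals 14 * ceil(10 / 8) = 28, so the VF=4 factor wins.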
5655     auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue());
5656     auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue());
5657     return RTCostA < RTCostB;
5658   }
5659 
5660   // Improve estimate for the vector width if it is scalable.
5661   unsigned EstimatedWidthA = A.Width.getKnownMinValue();
5662   unsigned EstimatedWidthB = B.Width.getKnownMinValue();
5663   if (Optional<unsigned> VScale = TTI.getVScaleForTuning()) {
5664     if (A.Width.isScalable())
5665       EstimatedWidthA *= VScale.getValue();
5666     if (B.Width.isScalable())
5667       EstimatedWidthB *= VScale.getValue();
5668   }
5669 
5670   // Assume vscale may be larger than 1 (or the value being tuned for),
5671   // so that scalable vectorization is slightly favorable over fixed-width
5672   // vectorization.
5673   if (A.Width.isScalable() && !B.Width.isScalable())
5674     return (CostA * B.Width.getFixedValue()) <= (CostB * EstimatedWidthA);
5675 
5676   // To avoid the need for FP division:
5677   //      (CostA / A.Width) < (CostB / B.Width)
5678   // <=>  (CostA * B.Width) < (CostB * A.Width)
5679   return (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA);
5680 }
5681 
5682 VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor(
5683     const ElementCountSet &VFCandidates) {
5684   InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first;
5685   LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
5686   assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
5687   assert(VFCandidates.count(ElementCount::getFixed(1)) &&
5688          "Expected Scalar VF to be a candidate");
5689 
5690   const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost);
5691   VectorizationFactor ChosenFactor = ScalarCost;
5692 
5693   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
5694   if (ForceVectorization && VFCandidates.size() > 1) {
5695     // Ignore scalar width, because the user explicitly wants vectorization.
5696     // Initialize cost to max so that VF = 2 is, at least, chosen during cost
5697     // evaluation.
5698     ChosenFactor.Cost = InstructionCost::getMax();
5699   }
5700 
5701   SmallVector<InstructionVFPair> InvalidCosts;
5702   for (const auto &i : VFCandidates) {
5703     // The cost for scalar VF=1 is already calculated, so ignore it.
5704     if (i.isScalar())
5705       continue;
5706 
5707     VectorizationCostTy C = expectedCost(i, &InvalidCosts);
5708     VectorizationFactor Candidate(i, C.first);
5709 
5710 #ifndef NDEBUG
5711     unsigned AssumedMinimumVscale = 1;
5712     if (Optional<unsigned> VScale = TTI.getVScaleForTuning())
5713       AssumedMinimumVscale = VScale.getValue();
5714     unsigned Width =
5715         Candidate.Width.isScalable()
5716             ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale
5717             : Candidate.Width.getFixedValue();
5718     LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
5719                       << " costs: " << (Candidate.Cost / Width));
5720     if (i.isScalable())
5721       LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of "
5722                         << AssumedMinimumVscale << ")");
5723     LLVM_DEBUG(dbgs() << ".\n");
5724 #endif
5725 
5726     if (!C.second && !ForceVectorization) {
5727       LLVM_DEBUG(
5728           dbgs() << "LV: Not considering vector loop of width " << i
5729                  << " because it will not generate any vector instructions.\n");
5730       continue;
5731     }
5732 
5733     // If profitable add it to ProfitableVF list.
5734     if (isMoreProfitable(Candidate, ScalarCost))
5735       ProfitableVFs.push_back(Candidate);
5736 
5737     if (isMoreProfitable(Candidate, ChosenFactor))
5738       ChosenFactor = Candidate;
5739   }
5740 
5741   // Emit a report of VFs with invalid costs in the loop.
5742   if (!InvalidCosts.empty()) {
5743     // Group the remarks per instruction, keeping the instruction order from
5744     // InvalidCosts.
5745     std::map<Instruction *, unsigned> Numbering;
5746     unsigned I = 0;
5747     for (auto &Pair : InvalidCosts)
5748       if (!Numbering.count(Pair.first))
5749         Numbering[Pair.first] = I++;
5750 
5751     // Sort the list, first on instruction(number) then on VF.
5752     llvm::sort(InvalidCosts,
5753                [&Numbering](InstructionVFPair &A, InstructionVFPair &B) {
5754                  if (Numbering[A.first] != Numbering[B.first])
5755                    return Numbering[A.first] < Numbering[B.first];
5756                  ElementCountComparator ECC;
5757                  return ECC(A.second, B.second);
5758                });
5759 
5760     // For a list of ordered instruction-vf pairs:
5761     //   [(load, vf1), (load, vf2), (store, vf1)]
5762     // Group the instructions together to emit separate remarks for:
5763     //   load  (vf1, vf2)
5764     //   store (vf1)
5765     auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts);
5766     auto Subset = ArrayRef<InstructionVFPair>();
5767     do {
5768       if (Subset.empty())
5769         Subset = Tail.take_front(1);
5770 
5771       Instruction *I = Subset.front().first;
5772 
5773       // If the next instruction is different, or if there are no other pairs,
5774       // emit a remark for the collated subset. e.g.
      //   [(load, vf1), (load, vf2)]
      // to emit:
      //   remark: invalid costs for 'load' at VF=(vf1, vf2)
5778       if (Subset == Tail || Tail[Subset.size()].first != I) {
5779         std::string OutString;
5780         raw_string_ostream OS(OutString);
5781         assert(!Subset.empty() && "Unexpected empty range");
5782         OS << "Instruction with invalid costs prevented vectorization at VF=(";
5783         for (auto &Pair : Subset)
5784           OS << (Pair.second == Subset.front().second ? "" : ", ")
5785              << Pair.second;
5786         OS << "):";
5787         if (auto *CI = dyn_cast<CallInst>(I))
5788           OS << " call to " << CI->getCalledFunction()->getName();
5789         else
5790           OS << " " << I->getOpcodeName();
5791         OS.flush();
5792         reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I);
5793         Tail = Tail.drop_front(Subset.size());
5794         Subset = {};
5795       } else
5796         // Grow the subset by one element
5797         Subset = Tail.take_front(Subset.size() + 1);
5798     } while (!Tail.empty());
5799   }
5800 
5801   if (!EnableCondStoresVectorization && NumPredStores) {
5802     reportVectorizationFailure("There are conditional stores.",
5803         "store that is conditionally executed prevents vectorization",
5804         "ConditionalStore", ORE, TheLoop);
5805     ChosenFactor = ScalarCost;
5806   }
5807 
5808   LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() &&
5809                  ChosenFactor.Cost >= ScalarCost.Cost) dbgs()
5810              << "LV: Vectorization seems to be not beneficial, "
5811              << "but was forced by a user.\n");
5812   LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n");
5813   return ChosenFactor;
5814 }
5815 
5816 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization(
5817     const Loop &L, ElementCount VF) const {
5818   // Cross iteration phis such as reductions need special handling and are
5819   // currently unsupported.
5820   if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) {
5821         return Legal->isFirstOrderRecurrence(&Phi) ||
5822                Legal->isReductionVariable(&Phi);
5823       }))
5824     return false;
5825 
5826   // Phis with uses outside of the loop require special handling and are
5827   // currently unsupported.
5828   for (auto &Entry : Legal->getInductionVars()) {
5829     // Look for uses of the value of the induction at the last iteration.
5830     Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch());
5831     for (User *U : PostInc->users())
5832       if (!L.contains(cast<Instruction>(U)))
5833         return false;
5834     // Look for uses of penultimate value of the induction.
5835     for (User *U : Entry.first->users())
5836       if (!L.contains(cast<Instruction>(U)))
5837         return false;
5838   }
5839 
5840   // Induction variables that are widened require special handling that is
5841   // currently not supported.
5842   if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
5843         return !(this->isScalarAfterVectorization(Entry.first, VF) ||
5844                  this->isProfitableToScalarize(Entry.first, VF));
5845       }))
5846     return false;
5847 
  // Epilogue vectorization code has not been audited to ensure it handles
  // non-latch exits properly. It may be fine, but it needs to be audited and
  // tested.
5851   if (L.getExitingBlock() != L.getLoopLatch())
5852     return false;
5853 
5854   return true;
5855 }
5856 
5857 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
5858     const ElementCount VF) const {
5859   // FIXME: We need a much better cost-model to take different parameters such
5860   // as register pressure, code size increase and cost of extra branches into
5861   // account. For now we apply a very crude heuristic and only consider loops
5862   // with vectorization factors larger than a certain value.
  // We also consider epilogue vectorization unprofitable for targets that
  // don't consider interleaving beneficial (e.g. MVE).
5865   if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
5866     return false;
5867   if (VF.getFixedValue() >= EpilogueVectorizationMinVF)
5868     return true;
5869   return false;
5870 }
5871 
5872 VectorizationFactor
5873 LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
5874     const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
5875   VectorizationFactor Result = VectorizationFactor::Disabled();
5876   if (!EnableEpilogueVectorization) {
5877     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
5878     return Result;
5879   }
5880 
5881   if (!isScalarEpilogueAllowed()) {
5882     LLVM_DEBUG(
5883         dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
5884                   "allowed.\n";);
5885     return Result;
5886   }
5887 
5888   // Not really a cost consideration, but check for unsupported cases here to
5889   // simplify the logic.
5890   if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
5891     LLVM_DEBUG(
5892         dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
5893                   "not a supported candidate.\n";);
5894     return Result;
5895   }
5896 
5897   if (EpilogueVectorizationForceVF > 1) {
5898     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
5899     ElementCount ForcedEC = ElementCount::getFixed(EpilogueVectorizationForceVF);
5900     if (LVP.hasPlanWithVF(ForcedEC))
5901       return {ForcedEC, 0};
5902     else {
5903       LLVM_DEBUG(
5904           dbgs()
5905               << "LEV: Epilogue vectorization forced factor is not viable.\n";);
5906       return Result;
5907     }
5908   }
5909 
5910   if (TheLoop->getHeader()->getParent()->hasOptSize() ||
5911       TheLoop->getHeader()->getParent()->hasMinSize()) {
5912     LLVM_DEBUG(
5913         dbgs()
5914             << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
5915     return Result;
5916   }
5917 
5918   auto FixedMainLoopVF = ElementCount::getFixed(MainLoopVF.getKnownMinValue());
5919   if (MainLoopVF.isScalable())
5920     LLVM_DEBUG(
5921         dbgs() << "LEV: Epilogue vectorization using scalable vectors not "
5922                   "yet supported. Converting to fixed-width (VF="
5923                << FixedMainLoopVF << ") instead\n");
5924 
5925   if (!isEpilogueVectorizationProfitable(FixedMainLoopVF)) {
5926     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for "
5927                          "this loop\n");
5928     return Result;
5929   }
5930 
5931   for (auto &NextVF : ProfitableVFs)
5932     if (ElementCount::isKnownLT(NextVF.Width, FixedMainLoopVF) &&
5933         (Result.Width.getFixedValue() == 1 ||
5934          isMoreProfitable(NextVF, Result)) &&
5935         LVP.hasPlanWithVF(NextVF.Width))
5936       Result = NextVF;
5937 
5938   if (Result != VectorizationFactor::Disabled())
5939     LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
5940                       << Result.Width.getFixedValue() << "\n";);
5941   return Result;
5942 }
5943 
5944 std::pair<unsigned, unsigned>
5945 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
5946   unsigned MinWidth = -1U;
5947   unsigned MaxWidth = 8;
5948   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5949   // For in-loop reductions, no element types are added to ElementTypesInLoop
5950   // if there are no loads/stores in the loop. In this case, check through the
5951   // reduction variables to determine the maximum width.
5952   if (ElementTypesInLoop.empty() && !Legal->getReductionVars().empty()) {
5953     // Reset MaxWidth so that we can find the smallest type used by recurrences
5954     // in the loop.
5955     MaxWidth = -1U;
5956     for (auto &PhiDescriptorPair : Legal->getReductionVars()) {
5957       const RecurrenceDescriptor &RdxDesc = PhiDescriptorPair.second;
5958       // When finding the min width used by the recurrence we need to account
5959       // for casts on the input operands of the recurrence.
5960       MaxWidth = std::min<unsigned>(
5961           MaxWidth, std::min<unsigned>(
5962                         RdxDesc.getMinWidthCastToRecurrenceTypeInBits(),
5963                         RdxDesc.getRecurrenceType()->getScalarSizeInBits()));
5964     }
5965   } else {
5966     for (Type *T : ElementTypesInLoop) {
5967       MinWidth = std::min<unsigned>(
5968           MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
5969       MaxWidth = std::max<unsigned>(
5970           MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
5971     }
5972   }
5973   return {MinWidth, MaxWidth};
5974 }
5975 
5976 void LoopVectorizationCostModel::collectElementTypesForWidening() {
5977   ElementTypesInLoop.clear();
5978   // For each block.
5979   for (BasicBlock *BB : TheLoop->blocks()) {
5980     // For each instruction in the loop.
5981     for (Instruction &I : BB->instructionsWithoutDebug()) {
5982       Type *T = I.getType();
5983 
5984       // Skip ignored values.
5985       if (ValuesToIgnore.count(&I))
5986         continue;
5987 
5988       // Only examine Loads, Stores and PHINodes.
5989       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
5990         continue;
5991 
5992       // Examine PHI nodes that are reduction variables. Update the type to
5993       // account for the recurrence type.
5994       if (auto *PN = dyn_cast<PHINode>(&I)) {
5995         if (!Legal->isReductionVariable(PN))
5996           continue;
5997         const RecurrenceDescriptor &RdxDesc =
5998             Legal->getReductionVars().find(PN)->second;
5999         if (PreferInLoopReductions || useOrderedReductions(RdxDesc) ||
6000             TTI.preferInLoopReduction(RdxDesc.getOpcode(),
6001                                       RdxDesc.getRecurrenceType(),
6002                                       TargetTransformInfo::ReductionFlags()))
6003           continue;
6004         T = RdxDesc.getRecurrenceType();
6005       }
6006 
6007       // Examine the stored values.
6008       if (auto *ST = dyn_cast<StoreInst>(&I))
6009         T = ST->getValueOperand()->getType();
6010 
6011       assert(T->isSized() &&
6012              "Expected the load/store/recurrence type to be sized");
6013 
6014       ElementTypesInLoop.insert(T);
6015     }
6016   }
6017 }
6018 
6019 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
6020                                                            unsigned LoopCost) {
6021   // -- The interleave heuristics --
6022   // We interleave the loop in order to expose ILP and reduce the loop overhead.
6023   // There are many micro-architectural considerations that we can't predict
6024   // at this level. For example, frontend pressure (on decode or fetch) due to
6025   // code size, or the number and capabilities of the execution ports.
6026   //
6027   // We use the following heuristics to select the interleave count:
6028   // 1. If the code has reductions, then we interleave to break the cross
6029   // iteration dependency.
6030   // 2. If the loop is really small, then we interleave to reduce the loop
6031   // overhead.
6032   // 3. We don't interleave if we think that we will spill registers to memory
6033   // due to the increased register pressure.
6034 
6035   if (!isScalarEpilogueAllowed())
6036     return 1;
6037 
  // The maximum safe dependence distance was already used to limit the
  // vectorization factor; do not interleave further in that case.
6039   if (Legal->getMaxSafeDepDistBytes() != -1U)
6040     return 1;
6041 
6042   auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
6043   const bool HasReductions = !Legal->getReductionVars().empty();
  // Do not interleave loops with a relatively small known or estimated trip
  // count. But we will interleave when InterleaveSmallLoopScalarReduction is
  // enabled, and the code has scalar reductions (HasReductions && VF is 1),
  // because under those conditions interleaving can expose ILP and break
  // cross-iteration dependences for reductions.
6049   if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
6050       !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
6051     return 1;
6052 
6053   RegisterUsage R = calculateRegisterUsage({VF})[0];
  // These values are used as divisors below, so make sure every register
  // class has at least one local user.
6056   for (auto& pair : R.MaxLocalUsers) {
6057     pair.second = std::max(pair.second, 1U);
6058   }
6059 
6060   // We calculate the interleave count using the following formula.
6061   // Subtract the number of loop invariants from the number of available
6062   // registers. These registers are used by all of the interleaved instances.
6063   // Next, divide the remaining registers by the number of registers that is
6064   // required by the loop, in order to estimate how many parallel instances
6065   // fit without causing spills. All of this is rounded down if necessary to be
6066   // a power of two. We want power of two interleave count to simplify any
6067   // addressing operations or alignment considerations.
6068   // We also want power of two interleave counts to ensure that the induction
6069   // variable of the vector loop wraps to zero, when tail is folded by masking;
6070   // this currently happens when OptForSize, in which case IC is set to 1 above.
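  //
  // For example, with (say) 32 registers in a class, 2 of them used by
  // loop-invariant values and a maximum of 10 values live at once, the
  // estimate is (32 - 2) / 10 = 3, rounded down to an interleave count of 2.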
6071   unsigned IC = UINT_MAX;
6072 
6073   for (auto& pair : R.MaxLocalUsers) {
6074     unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
6075     LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
6076                       << " registers of "
6077                       << TTI.getRegisterClassName(pair.first) << " register class\n");
6078     if (VF.isScalar()) {
6079       if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
6080         TargetNumRegisters = ForceTargetNumScalarRegs;
6081     } else {
6082       if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
6083         TargetNumRegisters = ForceTargetNumVectorRegs;
6084     }
6085     unsigned MaxLocalUsers = pair.second;
6086     unsigned LoopInvariantRegs = 0;
6087     if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
6088       LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
6089 
6090     unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers);
6091     // Don't count the induction variable as interleaved.
6092     if (EnableIndVarRegisterHeur) {
6093       TmpIC =
6094           PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
6095                         std::max(1U, (MaxLocalUsers - 1)));
6096     }
6097 
6098     IC = std::min(IC, TmpIC);
6099   }
6100 
6101   // Clamp the interleave ranges to reasonable counts.
6102   unsigned MaxInterleaveCount =
6103       TTI.getMaxInterleaveFactor(VF.getKnownMinValue());
6104 
6105   // Check if the user has overridden the max.
6106   if (VF.isScalar()) {
6107     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
6108       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
6109   } else {
6110     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
6111       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
6112   }
6113 
  // If the trip count is a known or estimated compile-time constant, limit the
  // interleave count so it does not exceed the trip count divided by VF, while
  // keeping it at least 1.
6117   //
6118   // For scalable vectors we can't know if interleaving is beneficial. It may
6119   // not be beneficial for small loops if none of the lanes in the second vector
  // iteration is enabled. However, for larger loops, there is likely to be a
6121   // similar benefit as for fixed-width vectors. For now, we choose to leave
6122   // the InterleaveCount as if vscale is '1', although if some information about
6123   // the vector is known (e.g. min vector size), we can make a better decision.
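  //
  // For example, with (say) an estimated trip count of 20 and VF = 8, the
  // interleave count is capped at 20 / 8 = 2; if the trip count were smaller
  // than VF, the cap would be 0 and is raised back to 1 below.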
6124   if (BestKnownTC) {
6125     MaxInterleaveCount =
6126         std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
6127     // Make sure MaxInterleaveCount is greater than 0.
6128     MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
6129   }
6130 
6131   assert(MaxInterleaveCount > 0 &&
6132          "Maximum interleave count must be greater than 0");
6133 
  // Clamp the calculated IC to be between 1 and the max interleave count
  // that the target and trip count allow.
6136   if (IC > MaxInterleaveCount)
6137     IC = MaxInterleaveCount;
6138   else
6139     // Make sure IC is greater than 0.
6140     IC = std::max(1u, IC);
6141 
6142   assert(IC > 0 && "Interleave count must be greater than 0.");
6143 
6144   // If we did not calculate the cost for VF (because the user selected the VF)
6145   // then we calculate the cost of VF here.
6146   if (LoopCost == 0) {
6147     InstructionCost C = expectedCost(VF).first;
6148     assert(C.isValid() && "Expected to have chosen a VF with valid cost");
6149     LoopCost = *C.getValue();
6150   }
6151 
6152   assert(LoopCost && "Non-zero loop cost expected");
6153 
6154   // Interleave if we vectorized this loop and there is a reduction that could
6155   // benefit from interleaving.
6156   if (VF.isVector() && HasReductions) {
6157     LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
6158     return IC;
6159   }
6160 
6161   // Note that if we've already vectorized the loop we will have done the
6162   // runtime check and so interleaving won't require further checks.
6163   bool InterleavingRequiresRuntimePointerCheck =
6164       (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
6165 
6166   // We want to interleave small loops in order to reduce the loop overhead and
6167   // potentially expose ILP opportunities.
6168   LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
6169                     << "LV: IC is " << IC << '\n'
6170                     << "LV: VF is " << VF << '\n');
6171   const bool AggressivelyInterleaveReductions =
6172       TTI.enableAggressiveInterleaving(HasReductions);
6173   if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
6174     // We assume that the cost overhead is 1 and we use the cost model
6175     // to estimate the cost of the loop and interleave until the cost of the
6176     // loop overhead is about 5% of the cost of the loop.
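    // For example, if SmallLoopCost is (say) 20 and the computed loop cost is
    // 6, the interleave count is capped at PowerOf2Floor(20 / 6) = 2.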
6177     unsigned SmallIC =
6178         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
6179 
6180     // Interleave until store/load ports (estimated by max interleave count) are
6181     // saturated.
6182     unsigned NumStores = Legal->getNumStores();
6183     unsigned NumLoads = Legal->getNumLoads();
6184     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
6185     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
6186 
6187     // There is little point in interleaving for reductions containing selects
6188     // and compares when VF=1 since it may just create more overhead than it's
6189     // worth for loops with small trip counts. This is because we still have to
6190     // do the final reduction after the loop.
6191     bool HasSelectCmpReductions =
6192         HasReductions &&
6193         any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
6194           const RecurrenceDescriptor &RdxDesc = Reduction.second;
6195           return RecurrenceDescriptor::isSelectCmpRecurrenceKind(
6196               RdxDesc.getRecurrenceKind());
6197         });
6198     if (HasSelectCmpReductions) {
6199       LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n");
6200       return 1;
6201     }
6202 
6203     // If we have a scalar reduction (vector reductions are already dealt with
6204     // by this point), we can increase the critical path length if the loop
6205     // we're interleaving is inside another loop. For tree-wise reductions
6206     // set the limit to 2, and for ordered reductions it's best to disable
6207     // interleaving entirely.
6208     if (HasReductions && TheLoop->getLoopDepth() > 1) {
6209       bool HasOrderedReductions =
6210           any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
6211             const RecurrenceDescriptor &RdxDesc = Reduction.second;
6212             return RdxDesc.isOrdered();
6213           });
6214       if (HasOrderedReductions) {
6215         LLVM_DEBUG(
6216             dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
6217         return 1;
6218       }
6219 
6220       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
6221       SmallIC = std::min(SmallIC, F);
6222       StoresIC = std::min(StoresIC, F);
6223       LoadsIC = std::min(LoadsIC, F);
6224     }
6225 
6226     if (EnableLoadStoreRuntimeInterleave &&
6227         std::max(StoresIC, LoadsIC) > SmallIC) {
6228       LLVM_DEBUG(
6229           dbgs() << "LV: Interleaving to saturate store or load ports.\n");
6230       return std::max(StoresIC, LoadsIC);
6231     }
6232 
6233     // If there are scalar reductions and TTI has enabled aggressive
6234     // interleaving for reductions, we will interleave to expose ILP.
6235     if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
6236         AggressivelyInterleaveReductions) {
6237       LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
      // Interleave no less than SmallIC but not as aggressively as the normal
      // IC, to accommodate the rare case where resources are too limited.
6240       return std::max(IC / 2, SmallIC);
6241     } else {
6242       LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
6243       return SmallIC;
6244     }
6245   }
6246 
6247   // Interleave if this is a large loop (small loops are already dealt with by
6248   // this point) that could benefit from interleaving.
6249   if (AggressivelyInterleaveReductions) {
6250     LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6251     return IC;
6252   }
6253 
6254   LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
6255   return 1;
6256 }
6257 
6258 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
6259 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
6260   // This function calculates the register usage by measuring the highest number
6261   // of values that are alive at a single location. Obviously, this is a very
  // rough estimation. We scan the loop in topological order to assign a
  // number to each instruction, using RPO to ensure that defs are
6264   // met before their users. We assume that each instruction that has in-loop
6265   // users starts an interval. We record every time that an in-loop value is
6266   // used, so we have a list of the first and last occurrences of each
6267   // instruction. Next, we transpose this data structure into a multi map that
6268   // holds the list of intervals that *end* at a specific location. This multi
6269   // map allows us to perform a linear search. We scan the instructions linearly
6270   // and record each time that a new interval starts, by placing it in a set.
6271   // If we find this value in the multi-map then we remove it from the set.
6272   // The max register usage is the maximum size of the set.
6273   // We also search for instructions that are defined outside the loop, but are
6274   // used inside the loop. We need this number separately from the max-interval
6275   // usage number because when we unroll, loop-invariant values do not take
  // more registers.
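  //
  // For example, a value that is defined at index 0 and last used at index 5
  // keeps an interval open at every index in between; the peak size of the
  // open-interval set across all indices gives the register-pressure estimate
  // for that register class.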
6277   LoopBlocksDFS DFS(TheLoop);
6278   DFS.perform(LI);
6279 
6280   RegisterUsage RU;
6281 
6282   // Each 'key' in the map opens a new interval. The values
6283   // of the map are the index of the 'last seen' usage of the
6284   // instruction that is the key.
6285   using IntervalMap = DenseMap<Instruction *, unsigned>;
6286 
6287   // Maps instruction to its index.
6288   SmallVector<Instruction *, 64> IdxToInstr;
6289   // Marks the end of each interval.
6290   IntervalMap EndPoint;
  // Saves the set of instructions that are used in the loop.
6292   SmallPtrSet<Instruction *, 8> Ends;
6293   // Saves the list of values that are used in the loop but are
6294   // defined outside the loop, such as arguments and constants.
6295   SmallPtrSet<Value *, 8> LoopInvariants;
6296 
6297   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
6298     for (Instruction &I : BB->instructionsWithoutDebug()) {
6299       IdxToInstr.push_back(&I);
6300 
6301       // Save the end location of each USE.
6302       for (Value *U : I.operands()) {
6303         auto *Instr = dyn_cast<Instruction>(U);
6304 
6305         // Ignore non-instruction values such as arguments, constants, etc.
6306         if (!Instr)
6307           continue;
6308 
6309         // If this instruction is outside the loop then record it and continue.
6310         if (!TheLoop->contains(Instr)) {
6311           LoopInvariants.insert(Instr);
6312           continue;
6313         }
6314 
6315         // Overwrite previous end points.
6316         EndPoint[Instr] = IdxToInstr.size();
6317         Ends.insert(Instr);
6318       }
6319     }
6320   }
6321 
6322   // Saves the list of intervals that end with the index in 'key'.
6323   using InstrList = SmallVector<Instruction *, 2>;
6324   DenseMap<unsigned, InstrList> TransposeEnds;
6325 
6326   // Transpose the EndPoints to a list of values that end at each index.
6327   for (auto &Interval : EndPoint)
6328     TransposeEnds[Interval.second].push_back(Interval.first);
6329 
6330   SmallPtrSet<Instruction *, 8> OpenIntervals;
6331   SmallVector<RegisterUsage, 8> RUs(VFs.size());
6332   SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());
6333 
6334   LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
6335 
6336   // A lambda that gets the register usage for the given type and VF.
6337   const auto &TTICapture = TTI;
6338   auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned {
6339     if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty))
6340       return 0;
6341     InstructionCost::CostType RegUsage =
6342         *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue();
6343     assert(RegUsage >= 0 && RegUsage <= std::numeric_limits<unsigned>::max() &&
6344            "Nonsensical values for register usage.");
6345     return RegUsage;
6346   };
6347 
6348   for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
6349     Instruction *I = IdxToInstr[i];
6350 
6351     // Remove all of the instructions that end at this location.
6352     InstrList &List = TransposeEnds[i];
6353     for (Instruction *ToRemove : List)
6354       OpenIntervals.erase(ToRemove);
6355 
6356     // Ignore instructions that are never used within the loop.
6357     if (!Ends.count(I))
6358       continue;
6359 
6360     // Skip ignored values.
6361     if (ValuesToIgnore.count(I))
6362       continue;
6363 
6364     // For each VF find the maximum usage of registers.
6365     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
6366       // Count the number of live intervals.
6367       SmallMapVector<unsigned, unsigned, 4> RegUsage;
6368 
6369       if (VFs[j].isScalar()) {
6370         for (auto Inst : OpenIntervals) {
6371           unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
6372           if (RegUsage.find(ClassID) == RegUsage.end())
6373             RegUsage[ClassID] = 1;
6374           else
6375             RegUsage[ClassID] += 1;
6376         }
6377       } else {
6378         collectUniformsAndScalars(VFs[j]);
6379         for (auto Inst : OpenIntervals) {
6380           // Skip ignored values for VF > 1.
6381           if (VecValuesToIgnore.count(Inst))
6382             continue;
6383           if (isScalarAfterVectorization(Inst, VFs[j])) {
6384             unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
6385             if (RegUsage.find(ClassID) == RegUsage.end())
6386               RegUsage[ClassID] = 1;
6387             else
6388               RegUsage[ClassID] += 1;
6389           } else {
6390             unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType());
6391             if (RegUsage.find(ClassID) == RegUsage.end())
6392               RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]);
6393             else
6394               RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
6395           }
6396         }
6397       }
6398 
6399       for (auto& pair : RegUsage) {
6400         if (MaxUsages[j].find(pair.first) != MaxUsages[j].end())
6401           MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second);
6402         else
6403           MaxUsages[j][pair.first] = pair.second;
6404       }
6405     }
6406 
6407     LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
6408                       << OpenIntervals.size() << '\n');
6409 
6410     // Add the current instruction to the list of open intervals.
6411     OpenIntervals.insert(I);
6412   }
6413 
6414   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
6415     SmallMapVector<unsigned, unsigned, 4> Invariant;
6416 
6417     for (auto Inst : LoopInvariants) {
6418       unsigned Usage =
6419           VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
6420       unsigned ClassID =
6421           TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
6422       if (Invariant.find(ClassID) == Invariant.end())
6423         Invariant[ClassID] = Usage;
6424       else
6425         Invariant[ClassID] += Usage;
6426     }
6427 
6428     LLVM_DEBUG({
6429       dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
6430       dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
6431              << " item\n";
6432       for (const auto &pair : MaxUsages[i]) {
6433         dbgs() << "LV(REG): RegisterClass: "
6434                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6435                << " registers\n";
6436       }
6437       dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
6438              << " item\n";
6439       for (const auto &pair : Invariant) {
6440         dbgs() << "LV(REG): RegisterClass: "
6441                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6442                << " registers\n";
6443       }
6444     });
6445 
6446     RU.LoopInvariantRegs = Invariant;
6447     RU.MaxLocalUsers = MaxUsages[i];
6448     RUs[i] = RU;
6449   }
6450 
6451   return RUs;
6452 }
6453 
6454 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I,
6455                                                            ElementCount VF) {
6456   // TODO: Cost model for emulated masked load/store is completely
6457   // broken. This hack guides the cost model to use an artificially
6458   // high enough value to practically disable vectorization with such
6459   // operations, except where previously deployed legality hack allowed
6460   // using very low cost values. This is to avoid regressions coming simply
6461   // from moving "masked load/store" check from legality to cost model.
6462   // Masked Load/Gather emulation was previously never allowed.
  // A limited amount of Masked Store/Scatter emulation was allowed.
6464   assert(isPredicatedInst(I, VF) && "Expecting a scalar emulated instruction");
6465   return isa<LoadInst>(I) ||
6466          (isa<StoreInst>(I) &&
6467           NumPredStores > NumberOfStoresToPredicate);
6468 }
6469 
6470 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
6471   // If we aren't vectorizing the loop, or if we've already collected the
6472   // instructions to scalarize, there's nothing to do. Collection may already
6473   // have occurred if we have a user-selected VF and are now computing the
6474   // expected cost for interleaving.
6475   if (VF.isScalar() || VF.isZero() ||
6476       InstsToScalarize.find(VF) != InstsToScalarize.end())
6477     return;
6478 
  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
6480   // not profitable to scalarize any instructions, the presence of VF in the
6481   // map will indicate that we've analyzed it already.
6482   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
6483 
6484   // Find all the instructions that are scalar with predication in the loop and
6485   // determine if it would be better to not if-convert the blocks they are in.
6486   // If so, we also record the instructions to scalarize.
6487   for (BasicBlock *BB : TheLoop->blocks()) {
6488     if (!blockNeedsPredicationForAnyReason(BB))
6489       continue;
6490     for (Instruction &I : *BB)
6491       if (isScalarWithPredication(&I, VF)) {
6492         ScalarCostsTy ScalarCosts;
6493         // Do not apply discount if scalable, because that would lead to
6494         // invalid scalarization costs.
6495         // Do not apply discount logic if hacked cost is needed
6496         // for emulated masked memrefs.
6497         if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I, VF) &&
6498             computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
6499           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
6500         // Remember that BB will remain after vectorization.
6501         PredicatedBBsAfterVectorization.insert(BB);
6502       }
6503   }
6504 }
6505 
6506 int LoopVectorizationCostModel::computePredInstDiscount(
6507     Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
6508   assert(!isUniformAfterVectorization(PredInst, VF) &&
6509          "Instruction marked uniform-after-vectorization will be predicated");
6510 
6511   // Initialize the discount to zero, meaning that the scalar version and the
6512   // vector version cost the same.
6513   InstructionCost Discount = 0;
6514 
6515   // Holds instructions to analyze. The instructions we visit are mapped in
6516   // ScalarCosts. Those instructions are the ones that would be scalarized if
6517   // we find that the scalar version costs less.
6518   SmallVector<Instruction *, 8> Worklist;
6519 
6520   // Returns true if the given instruction can be scalarized.
6521   auto canBeScalarized = [&](Instruction *I) -> bool {
6522     // We only attempt to scalarize instructions forming a single-use chain
6523     // from the original predicated block that would otherwise be vectorized.
6524     // Although not strictly necessary, we give up on instructions we know will
6525     // already be scalar to avoid traversing chains that are unlikely to be
6526     // beneficial.
6527     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
6528         isScalarAfterVectorization(I, VF))
6529       return false;
6530 
6531     // If the instruction is scalar with predication, it will be analyzed
6532     // separately. We ignore it within the context of PredInst.
6533     if (isScalarWithPredication(I, VF))
6534       return false;
6535 
6536     // If any of the instruction's operands are uniform after vectorization,
6537     // the instruction cannot be scalarized. This prevents, for example, a
6538     // masked load from being scalarized.
6539     //
6540     // We assume we will only emit a value for lane zero of an instruction
6541     // marked uniform after vectorization, rather than VF identical values.
6542     // Thus, if we scalarize an instruction that uses a uniform, we would
6543     // create uses of values corresponding to the lanes we aren't emitting code
6544     // for. This behavior can be changed by allowing getScalarValue to clone
6545     // the lane zero values for uniforms rather than asserting.
6546     for (Use &U : I->operands())
6547       if (auto *J = dyn_cast<Instruction>(U.get()))
6548         if (isUniformAfterVectorization(J, VF))
6549           return false;
6550 
6551     // Otherwise, we can scalarize the instruction.
6552     return true;
6553   };
6554 
6555   // Compute the expected cost discount from scalarizing the entire expression
6556   // feeding the predicated instruction. We currently only consider expressions
6557   // that are single-use instruction chains.
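  //
  // For example, if the vectorized form of the chain costs 8 and its scalar
  // form, scaled down by the block probability, costs 6, the discount is 2
  // and scalarizing the chain is considered profitable.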
6558   Worklist.push_back(PredInst);
6559   while (!Worklist.empty()) {
6560     Instruction *I = Worklist.pop_back_val();
6561 
6562     // If we've already analyzed the instruction, there's nothing to do.
6563     if (ScalarCosts.find(I) != ScalarCosts.end())
6564       continue;
6565 
6566     // Compute the cost of the vector instruction. Note that this cost already
6567     // includes the scalarization overhead of the predicated instruction.
6568     InstructionCost VectorCost = getInstructionCost(I, VF).first;
6569 
6570     // Compute the cost of the scalarized instruction. This cost is the cost of
6571     // the instruction as if it wasn't if-converted and instead remained in the
6572     // predicated block. We will scale this cost by block probability after
6573     // computing the scalarization overhead.
6574     InstructionCost ScalarCost =
6575         VF.getFixedValue() *
6576         getInstructionCost(I, ElementCount::getFixed(1)).first;
6577 
6578     // Compute the scalarization overhead of needed insertelement instructions
6579     // and phi nodes.
6580     if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) {
6581       ScalarCost += TTI.getScalarizationOverhead(
6582           cast<VectorType>(ToVectorTy(I->getType(), VF)),
6583           APInt::getAllOnes(VF.getFixedValue()), true, false);
6584       ScalarCost +=
6585           VF.getFixedValue() *
6586           TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput);
6587     }
6588 
6589     // Compute the scalarization overhead of needed extractelement
6590     // instructions. For each of the instruction's operands, if the operand can
6591     // be scalarized, add it to the worklist; otherwise, account for the
6592     // overhead.
6593     for (Use &U : I->operands())
6594       if (auto *J = dyn_cast<Instruction>(U.get())) {
6595         assert(VectorType::isValidElementType(J->getType()) &&
6596                "Instruction has non-scalar type");
6597         if (canBeScalarized(J))
6598           Worklist.push_back(J);
6599         else if (needsExtract(J, VF)) {
6600           ScalarCost += TTI.getScalarizationOverhead(
6601               cast<VectorType>(ToVectorTy(J->getType(), VF)),
6602               APInt::getAllOnes(VF.getFixedValue()), false, true);
6603         }
6604       }
6605 
6606     // Scale the total scalar cost by block probability.
6607     ScalarCost /= getReciprocalPredBlockProb();
6608 
6609     // Compute the discount. A non-negative discount means the vector version
6610     // of the instruction costs more, and scalarizing would be beneficial.
6611     Discount += VectorCost - ScalarCost;
6612     ScalarCosts[I] = ScalarCost;
6613   }
6614 
6615   return *Discount.getValue();
6616 }
6617 
6618 LoopVectorizationCostModel::VectorizationCostTy
6619 LoopVectorizationCostModel::expectedCost(
6620     ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) {
6621   VectorizationCostTy Cost;
6622 
6623   // For each block.
6624   for (BasicBlock *BB : TheLoop->blocks()) {
6625     VectorizationCostTy BlockCost;
6626 
6627     // For each instruction in the old loop.
6628     for (Instruction &I : BB->instructionsWithoutDebug()) {
6629       // Skip ignored values.
6630       if (ValuesToIgnore.count(&I) ||
6631           (VF.isVector() && VecValuesToIgnore.count(&I)))
6632         continue;
6633 
6634       VectorizationCostTy C = getInstructionCost(&I, VF);
6635 
6636       // Check if we should override the cost.
6637       if (C.first.isValid() &&
6638           ForceTargetInstructionCost.getNumOccurrences() > 0)
6639         C.first = InstructionCost(ForceTargetInstructionCost);
6640 
6641       // Keep a list of instructions with invalid costs.
6642       if (Invalid && !C.first.isValid())
6643         Invalid->emplace_back(&I, VF);
6644 
6645       BlockCost.first += C.first;
6646       BlockCost.second |= C.second;
6647       LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
6648                         << " for VF " << VF << " For instruction: " << I
6649                         << '\n');
6650     }
6651 
6652     // If we are vectorizing a predicated block, it will have been
6653     // if-converted. This means that the block's instructions (aside from
6654     // stores and instructions that may divide by zero) will now be
6655     // unconditionally executed. For the scalar case, we may not always execute
6656     // the predicated block, if it is an if-else block. Thus, scale the block's
6657     // cost by the probability of executing it. blockNeedsPredication from
6658     // Legal is used so as to not include all blocks in tail folded loops.
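    //
    // For example, with a reciprocal predicated-block probability of (say) 2,
    // a predicated block whose instructions cost 10 contributes only 5 to the
    // scalar loop cost.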
6659     if (VF.isScalar() && Legal->blockNeedsPredication(BB))
6660       BlockCost.first /= getReciprocalPredBlockProb();
6661 
6662     Cost.first += BlockCost.first;
6663     Cost.second |= BlockCost.second;
6664   }
6665 
6666   return Cost;
6667 }
6668 
/// Gets the address access SCEV after verifying that the access pattern is
/// loop invariant except for the induction variable dependence.
6671 ///
6672 /// This SCEV can be sent to the Target in order to estimate the address
6673 /// calculation cost.
static const SCEV *
getAddressAccessSCEV(Value *Ptr, LoopVectorizationLegality *Legal,
                     PredicatedScalarEvolution &PSE, const Loop *TheLoop) {
6679 
6680   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
6681   if (!Gep)
6682     return nullptr;
6683 
6684   // We are looking for a gep with all loop invariant indices except for one
6685   // which should be an induction variable.
6686   auto SE = PSE.getSE();
6687   unsigned NumOperands = Gep->getNumOperands();
6688   for (unsigned i = 1; i < NumOperands; ++i) {
6689     Value *Opd = Gep->getOperand(i);
6690     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
6691         !Legal->isInductionVariable(Opd))
6692       return nullptr;
6693   }
6694 
  // Now we know we have a GEP ptr, %inv, %ind, %inv. Return the Ptr SCEV.
6696   return PSE.getSCEV(Ptr);
6697 }
6698 
6699 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
6700   return Legal->hasStride(I->getOperand(0)) ||
6701          Legal->hasStride(I->getOperand(1));
6702 }
6703 
6704 InstructionCost
6705 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
6706                                                         ElementCount VF) {
6707   assert(VF.isVector() &&
6708          "Scalarization cost of instruction implies vectorization.");
6709   if (VF.isScalable())
6710     return InstructionCost::getInvalid();
6711 
6712   Type *ValTy = getLoadStoreType(I);
6713   auto SE = PSE.getSE();
6714 
6715   unsigned AS = getLoadStoreAddressSpace(I);
6716   Value *Ptr = getLoadStorePointerOperand(I);
6717   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
6718   // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost`
6719   //       that it is being called from this specific place.
6720 
6721   // Figure out whether the access is strided and get the stride value
  // if it's known at compile time.
6723   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
6724 
6725   // Get the cost of the scalar memory instruction and address computation.
6726   InstructionCost Cost =
6727       VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
6728 
6729   // Don't pass *I here, since it is scalar but will actually be part of a
6730   // vectorized loop where the user of it is a vectorized instruction.
6731   const Align Alignment = getLoadStoreAlignment(I);
6732   Cost += VF.getKnownMinValue() *
6733           TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
6734                               AS, TTI::TCK_RecipThroughput);
6735 
6736   // Get the overhead of the extractelement and insertelement instructions
6737   // we might create due to scalarization.
6738   Cost += getScalarizationOverhead(I, VF);
6739 
6740   // If we have a predicated load/store, it will need extra i1 extracts and
6741   // conditional branches, but may not be executed for each vector lane. Scale
6742   // the cost by the probability of executing the predicated block.
6743   if (isPredicatedInst(I, VF)) {
6744     Cost /= getReciprocalPredBlockProb();
6745 
6746     // Add the cost of an i1 extract and a branch
6747     auto *Vec_i1Ty =
6748         VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF);
6749     Cost += TTI.getScalarizationOverhead(
6750         Vec_i1Ty, APInt::getAllOnes(VF.getKnownMinValue()),
6751         /*Insert=*/false, /*Extract=*/true);
6752     Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput);
6753 
6754     if (useEmulatedMaskMemRefHack(I, VF))
6755       // Artificially setting to a high enough value to practically disable
6756       // vectorization with such operations.
6757       Cost = 3000000;
6758   }
6759 
6760   return Cost;
6761 }
6762 
6763 InstructionCost
6764 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
6765                                                     ElementCount VF) {
6766   Type *ValTy = getLoadStoreType(I);
6767   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6768   Value *Ptr = getLoadStorePointerOperand(I);
6769   unsigned AS = getLoadStoreAddressSpace(I);
6770   int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr);
6771   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6772 
6773   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6774          "Stride should be 1 or -1 for consecutive memory access");
6775   const Align Alignment = getLoadStoreAlignment(I);
6776   InstructionCost Cost = 0;
6777   if (Legal->isMaskRequired(I))
6778     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6779                                       CostKind);
6780   else
6781     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6782                                 CostKind, I);
6783 
6784   bool Reverse = ConsecutiveStride < 0;
6785   if (Reverse)
6786     Cost +=
6787         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
6788   return Cost;
6789 }
6790 
6791 InstructionCost
6792 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
6793                                                 ElementCount VF) {
6794   assert(Legal->isUniformMemOp(*I));
6795 
6796   Type *ValTy = getLoadStoreType(I);
6797   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6798   const Align Alignment = getLoadStoreAlignment(I);
6799   unsigned AS = getLoadStoreAddressSpace(I);
6800   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6801   if (isa<LoadInst>(I)) {
6802     return TTI.getAddressComputationCost(ValTy) +
6803            TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
6804                                CostKind) +
6805            TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
6806   }
6807   StoreInst *SI = cast<StoreInst>(I);
6808 
6809   bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
6810   return TTI.getAddressComputationCost(ValTy) +
6811          TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
6812                              CostKind) +
6813          (isLoopInvariantStoreValue
6814               ? 0
6815               : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
6816                                        VF.getKnownMinValue() - 1));
6817 }
6818 
6819 InstructionCost
6820 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
6821                                                  ElementCount VF) {
6822   Type *ValTy = getLoadStoreType(I);
6823   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6824   const Align Alignment = getLoadStoreAlignment(I);
6825   const Value *Ptr = getLoadStorePointerOperand(I);
6826 
6827   return TTI.getAddressComputationCost(VectorTy) +
6828          TTI.getGatherScatterOpCost(
6829              I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
6830              TargetTransformInfo::TCK_RecipThroughput, I);
6831 }
6832 
6833 InstructionCost
6834 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
6835                                                    ElementCount VF) {
6836   // TODO: Once we have support for interleaving with scalable vectors
6837   // we can calculate the cost properly here.
6838   if (VF.isScalable())
6839     return InstructionCost::getInvalid();
6840 
6841   Type *ValTy = getLoadStoreType(I);
6842   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6843   unsigned AS = getLoadStoreAddressSpace(I);
6844 
6845   auto Group = getInterleavedAccessGroup(I);
6846   assert(Group && "Fail to get an interleaved access group.");
6847 
6848   unsigned InterleaveFactor = Group->getFactor();
6849   auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
6850 
6851   // Holds the indices of existing members in the interleaved group.
6852   SmallVector<unsigned, 4> Indices;
6853   for (unsigned IF = 0; IF < InterleaveFactor; IF++)
6854     if (Group->getMember(IF))
6855       Indices.push_back(IF);
6856 
6857   // Calculate the cost of the whole interleaved group.
6858   bool UseMaskForGaps =
6859       (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) ||
6860       (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()));
6861   InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
6862       I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
6863       AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);
6864 
6865   if (Group->isReverse()) {
6866     // TODO: Add support for reversed masked interleaved access.
6867     assert(!Legal->isMaskRequired(I) &&
6868            "Reverse masked interleaved access not supported.");
6869     Cost +=
6870         Group->getNumMembers() *
6871         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
6872   }
6873   return Cost;
6874 }
6875 
6876 Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost(
6877     Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
6878   using namespace llvm::PatternMatch;
  // Early exit if there are no in-loop reductions.
6880   if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
6881     return None;
6882   auto *VectorTy = cast<VectorType>(Ty);
6883 
  // We are looking for one of the following patterns, choosing the minimal
  // acceptable cost among them:
6885   //  reduce(mul(ext(A), ext(B))) or
6886   //  reduce(mul(A, B)) or
6887   //  reduce(ext(A)) or
6888   //  reduce(A).
6889   // The basic idea is that we walk down the tree to do that, finding the root
6890   // reduction instruction in InLoopReductionImmediateChains. From there we find
6891   // the pattern of mul/ext and test the cost of the entire pattern vs the cost
6892   // of the components. If the reduction cost is lower then we return it for the
6893   // reduction instruction and 0 for the other instructions in the pattern. If
  // it is not, we return an invalid cost specifying that the original cost
  // method should be used.
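  //
  // For example, a loop accumulating sum += sext(a[i]) * sext(b[i]) from i8
  // inputs into an i32 accumulator matches reduce(mul(ext(A), ext(B))) and may
  // be costed as a single extended multiply-add reduction if the target
  // reports a cheaper cost for that form.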
6896   Instruction *RetI = I;
6897   if (match(RetI, m_ZExtOrSExt(m_Value()))) {
6898     if (!RetI->hasOneUser())
6899       return None;
6900     RetI = RetI->user_back();
6901   }
6902   if (match(RetI, m_Mul(m_Value(), m_Value())) &&
6903       RetI->user_back()->getOpcode() == Instruction::Add) {
6904     if (!RetI->hasOneUser())
6905       return None;
6906     RetI = RetI->user_back();
6907   }
6908 
6909   // Test if the found instruction is a reduction, and if not return an invalid
6910   // cost specifying the parent to use the original cost modelling.
6911   if (!InLoopReductionImmediateChains.count(RetI))
6912     return None;
6913 
6914   // Find the reduction this chain is a part of and calculate the basic cost of
6915   // the reduction on its own.
6916   Instruction *LastChain = InLoopReductionImmediateChains[RetI];
6917   Instruction *ReductionPhi = LastChain;
6918   while (!isa<PHINode>(ReductionPhi))
6919     ReductionPhi = InLoopReductionImmediateChains[ReductionPhi];
6920 
6921   const RecurrenceDescriptor &RdxDesc =
6922       Legal->getReductionVars().find(cast<PHINode>(ReductionPhi))->second;
6923 
6924   InstructionCost BaseCost = TTI.getArithmeticReductionCost(
6925       RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind);
6926 
6927   // For a call to the llvm.fmuladd intrinsic we need to add the cost of a
6928   // normal fmul instruction to the cost of the fadd reduction.
6929   if (RdxDesc.getRecurrenceKind() == RecurKind::FMulAdd)
6930     BaseCost +=
6931         TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind);
6932 
6933   // If we're using ordered reductions then we can just return the base cost
6934   // here, since getArithmeticReductionCost calculates the full ordered
6935   // reduction cost when FP reassociation is not allowed.
6936   if (useOrderedReductions(RdxDesc))
6937     return BaseCost;
6938 
6939   // Get the operand that was not the reduction chain and match it to one of the
6940   // patterns, returning the better cost if it is found.
6941   Instruction *RedOp = RetI->getOperand(1) == LastChain
6942                            ? dyn_cast<Instruction>(RetI->getOperand(0))
6943                            : dyn_cast<Instruction>(RetI->getOperand(1));
6944 
6945   VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
6946 
6947   Instruction *Op0, *Op1;
6948   if (RedOp &&
6949       match(RedOp,
6950             m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) &&
6951       match(Op0, m_ZExtOrSExt(m_Value())) &&
6952       Op0->getOpcode() == Op1->getOpcode() &&
6953       Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
6954       !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) &&
6955       (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) {
6956 
    // Matched reduce(ext(mul(ext(A), ext(B))))
6958     // Note that the extend opcodes need to all match, or if A==B they will have
6959     // been converted to zext(mul(sext(A), sext(A))) as it is known positive,
6960     // which is equally fine.
6961     bool IsUnsigned = isa<ZExtInst>(Op0);
6962     auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
6963     auto *MulType = VectorType::get(Op0->getType(), VectorTy);
6964 
6965     InstructionCost ExtCost =
6966         TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType,
6967                              TTI::CastContextHint::None, CostKind, Op0);
6968     InstructionCost MulCost =
6969         TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind);
6970     InstructionCost Ext2Cost =
6971         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType,
6972                              TTI::CastContextHint::None, CostKind, RedOp);
6973 
6974     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6975         /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6976         CostKind);
6977 
6978     if (RedCost.isValid() &&
6979         RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost)
6980       return I == RetI ? RedCost : 0;
6981   } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) &&
6982              !TheLoop->isLoopInvariant(RedOp)) {
6983     // Matched reduce(ext(A))
6984     bool IsUnsigned = isa<ZExtInst>(RedOp);
6985     auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
6986     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6987         /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6988         CostKind);
6989 
6990     InstructionCost ExtCost =
6991         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
6992                              TTI::CastContextHint::None, CostKind, RedOp);
6993     if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
6994       return I == RetI ? RedCost : 0;
6995   } else if (RedOp &&
6996              match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) {
6997     if (match(Op0, m_ZExtOrSExt(m_Value())) &&
6998         Op0->getOpcode() == Op1->getOpcode() &&
6999         !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
7000       bool IsUnsigned = isa<ZExtInst>(Op0);
7001       Type *Op0Ty = Op0->getOperand(0)->getType();
7002       Type *Op1Ty = Op1->getOperand(0)->getType();
7003       Type *LargestOpTy =
7004           Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty
7005                                                                     : Op0Ty;
7006       auto *ExtType = VectorType::get(LargestOpTy, VectorTy);
7007 
7008       // Matched reduce(mul(ext(A), ext(B))), where the two ext may be of
7009       // different sizes. We take the largest type as the ext to reduce, and add
7010       // the remaining cost as, for example reduce(mul(ext(ext(A)), ext(B))).
7011       InstructionCost ExtCost0 = TTI.getCastInstrCost(
7012           Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy),
7013           TTI::CastContextHint::None, CostKind, Op0);
7014       InstructionCost ExtCost1 = TTI.getCastInstrCost(
7015           Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy),
7016           TTI::CastContextHint::None, CostKind, Op1);
7017       InstructionCost MulCost =
7018           TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
7019 
7020       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
7021           /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
7022           CostKind);
7023       InstructionCost ExtraExtCost = 0;
7024       if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) {
7025         Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1;
7026         ExtraExtCost = TTI.getCastInstrCost(
7027             ExtraExtOp->getOpcode(), ExtType,
7028             VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy),
7029             TTI::CastContextHint::None, CostKind, ExtraExtOp);
7030       }
7031 
7032       if (RedCost.isValid() &&
7033           (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost))
7034         return I == RetI ? RedCost : 0;
7035     } else if (!match(I, m_ZExtOrSExt(m_Value()))) {
7036       // Matched reduce(mul())
7037       InstructionCost MulCost =
7038           TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
7039 
7040       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
7041           /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy,
7042           CostKind);
7043 
7044       if (RedCost.isValid() && RedCost < MulCost + BaseCost)
7045         return I == RetI ? RedCost : 0;
7046     }
7047   }
7048 
7049   return I == RetI ? Optional<InstructionCost>(BaseCost) : None;
7050 }
7051 
7052 InstructionCost
7053 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
7054                                                      ElementCount VF) {
7055   // Calculate scalar cost only. Vectorization cost should be ready at this
7056   // moment.
7057   if (VF.isScalar()) {
7058     Type *ValTy = getLoadStoreType(I);
7059     const Align Alignment = getLoadStoreAlignment(I);
7060     unsigned AS = getLoadStoreAddressSpace(I);
7061 
7062     return TTI.getAddressComputationCost(ValTy) +
7063            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
7064                                TTI::TCK_RecipThroughput, I);
7065   }
7066   return getWideningCost(I, VF);
7067 }
7068 
7069 LoopVectorizationCostModel::VectorizationCostTy
7070 LoopVectorizationCostModel::getInstructionCost(Instruction *I,
7071                                                ElementCount VF) {
7072   // If we know that this instruction will remain uniform, check the cost of
7073   // the scalar version.
7074   if (isUniformAfterVectorization(I, VF))
7075     VF = ElementCount::getFixed(1);
7076 
7077   if (VF.isVector() && isProfitableToScalarize(I, VF))
7078     return VectorizationCostTy(InstsToScalarize[VF][I], false);
7079 
7080   // Forced scalars do not have any scalarization overhead.
7081   auto ForcedScalar = ForcedScalars.find(VF);
7082   if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
7083     auto InstSet = ForcedScalar->second;
7084     if (InstSet.count(I))
7085       return VectorizationCostTy(
7086           (getInstructionCost(I, ElementCount::getFixed(1)).first *
7087            VF.getKnownMinValue()),
7088           false);
7089   }
7090 
7091   Type *VectorTy;
7092   InstructionCost C = getInstructionCost(I, VF, VectorTy);
7093 
7094   bool TypeNotScalarized = false;
7095   if (VF.isVector() && VectorTy->isVectorTy()) {
7096     unsigned NumParts = TTI.getNumberOfParts(VectorTy);
7097     if (NumParts)
7098       TypeNotScalarized = NumParts < VF.getKnownMinValue();
7099     else
7100       C = InstructionCost::getInvalid();
7101   }
7102   return VectorizationCostTy(C, TypeNotScalarized);
7103 }
7104 
7105 InstructionCost
7106 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
7107                                                      ElementCount VF) const {
7108 
7109   // There is no mechanism yet to create a scalable scalarization loop,
7110   // so this is currently Invalid.
7111   if (VF.isScalable())
7112     return InstructionCost::getInvalid();
7113 
7114   if (VF.isScalar())
7115     return 0;
7116 
7117   InstructionCost Cost = 0;
7118   Type *RetTy = ToVectorTy(I->getType(), VF);
7119   if (!RetTy->isVoidTy() &&
7120       (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
7121     Cost += TTI.getScalarizationOverhead(
7122         cast<VectorType>(RetTy), APInt::getAllOnes(VF.getKnownMinValue()), true,
7123         false);
7124 
7125   // Some targets keep addresses scalar.
7126   if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
7127     return Cost;
7128 
7129   // Some targets support efficient element stores.
7130   if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
7131     return Cost;
7132 
7133   // Collect operands to consider.
7134   CallInst *CI = dyn_cast<CallInst>(I);
7135   Instruction::op_range Ops = CI ? CI->args() : I->operands();
7136 
7137   // Skip operands that do not require extraction/scalarization and do not incur
7138   // any overhead.
7139   SmallVector<Type *> Tys;
7140   for (auto *V : filterExtractingOperands(Ops, VF))
7141     Tys.push_back(MaybeVectorizeType(V->getType(), VF));
7142   return Cost + TTI.getOperandsScalarizationOverhead(
7143                     filterExtractingOperands(Ops, VF), Tys);
7144 }
7145 
7146 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
7147   if (VF.isScalar())
7148     return;
7149   NumPredStores = 0;
7150   for (BasicBlock *BB : TheLoop->blocks()) {
7151     // For each instruction in the old loop.
7152     for (Instruction &I : *BB) {
7153       Value *Ptr =  getLoadStorePointerOperand(&I);
7154       if (!Ptr)
7155         continue;
7156 
7157       // TODO: We should generate better code and update the cost model for
7158       // predicated uniform stores. Today they are treated as any other
7159       // predicated store (see added test cases in
7160       // invariant-store-vectorization.ll).
7161       if (isa<StoreInst>(&I) && isScalarWithPredication(&I, VF))
7162         NumPredStores++;
7163 
7164       if (Legal->isUniformMemOp(I)) {
7165         // TODO: Avoid replicating loads and stores instead of
7166         // relying on instcombine to remove them.
7167         // Load: Scalar load + broadcast
7168         // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
7169         InstructionCost Cost;
7170         if (isa<StoreInst>(&I) && VF.isScalable() &&
7171             isLegalGatherOrScatter(&I, VF)) {
7172           Cost = getGatherScatterCost(&I, VF);
7173           setWideningDecision(&I, VF, CM_GatherScatter, Cost);
7174         } else {
7175           assert((isa<LoadInst>(&I) || !VF.isScalable()) &&
7176                  "Cannot yet scalarize uniform stores");
7177           Cost = getUniformMemOpCost(&I, VF);
7178           setWideningDecision(&I, VF, CM_Scalarize, Cost);
7179         }
7180         continue;
7181       }
7182 
7183       // We assume that widening is the best solution when possible.
7184       if (memoryInstructionCanBeWidened(&I, VF)) {
7185         InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
7186         int ConsecutiveStride = Legal->isConsecutivePtr(
7187             getLoadStoreType(&I), getLoadStorePointerOperand(&I));
7188         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
7189                "Expected consecutive stride.");
7190         InstWidening Decision =
7191             ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
7192         setWideningDecision(&I, VF, Decision, Cost);
7193         continue;
7194       }
7195 
7196       // Choose between Interleaving, Gather/Scatter or Scalarization.
7197       InstructionCost InterleaveCost = InstructionCost::getInvalid();
7198       unsigned NumAccesses = 1;
7199       if (isAccessInterleaved(&I)) {
7200         auto Group = getInterleavedAccessGroup(&I);
        assert(Group && "Failed to get an interleaved access group.");
7202 
7203         // Make one decision for the whole group.
7204         if (getWideningDecision(&I, VF) != CM_Unknown)
7205           continue;
7206 
7207         NumAccesses = Group->getNumMembers();
7208         if (interleavedAccessCanBeWidened(&I, VF))
7209           InterleaveCost = getInterleaveGroupCost(&I, VF);
7210       }
7211 
7212       InstructionCost GatherScatterCost =
7213           isLegalGatherOrScatter(&I, VF)
7214               ? getGatherScatterCost(&I, VF) * NumAccesses
7215               : InstructionCost::getInvalid();
7216 
7217       InstructionCost ScalarizationCost =
7218           getMemInstScalarizationCost(&I, VF) * NumAccesses;
7219 
      // Choose the best option for the current VF, record this decision, and
      // use it during vectorization.
7222       InstructionCost Cost;
7223       InstWidening Decision;
7224       if (InterleaveCost <= GatherScatterCost &&
7225           InterleaveCost < ScalarizationCost) {
7226         Decision = CM_Interleave;
7227         Cost = InterleaveCost;
7228       } else if (GatherScatterCost < ScalarizationCost) {
7229         Decision = CM_GatherScatter;
7230         Cost = GatherScatterCost;
7231       } else {
7232         Decision = CM_Scalarize;
7233         Cost = ScalarizationCost;
7234       }
      // If the instruction belongs to an interleave group, the whole group
      // receives the same decision. The whole group receives the cost, but
      // the cost will actually be assigned to one instruction.
7238       if (auto Group = getInterleavedAccessGroup(&I))
7239         setWideningDecision(Group, VF, Decision, Cost);
7240       else
7241         setWideningDecision(&I, VF, Decision, Cost);
7242     }
7243   }
7244 
7245   // Make sure that any load of address and any other address computation
7246   // remains scalar unless there is gather/scatter support. This avoids
7247   // inevitable extracts into address registers, and also has the benefit of
7248   // activating LSR more, since that pass can't optimize vectorized
7249   // addresses.
7250   if (TTI.prefersVectorizedAddressing())
7251     return;
7252 
7253   // Start with all scalar pointer uses.
7254   SmallPtrSet<Instruction *, 8> AddrDefs;
7255   for (BasicBlock *BB : TheLoop->blocks())
7256     for (Instruction &I : *BB) {
7257       Instruction *PtrDef =
7258         dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
7259       if (PtrDef && TheLoop->contains(PtrDef) &&
7260           getWideningDecision(&I, VF) != CM_GatherScatter)
7261         AddrDefs.insert(PtrDef);
7262     }
7263 
7264   // Add all instructions used to generate the addresses.
7265   SmallVector<Instruction *, 4> Worklist;
7266   append_range(Worklist, AddrDefs);
7267   while (!Worklist.empty()) {
7268     Instruction *I = Worklist.pop_back_val();
7269     for (auto &Op : I->operands())
7270       if (auto *InstOp = dyn_cast<Instruction>(Op))
7271         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
7272             AddrDefs.insert(InstOp).second)
7273           Worklist.push_back(InstOp);
7274   }
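  // For illustration (hypothetical IR): if a load that is not being
  // gathered/scattered has its address computed in-loop as
  //   %idx = add i64 %i, 1
  //   %gep = getelementptr i32, i32* %base, i64 %idx
  // then both %gep and %idx end up in AddrDefs, and the loop below keeps them
  // scalar so that no extracts into address registers are needed.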
7275 
7276   for (auto *I : AddrDefs) {
7277     if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by
      // the cost functions, but since this involves finding out whether the
      // loaded register is involved in an address computation, the decision
      // is instead changed here when we know this is the case.
7282       InstWidening Decision = getWideningDecision(I, VF);
7283       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
7284         // Scalarize a widened load of address.
7285         setWideningDecision(
7286             I, VF, CM_Scalarize,
7287             (VF.getKnownMinValue() *
7288              getMemoryInstructionCost(I, ElementCount::getFixed(1))));
7289       else if (auto Group = getInterleavedAccessGroup(I)) {
7290         // Scalarize an interleave group of address loads.
7291         for (unsigned I = 0; I < Group->getFactor(); ++I) {
7292           if (Instruction *Member = Group->getMember(I))
7293             setWideningDecision(
7294                 Member, VF, CM_Scalarize,
7295                 (VF.getKnownMinValue() *
7296                  getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
7297         }
7298       }
7299     } else
      // Make sure I gets scalarized and is given a cost estimate without
      // scalarization overhead.
7302       ForcedScalars[VF].insert(I);
7303   }
7304 }
7305 
7306 InstructionCost
7307 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
7308                                                Type *&VectorTy) {
7309   Type *RetTy = I->getType();
7310   if (canTruncateToMinimalBitwidth(I, VF))
7311     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
7312   auto SE = PSE.getSE();
7313   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
7314 
7315   auto hasSingleCopyAfterVectorization = [this](Instruction *I,
7316                                                 ElementCount VF) -> bool {
7317     if (VF.isScalar())
7318       return true;
7319 
7320     auto Scalarized = InstsToScalarize.find(VF);
7321     assert(Scalarized != InstsToScalarize.end() &&
7322            "VF not yet analyzed for scalarization profitability");
7323     return !Scalarized->second.count(I) &&
7324            llvm::all_of(I->users(), [&](User *U) {
7325              auto *UI = cast<Instruction>(U);
7326              return !Scalarized->second.count(UI);
7327            });
7328   };
7329   (void) hasSingleCopyAfterVectorization;
7330 
7331   if (isScalarAfterVectorization(I, VF)) {
    // With the exception of GEPs and PHIs, after scalarization there should
    // only be one copy of the instruction generated in the loop. This is
    // because the VF is either 1, or any instructions that need scalarizing
    // have already been dealt with by the time we get here. As a result, we
    // don't have to multiply the instruction cost by VF.
7337     assert(I->getOpcode() == Instruction::GetElementPtr ||
7338            I->getOpcode() == Instruction::PHI ||
7339            (I->getOpcode() == Instruction::BitCast &&
7340             I->getType()->isPointerTy()) ||
7341            hasSingleCopyAfterVectorization(I, VF));
7342     VectorTy = RetTy;
7343   } else
7344     VectorTy = ToVectorTy(RetTy, VF);
7345 
7346   // TODO: We need to estimate the cost of intrinsic calls.
7347   switch (I->getOpcode()) {
7348   case Instruction::GetElementPtr:
7349     // We mark this instruction as zero-cost because the cost of GEPs in
7350     // vectorized code depends on whether the corresponding memory instruction
7351     // is scalarized or not. Therefore, we handle GEPs with the memory
7352     // instruction cost.
7353     return 0;
7354   case Instruction::Br: {
    // In cases of scalarized and predicated instructions, there will be VF
    // predicated blocks in the vectorized loop. Each branch around these
    // blocks also requires an extract of its vector compare i1 element.
7358     bool ScalarPredicatedBB = false;
7359     BranchInst *BI = cast<BranchInst>(I);
7360     if (VF.isVector() && BI->isConditional() &&
7361         (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
7362          PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
7363       ScalarPredicatedBB = true;
7364 
7365     if (ScalarPredicatedBB) {
      // Scalarizing predicated instructions is not possible for scalable VFs.
7367       if (VF.isScalable())
7368         return InstructionCost::getInvalid();
7369       // Return cost for branches around scalarized and predicated blocks.
7370       auto *Vec_i1Ty =
7371           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
7372       return (
7373           TTI.getScalarizationOverhead(
7374               Vec_i1Ty, APInt::getAllOnes(VF.getFixedValue()), false, true) +
7375           (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue()));
7376     } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
7377       // The back-edge branch will remain, as will all scalar branches.
7378       return TTI.getCFInstrCost(Instruction::Br, CostKind);
7379     else
7380       // This branch will be eliminated by if-conversion.
7381       return 0;
7382     // Note: We currently assume zero cost for an unconditional branch inside
7383     // a predicated block since it will become a fall-through, although we
7384     // may decide in the future to call TTI for all branches.
7385   }
7386   case Instruction::PHI: {
7387     auto *Phi = cast<PHINode>(I);
7388 
7389     // First-order recurrences are replaced by vector shuffles inside the loop.
7390     // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
7391     if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi))
7392       return TTI.getShuffleCost(
7393           TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy),
7394           None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1));
7395 
7396     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
7397     // converted into select instructions. We require N - 1 selects per phi
7398     // node, where N is the number of incoming values.
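    // For example (hypothetical IR), a non-header phi with three incoming
    // values,
    //   %p = phi i32 [ %a, %bb0 ], [ %b, %bb1 ], [ %c, %bb2 ]
    // is costed as two vector selects, one per extra incoming value.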
7399     if (VF.isVector() && Phi->getParent() != TheLoop->getHeader())
7400       return (Phi->getNumIncomingValues() - 1) *
7401              TTI.getCmpSelInstrCost(
7402                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
7403                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
7404                  CmpInst::BAD_ICMP_PREDICATE, CostKind);
7405 
7406     return TTI.getCFInstrCost(Instruction::PHI, CostKind);
7407   }
7408   case Instruction::UDiv:
7409   case Instruction::SDiv:
7410   case Instruction::URem:
7411   case Instruction::SRem:
7412     // If we have a predicated instruction, it may not be executed for each
7413     // vector lane. Get the scalarization cost and scale this amount by the
7414     // probability of executing the predicated block. If the instruction is not
7415     // predicated, we fall through to the next case.
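    // Rough sketch of the computation below, assuming VF = 4 and a reciprocal
    // block probability of 2 (i.e. the predicated block executes for roughly
    // half of the iterations):
    //   Cost ~= (4 * PHI cost + 4 * div cost + scalarization overhead) / 2.
    // The individual terms are supplied by TTI; the numbers are illustrative.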
7416     if (VF.isVector() && isScalarWithPredication(I, VF)) {
7417       InstructionCost Cost = 0;
7418 
7419       // These instructions have a non-void type, so account for the phi nodes
7420       // that we will create. This cost is likely to be zero. The phi node
7421       // cost, if any, should be scaled by the block probability because it
7422       // models a copy at the end of each predicated block.
7423       Cost += VF.getKnownMinValue() *
7424               TTI.getCFInstrCost(Instruction::PHI, CostKind);
7425 
7426       // The cost of the non-predicated instruction.
7427       Cost += VF.getKnownMinValue() *
7428               TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);
7429 
7430       // The cost of insertelement and extractelement instructions needed for
7431       // scalarization.
7432       Cost += getScalarizationOverhead(I, VF);
7433 
7434       // Scale the cost by the probability of executing the predicated blocks.
7435       // This assumes the predicated block for each vector lane is equally
7436       // likely.
7437       return Cost / getReciprocalPredBlockProb();
7438     }
7439     LLVM_FALLTHROUGH;
7440   case Instruction::Add:
7441   case Instruction::FAdd:
7442   case Instruction::Sub:
7443   case Instruction::FSub:
7444   case Instruction::Mul:
7445   case Instruction::FMul:
7446   case Instruction::FDiv:
7447   case Instruction::FRem:
7448   case Instruction::Shl:
7449   case Instruction::LShr:
7450   case Instruction::AShr:
7451   case Instruction::And:
7452   case Instruction::Or:
7453   case Instruction::Xor: {
    // Since we will replace the stride by 1, the multiplication should go
    // away.
7455     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
7456       return 0;
7457 
7458     // Detect reduction patterns
7459     if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7460       return *RedCost;
7461 
    // Certain instructions can be cheaper to vectorize if they have a constant
    // second vector operand. One example of this is shifts on x86.
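    // For instance (illustrative only), a shift by the uniform constant splat
    // <i32 3, i32 3, i32 3, i32 3> can often use an immediate-form shift,
    // whereas a fully variable per-lane shift amount may be more expensive on
    // some targets.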
7464     Value *Op2 = I->getOperand(1);
7465     TargetTransformInfo::OperandValueProperties Op2VP;
7466     TargetTransformInfo::OperandValueKind Op2VK =
7467         TTI.getOperandInfo(Op2, Op2VP);
7468     if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
7469       Op2VK = TargetTransformInfo::OK_UniformValue;
7470 
7471     SmallVector<const Value *, 4> Operands(I->operand_values());
7472     return TTI.getArithmeticInstrCost(
7473         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7474         Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
7475   }
7476   case Instruction::FNeg: {
7477     return TTI.getArithmeticInstrCost(
7478         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7479         TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None,
7480         TargetTransformInfo::OP_None, I->getOperand(0), I);
7481   }
7482   case Instruction::Select: {
7483     SelectInst *SI = cast<SelectInst>(I);
7484     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
7485     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
7486 
7487     const Value *Op0, *Op1;
7488     using namespace llvm::PatternMatch;
7489     if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
7490                         match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
7491       // select x, y, false --> x & y
7492       // select x, true, y --> x | y
7493       TTI::OperandValueProperties Op1VP = TTI::OP_None;
7494       TTI::OperandValueProperties Op2VP = TTI::OP_None;
7495       TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP);
7496       TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP);
7497       assert(Op0->getType()->getScalarSizeInBits() == 1 &&
7498               Op1->getType()->getScalarSizeInBits() == 1);
7499 
7500       SmallVector<const Value *, 2> Operands{Op0, Op1};
7501       return TTI.getArithmeticInstrCost(
7502           match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And, VectorTy,
7503           CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I);
7504     }
7505 
7506     Type *CondTy = SI->getCondition()->getType();
7507     if (!ScalarCond)
7508       CondTy = VectorType::get(CondTy, VF);
7509 
7510     CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
7511     if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition()))
7512       Pred = Cmp->getPredicate();
7513     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, Pred,
7514                                   CostKind, I);
7515   }
7516   case Instruction::ICmp:
7517   case Instruction::FCmp: {
7518     Type *ValTy = I->getOperand(0)->getType();
7519     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
7520     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
7521       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
7522     VectorTy = ToVectorTy(ValTy, VF);
7523     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr,
7524                                   cast<CmpInst>(I)->getPredicate(), CostKind,
7525                                   I);
7526   }
7527   case Instruction::Store:
7528   case Instruction::Load: {
7529     ElementCount Width = VF;
7530     if (Width.isVector()) {
7531       InstWidening Decision = getWideningDecision(I, Width);
7532       assert(Decision != CM_Unknown &&
7533              "CM decision should be taken at this point");
7534       if (Decision == CM_Scalarize)
7535         Width = ElementCount::getFixed(1);
7536     }
7537     VectorTy = ToVectorTy(getLoadStoreType(I), Width);
7538     return getMemoryInstructionCost(I, VF);
7539   }
7540   case Instruction::BitCast:
7541     if (I->getType()->isPointerTy())
7542       return 0;
7543     LLVM_FALLTHROUGH;
7544   case Instruction::ZExt:
7545   case Instruction::SExt:
7546   case Instruction::FPToUI:
7547   case Instruction::FPToSI:
7548   case Instruction::FPExt:
7549   case Instruction::PtrToInt:
7550   case Instruction::IntToPtr:
7551   case Instruction::SIToFP:
7552   case Instruction::UIToFP:
7553   case Instruction::Trunc:
7554   case Instruction::FPTrunc: {
7555     // Computes the CastContextHint from a Load/Store instruction.
7556     auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
7557       assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
7558              "Expected a load or a store!");
7559 
7560       if (VF.isScalar() || !TheLoop->contains(I))
7561         return TTI::CastContextHint::Normal;
7562 
7563       switch (getWideningDecision(I, VF)) {
7564       case LoopVectorizationCostModel::CM_GatherScatter:
7565         return TTI::CastContextHint::GatherScatter;
7566       case LoopVectorizationCostModel::CM_Interleave:
7567         return TTI::CastContextHint::Interleave;
7568       case LoopVectorizationCostModel::CM_Scalarize:
7569       case LoopVectorizationCostModel::CM_Widen:
7570         return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked
7571                                         : TTI::CastContextHint::Normal;
7572       case LoopVectorizationCostModel::CM_Widen_Reverse:
7573         return TTI::CastContextHint::Reversed;
7574       case LoopVectorizationCostModel::CM_Unknown:
7575         llvm_unreachable("Instr did not go through cost modelling?");
7576       }
7577 
7578       llvm_unreachable("Unhandled case!");
7579     };
7580 
7581     unsigned Opcode = I->getOpcode();
7582     TTI::CastContextHint CCH = TTI::CastContextHint::None;
7583     // For Trunc, the context is the only user, which must be a StoreInst.
7584     if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
7585       if (I->hasOneUse())
7586         if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
7587           CCH = ComputeCCH(Store);
7588     }
    // For ZExt/SExt/FPExt, the context is the operand, which must be a
    // LoadInst.
7590     else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
7591              Opcode == Instruction::FPExt) {
7592       if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
7593         CCH = ComputeCCH(Load);
7594     }
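    // For example, a zext fed by a load whose widening decision is
    // CM_Widen_Reverse is costed with CastContextHint::Reversed, letting
    // targets that can fold the extend into a reversed load report a cheaper
    // combined cost. This is only an illustration of how CCH is consumed
    // below.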
7595 
7596     // We optimize the truncation of induction variables having constant
7597     // integer steps. The cost of these truncations is the same as the scalar
7598     // operation.
7599     if (isOptimizableIVTruncate(I, VF)) {
7600       auto *Trunc = cast<TruncInst>(I);
7601       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
7602                                   Trunc->getSrcTy(), CCH, CostKind, Trunc);
7603     }
7604 
7605     // Detect reduction patterns
7606     if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7607       return *RedCost;
7608 
7609     Type *SrcScalarTy = I->getOperand(0)->getType();
7610     Type *SrcVecTy =
7611         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
7612     if (canTruncateToMinimalBitwidth(I, VF)) {
      // This cast is going to be shrunk. This may remove the cast or it might
      // turn it into a slightly different cast. For example, if MinBW == 16,
      // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
7616       //
7617       // Calculate the modified src and dest types.
7618       Type *MinVecTy = VectorTy;
7619       if (Opcode == Instruction::Trunc) {
7620         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
7621         VectorTy =
7622             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7623       } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
7624         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
7625         VectorTy =
7626             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7627       }
7628     }
7629 
7630     return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
7631   }
7632   case Instruction::Call: {
7633     if (RecurrenceDescriptor::isFMulAddIntrinsic(I))
7634       if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7635         return *RedCost;
7636     bool NeedToScalarize;
7637     CallInst *CI = cast<CallInst>(I);
7638     InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
7639     if (getVectorIntrinsicIDForCall(CI, TLI)) {
7640       InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF);
7641       return std::min(CallCost, IntrinsicCost);
7642     }
7643     return CallCost;
7644   }
7645   case Instruction::ExtractValue:
7646     return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput);
7647   case Instruction::Alloca:
7648     // We cannot easily widen alloca to a scalable alloca, as
7649     // the result would need to be a vector of pointers.
7650     if (VF.isScalable())
7651       return InstructionCost::getInvalid();
7652     LLVM_FALLTHROUGH;
7653   default:
7654     // This opcode is unknown. Assume that it is the same as 'mul'.
7655     return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
7656   } // end of switch.
7657 }
7658 
7659 char LoopVectorize::ID = 0;
7660 
7661 static const char lv_name[] = "Loop Vectorization";
7662 
7663 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
7664 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
7665 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
7666 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
7667 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
7668 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
7669 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
7670 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
7671 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
7672 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
7673 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
7674 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
7675 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
7676 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
7677 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
7678 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
7679 
7680 namespace llvm {
7681 
7682 Pass *createLoopVectorizePass() { return new LoopVectorize(); }
7683 
7684 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
7685                               bool VectorizeOnlyWhenForced) {
7686   return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
7687 }
7688 
7689 } // end namespace llvm
7690 
7691 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
7692   // Check if the pointer operand of a load or store instruction is
7693   // consecutive.
7694   if (auto *Ptr = getLoadStorePointerOperand(Inst))
7695     return Legal->isConsecutivePtr(getLoadStoreType(Inst), Ptr);
7696   return false;
7697 }
7698 
7699 void LoopVectorizationCostModel::collectValuesToIgnore() {
7700   // Ignore ephemeral values.
7701   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
7702 
7703   // Ignore type-promoting instructions we identified during reduction
7704   // detection.
7705   for (auto &Reduction : Legal->getReductionVars()) {
7706     const RecurrenceDescriptor &RedDes = Reduction.second;
7707     const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
7708     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7709   }
7710   // Ignore type-casting instructions we identified during induction
7711   // detection.
7712   for (auto &Induction : Legal->getInductionVars()) {
7713     const InductionDescriptor &IndDes = Induction.second;
7714     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
7715     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7716   }
7717 }
7718 
7719 void LoopVectorizationCostModel::collectInLoopReductions() {
7720   for (auto &Reduction : Legal->getReductionVars()) {
7721     PHINode *Phi = Reduction.first;
7722     const RecurrenceDescriptor &RdxDesc = Reduction.second;
7723 
7724     // We don't collect reductions that are type promoted (yet).
7725     if (RdxDesc.getRecurrenceType() != Phi->getType())
7726       continue;
7727 
7728     // If the target would prefer this reduction to happen "in-loop", then we
7729     // want to record it as such.
7730     unsigned Opcode = RdxDesc.getOpcode();
7731     if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) &&
7732         !TTI.preferInLoopReduction(Opcode, Phi->getType(),
7733                                    TargetTransformInfo::ReductionFlags()))
7734       continue;
7735 
7736     // Check that we can correctly put the reductions into the loop, by
7737     // finding the chain of operations that leads from the phi to the loop
7738     // exit value.
7739     SmallVector<Instruction *, 4> ReductionOperations =
7740         RdxDesc.getReductionOpChain(Phi, TheLoop);
7741     bool InLoop = !ReductionOperations.empty();
7742     if (InLoop) {
7743       InLoopReductionChains[Phi] = ReductionOperations;
7744       // Add the elements to InLoopReductionImmediateChains for cost modelling.
7745       Instruction *LastChain = Phi;
7746       for (auto *I : ReductionOperations) {
7747         InLoopReductionImmediateChains[I] = LastChain;
7748         LastChain = I;
7749       }
7750     }
7751     LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
7752                       << " reduction for phi: " << *Phi << "\n");
7753   }
7754 }
7755 
// TODO: we could return a pair of values that specify the max VF and
// min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
// `buildVPlans(VF, VF)`. We cannot do it because VPlan at the moment
// doesn't have a cost model that can choose which plan to execute if
// more than one is generated.
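// For example, with 256-bit fixed-width vector registers and a widest loop
// type of 32 bits, the function below returns 256 / 32 = 8. The register
// width here is purely illustrative.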
7761 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
7762                                  LoopVectorizationCostModel &CM) {
7763   unsigned WidestType;
7764   std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
7765   return WidestVectorRegBits / WidestType;
7766 }
7767 
7768 VectorizationFactor
7769 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
7770   assert(!UserVF.isScalable() && "scalable vectors not yet supported");
7771   ElementCount VF = UserVF;
  // Outer loop handling: outer loops may require CFG and instruction level
  // transformations before even evaluating whether vectorization is
  // profitable.
7774   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
7775   // the vectorization pipeline.
7776   if (!OrigLoop->isInnermost()) {
7777     // If the user doesn't provide a vectorization factor, determine a
7778     // reasonable one.
7779     if (UserVF.isZero()) {
7780       VF = ElementCount::getFixed(determineVPlanVF(
7781           TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
7782               .getFixedSize(),
7783           CM));
7784       LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
7785 
7786       // Make sure we have a VF > 1 for stress testing.
7787       if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
7788         LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
7789                           << "overriding computed VF.\n");
7790         VF = ElementCount::getFixed(4);
7791       }
7792     }
7793     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7794     assert(isPowerOf2_32(VF.getKnownMinValue()) &&
7795            "VF needs to be a power of two");
7796     LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
7797                       << "VF " << VF << " to build VPlans.\n");
7798     buildVPlans(VF, VF);
7799 
7800     // For VPlan build stress testing, we bail out after VPlan construction.
7801     if (VPlanBuildStressTest)
7802       return VectorizationFactor::Disabled();
7803 
7804     return {VF, 0 /*Cost*/};
7805   }
7806 
7807   LLVM_DEBUG(
7808       dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
7809                 "VPlan-native path.\n");
7810   return VectorizationFactor::Disabled();
7811 }
7812 
7813 Optional<VectorizationFactor>
7814 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
7815   assert(OrigLoop->isInnermost() && "Inner loop expected.");
7816   FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
  if (!MaxFactors) // Cases that should not be vectorized nor interleaved.
7818     return None;
7819 
  // Invalidate interleave groups if all blocks of the loop will be predicated.
7821   if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) &&
7822       !useMaskedInterleavedAccesses(*TTI)) {
7823     LLVM_DEBUG(
7824         dbgs()
7825         << "LV: Invalidate all interleaved groups due to fold-tail by masking "
7826            "which requires masked-interleaved support.\n");
7827     if (CM.InterleaveInfo.invalidateGroups())
7828       // Invalidating interleave groups also requires invalidating all decisions
7829       // based on them, which includes widening decisions and uniform and scalar
7830       // values.
7831       CM.invalidateCostModelingDecisions();
7832   }
7833 
7834   ElementCount MaxUserVF =
7835       UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF;
7836   bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF);
7837   if (!UserVF.isZero() && UserVFIsLegal) {
7838     assert(isPowerOf2_32(UserVF.getKnownMinValue()) &&
7839            "VF needs to be a power of two");
7840     // Collect the instructions (and their associated costs) that will be more
7841     // profitable to scalarize.
7842     if (CM.selectUserVectorizationFactor(UserVF)) {
7843       LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
7844       CM.collectInLoopReductions();
7845       buildVPlansWithVPRecipes(UserVF, UserVF);
7846       LLVM_DEBUG(printPlans(dbgs()));
7847       return {{UserVF, 0}};
7848     } else
7849       reportVectorizationInfo("UserVF ignored because of invalid costs.",
7850                               "InvalidCost", ORE, OrigLoop);
7851   }
7852 
7853   // Populate the set of Vectorization Factor Candidates.
7854   ElementCountSet VFCandidates;
7855   for (auto VF = ElementCount::getFixed(1);
7856        ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2)
7857     VFCandidates.insert(VF);
7858   for (auto VF = ElementCount::getScalable(1);
7859        ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2)
7860     VFCandidates.insert(VF);
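  // As a concrete (hypothetical) illustration: with MaxFactors.FixedVF == 8
  // and MaxFactors.ScalableVF == 4, the candidate set built above is
  // {1, 2, 4, 8, vscale x 1, vscale x 2, vscale x 4}.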
7861 
7862   for (const auto &VF : VFCandidates) {
7863     // Collect Uniform and Scalar instructions after vectorization with VF.
7864     CM.collectUniformsAndScalars(VF);
7865 
7866     // Collect the instructions (and their associated costs) that will be more
7867     // profitable to scalarize.
7868     if (VF.isVector())
7869       CM.collectInstsToScalarize(VF);
7870   }
7871 
7872   CM.collectInLoopReductions();
7873   buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF);
7874   buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF);
7875 
7876   LLVM_DEBUG(printPlans(dbgs()));
7877   if (!MaxFactors.hasVector())
7878     return VectorizationFactor::Disabled();
7879 
7880   // Select the optimal vectorization factor.
7881   auto SelectedVF = CM.selectVectorizationFactor(VFCandidates);
7882 
7883   // Check if it is profitable to vectorize with runtime checks.
7884   unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks();
7885   if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) {
7886     bool PragmaThresholdReached =
7887         NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
7888     bool ThresholdReached =
7889         NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
7890     if ((ThresholdReached && !Hints.allowReordering()) ||
7891         PragmaThresholdReached) {
7892       ORE->emit([&]() {
7893         return OptimizationRemarkAnalysisAliasing(
7894                    DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(),
7895                    OrigLoop->getHeader())
7896                << "loop not vectorized: cannot prove it is safe to reorder "
7897                   "memory operations";
7898       });
7899       LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
7900       Hints.emitRemarkWithHints();
7901       return VectorizationFactor::Disabled();
7902     }
7903   }
7904   return SelectedVF;
7905 }
7906 
7907 VPlan &LoopVectorizationPlanner::getBestPlanFor(ElementCount VF) const {
7908   assert(count_if(VPlans,
7909                   [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) ==
7910              1 &&
         "Best VF does not have a single VPlan.");
7912 
7913   for (const VPlanPtr &Plan : VPlans) {
7914     if (Plan->hasVF(VF))
7915       return *Plan.get();
7916   }
7917   llvm_unreachable("No plan found!");
7918 }
7919 
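// Sketch of the loop metadata produced below when no unroll-disable metadata
// is already present (the metadata node numbers are hypothetical):
//   !0 = distinct !{!0, ..., !1}
//   !1 = !{!"llvm.loop.unroll.runtime.disable"}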
7920 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
7921   SmallVector<Metadata *, 4> MDs;
7922   // Reserve first location for self reference to the LoopID metadata node.
7923   MDs.push_back(nullptr);
7924   bool IsUnrollMetadata = false;
7925   MDNode *LoopID = L->getLoopID();
7926   if (LoopID) {
7927     // First find existing loop unrolling disable metadata.
7928     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
7929       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
7930       if (MD) {
7931         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
7932         IsUnrollMetadata =
7933             S && S->getString().startswith("llvm.loop.unroll.disable");
7934       }
7935       MDs.push_back(LoopID->getOperand(i));
7936     }
7937   }
7938 
7939   if (!IsUnrollMetadata) {
7940     // Add runtime unroll disable metadata.
7941     LLVMContext &Context = L->getHeader()->getContext();
7942     SmallVector<Metadata *, 1> DisableOperands;
7943     DisableOperands.push_back(
7944         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
7945     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
7946     MDs.push_back(DisableNode);
7947     MDNode *NewLoopID = MDNode::get(Context, MDs);
7948     // Set operand 0 to refer to the loop id itself.
7949     NewLoopID->replaceOperandWith(0, NewLoopID);
7950     L->setLoopID(NewLoopID);
7951   }
7952 }
7953 
7954 void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF,
7955                                            VPlan &BestVPlan,
7956                                            InnerLoopVectorizer &ILV,
7957                                            DominatorTree *DT) {
  LLVM_DEBUG(dbgs() << "Executing best plan with VF=" << BestVF
                    << ", UF=" << BestUF << '\n');
7960 
7961   // Perform the actual loop transformation.
7962 
7963   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
7964   VPTransformState State{BestVF, BestUF, LI, DT, ILV.Builder, &ILV, &BestVPlan};
7965   Value *CanonicalIVStartValue;
7966   std::tie(State.CFG.PrevBB, CanonicalIVStartValue) =
7967       ILV.createVectorizedLoopSkeleton();
7968   ILV.collectPoisonGeneratingRecipes(State);
7969 
7970   ILV.printDebugTracesAtStart();
7971 
7972   //===------------------------------------------------===//
7973   //
  // Notice: any optimization or new instruction that goes
  // into the code below should also be implemented in
  // the cost-model.
7977   //
7978   //===------------------------------------------------===//
7979 
7980   // 2. Copy and widen instructions from the old loop into the new loop.
7981   BestVPlan.prepareToExecute(ILV.getOrCreateTripCount(nullptr),
7982                              ILV.getOrCreateVectorTripCount(nullptr),
7983                              CanonicalIVStartValue, State);
7984   BestVPlan.execute(&State);
7985 
7986   // Keep all loop hints from the original loop on the vector loop (we'll
7987   // replace the vectorizer-specific hints below).
7988   MDNode *OrigLoopID = OrigLoop->getLoopID();
7989 
7990   Optional<MDNode *> VectorizedLoopID =
7991       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
7992                                       LLVMLoopVectorizeFollowupVectorized});
7993 
7994   Loop *L = LI->getLoopFor(State.CFG.PrevBB);
7995   if (VectorizedLoopID.hasValue())
7996     L->setLoopID(VectorizedLoopID.getValue());
7997   else {
7998     // Keep all loop hints from the original loop on the vector loop (we'll
7999     // replace the vectorizer-specific hints below).
8000     if (MDNode *LID = OrigLoop->getLoopID())
8001       L->setLoopID(LID);
8002 
8003     LoopVectorizeHints Hints(L, true, *ORE);
8004     Hints.setAlreadyVectorized();
8005   }
8006   // Disable runtime unrolling when vectorizing the epilogue loop.
8007   if (CanonicalIVStartValue)
8008     AddRuntimeUnrollDisableMetaData(L);
8009 
8010   // 3. Fix the vectorized code: take care of header phi's, live-outs,
8011   //    predication, updating analyses.
8012   ILV.fixVectorizedLoop(State);
8013 
8014   ILV.printDebugTracesAtEnd();
8015 }
8016 
8017 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
8018 void LoopVectorizationPlanner::printPlans(raw_ostream &O) {
8019   for (const auto &Plan : VPlans)
8020     if (PrintVPlansInDotFormat)
8021       Plan->printDOT(O);
8022     else
8023       Plan->print(O);
8024 }
8025 #endif
8026 
8027 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
8028     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
8029 
  // We create new control-flow for the vectorized loop, so the original exit
  // conditions will be dead after vectorization if they are only used by the
  // terminator.
8033   SmallVector<BasicBlock*> ExitingBlocks;
8034   OrigLoop->getExitingBlocks(ExitingBlocks);
8035   for (auto *BB : ExitingBlocks) {
8036     auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
8037     if (!Cmp || !Cmp->hasOneUse())
8038       continue;
8039 
8040     // TODO: we should introduce a getUniqueExitingBlocks on Loop
8041     if (!DeadInstructions.insert(Cmp).second)
8042       continue;
8043 
    // The operands of the icmp are often dead truncs, used by IndUpdate.
    // TODO: can recurse through operands in general.
8046     for (Value *Op : Cmp->operands()) {
      if (isa<TruncInst>(Op) && Op->hasOneUse())
        DeadInstructions.insert(cast<Instruction>(Op));
8049     }
8050   }
8051 
8052   // We create new "steps" for induction variable updates to which the original
8053   // induction variables map. An original update instruction will be dead if
8054   // all its users except the induction variable are dead.
8055   auto *Latch = OrigLoop->getLoopLatch();
8056   for (auto &Induction : Legal->getInductionVars()) {
8057     PHINode *Ind = Induction.first;
8058     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
8059 
    // If the tail is to be folded by masking, the primary induction variable,
    // if it exists, isn't dead: it will be used for masking. Don't kill it.
8062     if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction())
8063       continue;
8064 
8065     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
8066           return U == Ind || DeadInstructions.count(cast<Instruction>(U));
8067         }))
8068       DeadInstructions.insert(IndUpdate);
8069   }
8070 }
8071 
8072 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
8073 
8074 //===--------------------------------------------------------------------===//
8075 // EpilogueVectorizerMainLoop
8076 //===--------------------------------------------------------------------===//
8077 
8078 /// This function is partially responsible for generating the control flow
8079 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
8080 std::pair<BasicBlock *, Value *>
8081 EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
8082   MDNode *OrigLoopID = OrigLoop->getLoopID();
8083   Loop *Lp = createVectorLoopSkeleton("");
8084 
8085   // Generate the code to check the minimum iteration count of the vector
8086   // epilogue (see below).
8087   EPI.EpilogueIterationCountCheck =
8088       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true);
8089   EPI.EpilogueIterationCountCheck->setName("iter.check");
8090 
8091   // Generate the code to check any assumptions that we've made for SCEV
8092   // expressions.
8093   EPI.SCEVSafetyCheck = emitSCEVChecks(Lp, LoopScalarPreHeader);
8094 
8095   // Generate the code that checks at runtime if arrays overlap. We put the
8096   // checks into a separate block to make the more common case of few elements
8097   // faster.
8098   EPI.MemSafetyCheck = emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
8099 
  // Generate the iteration count check for the main loop, *after* the check
  // for the epilogue loop, so that the path-length is shorter for the case
  // that goes directly through the vector epilogue. The longer path-length
  // for the main loop is compensated for by the gain from vectorizing the
  // larger trip count. Note: the branch will get updated later on when we
  // vectorize the epilogue.
8106   EPI.MainLoopIterationCountCheck =
8107       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false);
8108 
8109   // Generate the induction variable.
8110   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
8111   EPI.VectorTripCount = CountRoundDown;
8112   createHeaderBranch(Lp);
8113 
8114   // Skip induction resume value creation here because they will be created in
8115   // the second pass. If we created them here, they wouldn't be used anyway,
8116   // because the vplan in the second pass still contains the inductions from the
8117   // original loop.
8118 
8119   return {completeLoopSkeleton(Lp, OrigLoopID), nullptr};
8120 }
8121 
8122 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
8123   LLVM_DEBUG({
8124     dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
8125            << "Main Loop VF:" << EPI.MainLoopVF
8126            << ", Main Loop UF:" << EPI.MainLoopUF
8127            << ", Epilogue Loop VF:" << EPI.EpilogueVF
8128            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8129   });
8130 }
8131 
8132 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
8133   DEBUG_WITH_TYPE(VerboseDebug, {
8134     dbgs() << "intermediate fn:\n"
8135            << *OrigLoop->getHeader()->getParent() << "\n";
8136   });
8137 }
8138 
8139 BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(
8140     Loop *L, BasicBlock *Bypass, bool ForEpilogue) {
8141   assert(L && "Expected valid Loop.");
8142   assert(Bypass && "Expected valid bypass basic block.");
8143   ElementCount VFactor = ForEpilogue ? EPI.EpilogueVF : VF;
8144   unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF;
8145   Value *Count = getOrCreateTripCount(L);
  // Reuse existing vector loop preheader for TC checks.
  // Note that a new preheader block is generated for the vector loop.
8148   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
8149   IRBuilder<> Builder(TCCheckBlock->getTerminator());
8150 
  // Generate code to check if the loop's trip count is less than VF * UF of
  // the vector loop being checked (the epilogue loop when ForEpilogue is set,
  // the main loop otherwise).
8153   auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF) ?
8154       ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
8155 
8156   Value *CheckMinIters = Builder.CreateICmp(
8157       P, Count, createStepForVF(Builder, Count->getType(), VFactor, UFactor),
8158       "min.iters.check");
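  // For example (hypothetical values): with VFactor = 8 and UFactor = 2,
  // CheckMinIters is 'Count < 16', or 'Count <= 16' when a scalar epilogue is
  // required, and a true result branches to the bypass block.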
8159 
8160   if (!ForEpilogue)
8161     TCCheckBlock->setName("vector.main.loop.iter.check");
8162 
8163   // Create new preheader for vector loop.
8164   LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
8165                                    DT, LI, nullptr, "vector.ph");
8166 
8167   if (ForEpilogue) {
8168     assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
8169                                  DT->getNode(Bypass)->getIDom()) &&
8170            "TC check is expected to dominate Bypass");
8171 
8172     // Update dominator for Bypass & LoopExit.
8173     DT->changeImmediateDominator(Bypass, TCCheckBlock);
8174     if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF))
8175       // For loops with multiple exits, there's no edge from the middle block
8176       // to exit blocks (as the epilogue must run) and thus no need to update
8177       // the immediate dominator of the exit blocks.
8178       DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
8179 
8180     LoopBypassBlocks.push_back(TCCheckBlock);
8181 
8182     // Save the trip count so we don't have to regenerate it in the
8183     // vec.epilog.iter.check. This is safe to do because the trip count
8184     // generated here dominates the vector epilog iter check.
8185     EPI.TripCount = Count;
8186   }
8187 
8188   ReplaceInstWithInst(
8189       TCCheckBlock->getTerminator(),
8190       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8191 
8192   return TCCheckBlock;
8193 }
8194 
8195 //===--------------------------------------------------------------------===//
8196 // EpilogueVectorizerEpilogueLoop
8197 //===--------------------------------------------------------------------===//
8198 
8199 /// This function is partially responsible for generating the control flow
8200 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
8201 std::pair<BasicBlock *, Value *>
8202 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
8203   MDNode *OrigLoopID = OrigLoop->getLoopID();
8204   Loop *Lp = createVectorLoopSkeleton("vec.epilog.");
8205 
  // Now, compare the remaining count and, if there aren't enough iterations
  // to execute the vectorized epilogue, skip to the scalar part.
8208   BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader;
8209   VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check");
8210   LoopVectorPreHeader =
8211       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
8212                  LI, nullptr, "vec.epilog.ph");
8213   emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader,
8214                                           VecEpilogueIterationCountCheck);
8215 
8216   // Adjust the control flow taking the state info from the main loop
8217   // vectorization into account.
8218   assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck &&
8219          "expected this to be saved from the previous pass.");
8220   EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith(
8221       VecEpilogueIterationCountCheck, LoopVectorPreHeader);
8222 
8223   DT->changeImmediateDominator(LoopVectorPreHeader,
8224                                EPI.MainLoopIterationCountCheck);
8225 
8226   EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
8227       VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8228 
8229   if (EPI.SCEVSafetyCheck)
8230     EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith(
8231         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8232   if (EPI.MemSafetyCheck)
8233     EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith(
8234         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8235 
8236   DT->changeImmediateDominator(
8237       VecEpilogueIterationCountCheck,
8238       VecEpilogueIterationCountCheck->getSinglePredecessor());
8239 
8240   DT->changeImmediateDominator(LoopScalarPreHeader,
8241                                EPI.EpilogueIterationCountCheck);
8242   if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF))
    // If there is an epilogue which must run, there's no edge from the
    // middle block to exit blocks and thus no need to update the immediate
    // dominator of the exit blocks.
8246     DT->changeImmediateDominator(LoopExitBlock,
8247                                  EPI.EpilogueIterationCountCheck);
8248 
8249   // Keep track of bypass blocks, as they feed start values to the induction
8250   // phis in the scalar loop preheader.
8251   if (EPI.SCEVSafetyCheck)
8252     LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck);
8253   if (EPI.MemSafetyCheck)
8254     LoopBypassBlocks.push_back(EPI.MemSafetyCheck);
8255   LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck);
8256 
8257   // Generate a resume induction for the vector epilogue and put it in the
8258   // vector epilogue preheader
8259   Type *IdxTy = Legal->getWidestInductionType();
8260   PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
8261                                          LoopVectorPreHeader->getFirstNonPHI());
8262   EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
8263   EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
8264                            EPI.MainLoopIterationCountCheck);
8265 
8266   // Generate the induction variable.
8267   createHeaderBranch(Lp);
8268 
8269   // Generate induction resume values. These variables save the new starting
8270   // indexes for the scalar loop. They are used to test if there are any tail
8271   // iterations left once the vector loop has completed.
8272   // Note that when the vectorized epilogue is skipped due to iteration count
8273   // check, then the resume value for the induction variable comes from
8274   // the trip count of the main vector loop, hence passing the AdditionalBypass
8275   // argument.
8276   createInductionResumeValues(Lp, {VecEpilogueIterationCountCheck,
8277                                    EPI.VectorTripCount} /* AdditionalBypass */);
8278 
8279   return {completeLoopSkeleton(Lp, OrigLoopID), EPResumeVal};
8280 }
8281 
8282 BasicBlock *
8283 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
8284     Loop *L, BasicBlock *Bypass, BasicBlock *Insert) {
8285 
  assert(EPI.TripCount &&
         "Expected trip count to have been saved in the first pass.");
8288   assert(
8289       (!isa<Instruction>(EPI.TripCount) ||
8290        DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
8291       "saved trip count does not dominate insertion point.");
8292   Value *TC = EPI.TripCount;
8293   IRBuilder<> Builder(Insert->getTerminator());
8294   Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");
8295 
8296   // Generate code to check if the loop's trip count is less than VF * UF of the
8297   // vector epilogue loop.
8298   auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ?
8299       ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
8300 
8301   Value *CheckMinIters =
8302       Builder.CreateICmp(P, Count,
8303                          createStepForVF(Builder, Count->getType(),
8304                                          EPI.EpilogueVF, EPI.EpilogueUF),
8305                          "min.epilog.iters.check");
8306 
8307   ReplaceInstWithInst(
8308       Insert->getTerminator(),
8309       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8310 
8311   LoopBypassBlocks.push_back(Insert);
8312   return Insert;
8313 }
8314 
8315 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
8316   LLVM_DEBUG({
8317     dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
8318            << "Epilogue Loop VF:" << EPI.EpilogueVF
8319            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8320   });
8321 }
8322 
8323 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
8324   DEBUG_WITH_TYPE(VerboseDebug, {
8325     dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n";
8326   });
8327 }
8328 
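// Illustrative example (hypothetical): with Range = [2, 16) and a predicate
// that returns true for VF = 2 and VF = 4 but false for VF = 8, the range is
// clamped to [2, 8) and the function returns true, so a single decision covers
// the remaining sub-range.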
8329 bool LoopVectorizationPlanner::getDecisionAndClampRange(
8330     const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
8331   assert(!Range.isEmpty() && "Trying to test an empty VF range.");
8332   bool PredicateAtRangeStart = Predicate(Range.Start);
8333 
8334   for (ElementCount TmpVF = Range.Start * 2;
8335        ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
8336     if (Predicate(TmpVF) != PredicateAtRangeStart) {
8337       Range.End = TmpVF;
8338       break;
8339     }
8340 
8341   return PredicateAtRangeStart;
8342 }
8343 
8344 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
8345 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
8346 /// of VF's starting at a given VF and extending it as much as possible. Each
8347 /// vectorization decision can potentially shorten this sub-range during
8348 /// buildVPlan().
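/// For example (hypothetical): with MinVF = 2 and MaxVF = 16, the loop below
/// might build one VPlan covering {2, 4} and another covering {8, 16},
/// depending on how each buildVPlan() call clamps its sub-range.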
8349 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
8350                                            ElementCount MaxVF) {
8351   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8352   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8353     VFRange SubRange = {VF, MaxVFPlusOne};
8354     VPlans.push_back(buildVPlan(SubRange));
8355     VF = SubRange.End;
8356   }
8357 }
8358 
8359 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
8360                                          VPlanPtr &Plan) {
8361   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
8362 
8363   // Look for cached value.
8364   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
8365   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
8366   if (ECEntryIt != EdgeMaskCache.end())
8367     return ECEntryIt->second;
8368 
8369   VPValue *SrcMask = createBlockInMask(Src, Plan);
8370 
8371   // The terminator has to be a branch inst!
8372   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
8373   assert(BI && "Unexpected terminator found");
8374 
8375   if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
8376     return EdgeMaskCache[Edge] = SrcMask;
8377 
8378   // If source is an exiting block, we know the exit edge is dynamically dead
8379   // in the vector loop, and thus we don't need to restrict the mask.  Avoid
8380   // adding uses of an otherwise potentially dead instruction.
8381   if (OrigLoop->isLoopExiting(Src))
8382     return EdgeMaskCache[Edge] = SrcMask;
8383 
8384   VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition());
8385   assert(EdgeMask && "No Edge Mask found for condition");
8386 
8387   if (BI->getSuccessor(0) != Dst)
8388     EdgeMask = Builder.createNot(EdgeMask, BI->getDebugLoc());
8389 
8390   if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND.
8391     // The condition is 'SrcMask && EdgeMask', which is equivalent to
8392     // 'select i1 SrcMask, i1 EdgeMask, i1 false'.
8393     // The select version does not introduce new UB if SrcMask is false and
8394     // EdgeMask is poison. Using 'and' here introduces undefined behavior.
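    // For example (illustrative IR, not emitted verbatim here):
    //   %mask = select i1 %src.mask, i1 %edge.mask, i1 false
    // is used rather than
    //   %mask = and i1 %src.mask, %edge.mask
    // so that a poison %edge.mask is not propagated when %src.mask is false.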
8395     VPValue *False = Plan->getOrAddVPValue(
8396         ConstantInt::getFalse(BI->getCondition()->getType()));
8397     EdgeMask =
8398         Builder.createSelect(SrcMask, EdgeMask, False, BI->getDebugLoc());
8399   }
8400 
8401   return EdgeMaskCache[Edge] = EdgeMask;
8402 }
8403 
8404 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
8405   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
8406 
8407   // Look for cached value.
8408   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
8409   if (BCEntryIt != BlockMaskCache.end())
8410     return BCEntryIt->second;
8411 
8412   // All-one mask is modelled as no-mask following the convention for masked
8413   // load/store/gather/scatter. Initialize BlockMask to no-mask.
8414   VPValue *BlockMask = nullptr;
8415 
8416   if (OrigLoop->getHeader() == BB) {
8417     if (!CM.blockNeedsPredicationForAnyReason(BB))
8418       return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
8419 
8420     // Introduce the early-exit compare IV <= BTC to form header block mask.
8421     // This is used instead of IV < TC because TC may wrap, unlike BTC. Start by
8422     // constructing the desired canonical IV in the header block as its first
8423     // non-phi instructions.
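    // (For example, with an i8 IV and 256 iterations, TC wraps to 0 while BTC
    // is 255, so the "IV <= BTC" comparison remains a usable bound.)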
8424     assert(CM.foldTailByMasking() && "must fold the tail");
8425     VPBasicBlock *HeaderVPBB = Plan->getEntry()->getEntryBasicBlock();
8426     auto NewInsertionPoint = HeaderVPBB->getFirstNonPhi();
8427 
8428     VPValue *IV = nullptr;
8429     if (Legal->getPrimaryInduction())
8430       IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction());
8431     else {
8432       auto *IVRecipe = new VPWidenCanonicalIVRecipe(Plan->getCanonicalIV());
8433       HeaderVPBB->insert(IVRecipe, NewInsertionPoint);
8434       IV = IVRecipe;
8435     }
8436 
8437     VPBuilder::InsertPointGuard Guard(Builder);
8438     Builder.setInsertPoint(HeaderVPBB, NewInsertionPoint);
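    // Depending on target support, the header mask is either an
    // active-lane-mask computed from the IV and the trip count (e.g. via
    // llvm.get.active.lane.mask) or an unsigned "IV <= BTC" compare.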
8439     if (CM.TTI.emitGetActiveLaneMask()) {
8440       VPValue *TC = Plan->getOrCreateTripCount();
8441       BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV, TC});
8442     } else {
8443       VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
8444       BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
8445     }
8446     return BlockMaskCache[BB] = BlockMask;
8447   }
8448 
8449   // This is the block mask. We OR all incoming edges.
8450   for (auto *Predecessor : predecessors(BB)) {
8451     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
8452     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
8453       return BlockMaskCache[BB] = EdgeMask;
8454 
8455     if (!BlockMask) { // BlockMask has its initialized nullptr value.
8456       BlockMask = EdgeMask;
8457       continue;
8458     }
8459 
8460     BlockMask = Builder.createOr(BlockMask, EdgeMask, {});
8461   }
8462 
8463   return BlockMaskCache[BB] = BlockMask;
8464 }
8465 
8466 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I,
8467                                                 ArrayRef<VPValue *> Operands,
8468                                                 VFRange &Range,
8469                                                 VPlanPtr &Plan) {
8470   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
8471          "Must be called with either a load or store");
8472 
8473   auto willWiden = [&](ElementCount VF) -> bool {
8474     if (VF.isScalar())
8475       return false;
8476     LoopVectorizationCostModel::InstWidening Decision =
8477         CM.getWideningDecision(I, VF);
8478     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
8479            "CM decision should be taken at this point.");
8480     if (Decision == LoopVectorizationCostModel::CM_Interleave)
8481       return true;
8482     if (CM.isScalarAfterVectorization(I, VF) ||
8483         CM.isProfitableToScalarize(I, VF))
8484       return false;
8485     return Decision != LoopVectorizationCostModel::CM_Scalarize;
8486   };
8487 
8488   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8489     return nullptr;
8490 
8491   VPValue *Mask = nullptr;
8492   if (Legal->isMaskRequired(I))
8493     Mask = createBlockInMask(I->getParent(), Plan);
8494 
8495   // Determine if the pointer operand of the access is either consecutive or
8496   // reverse consecutive.
8497   LoopVectorizationCostModel::InstWidening Decision =
8498       CM.getWideningDecision(I, Range.Start);
8499   bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse;
8500   bool Consecutive =
8501       Reverse || Decision == LoopVectorizationCostModel::CM_Widen;
8502 
8503   if (LoadInst *Load = dyn_cast<LoadInst>(I))
8504     return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask,
8505                                               Consecutive, Reverse);
8506 
8507   StoreInst *Store = cast<StoreInst>(I);
8508   return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0],
8509                                             Mask, Consecutive, Reverse);
8510 }
8511 
8512 VPWidenIntOrFpInductionRecipe *
8513 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi,
8514                                            ArrayRef<VPValue *> Operands) const {
8515   // Check if this is an integer or fp induction. If so, build the recipe that
8516   // produces its scalar and vector values.
8517   if (auto *II = Legal->getIntOrFpInductionDescriptor(Phi)) {
8518     assert(II->getStartValue() ==
8519            Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8520     return new VPWidenIntOrFpInductionRecipe(Phi, Operands[0], *II);
8521   }
8522 
8523   return nullptr;
8524 }
8525 
8526 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate(
8527     TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range,
8528     VPlan &Plan) const {
8529   // Optimize the special case where the source is a constant integer
8530   // induction variable. Notice that we can only optimize the 'trunc' case
8531   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
8532   // (c) other casts depend on pointer size.
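  // For example (illustrative), a 'trunc i64 %iv to i32' whose only purpose is
  // to feed i32 users can be widened directly as an i32 induction, instead of
  // widening the i64 induction and truncating every vector lane.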
8533 
8534   // Determine whether \p K is a truncation based on an induction variable that
8535   // can be optimized.
8536   auto isOptimizableIVTruncate =
8537       [&](Instruction *K) -> std::function<bool(ElementCount)> {
8538     return [=](ElementCount VF) -> bool {
8539       return CM.isOptimizableIVTruncate(K, VF);
8540     };
8541   };
8542 
8543   if (LoopVectorizationPlanner::getDecisionAndClampRange(
8544           isOptimizableIVTruncate(I), Range)) {
8545 
8546     auto *Phi = cast<PHINode>(I->getOperand(0));
8547     const InductionDescriptor &II = *Legal->getIntOrFpInductionDescriptor(Phi);
8548     VPValue *Start = Plan.getOrAddVPValue(II.getStartValue());
8549     return new VPWidenIntOrFpInductionRecipe(Phi, Start, II, I);
8550   }
8551   return nullptr;
8552 }
8553 
8554 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi,
8555                                                 ArrayRef<VPValue *> Operands,
8556                                                 VPlanPtr &Plan) {
8557   // If all incoming values are equal, the incoming VPValue can be used directly
8558   // instead of creating a new VPBlendRecipe.
8559   VPValue *FirstIncoming = Operands[0];
8560   if (all_of(Operands, [FirstIncoming](const VPValue *Inc) {
8561         return FirstIncoming == Inc;
8562       })) {
8563     return Operands[0];
8564   }
8565 
8566   // We know that all PHIs in non-header blocks are converted into selects, so
8567   // we don't have to worry about the insertion order and we can just use the
8568   // builder. At this point we generate the predication tree. There may be
8569   // duplications since this is a simple recursive scan, but future
8570   // optimizations will clean it up.
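  // For example (conceptually), a two-way phi with incoming values %a, %b and
  // edge masks %m.a, %m.b becomes a blend that later lowers to roughly
  // 'select %m.b, %b, %a'.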
8571   SmallVector<VPValue *, 2> OperandsWithMask;
8572   unsigned NumIncoming = Phi->getNumIncomingValues();
8573 
8574   for (unsigned In = 0; In < NumIncoming; In++) {
8575     VPValue *EdgeMask =
8576       createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
8577     assert((EdgeMask || NumIncoming == 1) &&
8578            "Multiple predecessors with one having a full mask");
8579     OperandsWithMask.push_back(Operands[In]);
8580     if (EdgeMask)
8581       OperandsWithMask.push_back(EdgeMask);
8582   }
8583   return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask));
8584 }
8585 
8586 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
8587                                                    ArrayRef<VPValue *> Operands,
8588                                                    VFRange &Range) const {
8589 
8590   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8591       [this, CI](ElementCount VF) {
8592         return CM.isScalarWithPredication(CI, VF);
8593       },
8594       Range);
8595 
8596   if (IsPredicated)
8597     return nullptr;
8598 
8599   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8600   if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
8601              ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
8602              ID == Intrinsic::pseudoprobe ||
8603              ID == Intrinsic::experimental_noalias_scope_decl))
8604     return nullptr;
8605 
8606   auto willWiden = [&](ElementCount VF) -> bool {
8607     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    // The following case may be scalarized depending on the VF.
    // The flag indicates whether to use an intrinsic or an ordinary call for
    // the vectorized version of the instruction, i.e. whether an intrinsic
    // call is more beneficial than a library call.
8612     bool NeedToScalarize = false;
8613     InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
8614     InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0;
8615     bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
8616     return UseVectorIntrinsic || !NeedToScalarize;
8617   };
8618 
8619   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8620     return nullptr;
8621 
8622   ArrayRef<VPValue *> Ops = Operands.take_front(CI->arg_size());
8623   return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end()));
8624 }
8625 
8626 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
8627   assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
8628          !isa<StoreInst>(I) && "Instruction should have been handled earlier");
  // The instruction should be widened, unless it is scalar after
  // vectorization, scalarization is profitable, or it is predicated.
8631   auto WillScalarize = [this, I](ElementCount VF) -> bool {
8632     return CM.isScalarAfterVectorization(I, VF) ||
8633            CM.isProfitableToScalarize(I, VF) ||
8634            CM.isScalarWithPredication(I, VF);
8635   };
8636   return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
8637                                                              Range);
8638 }
8639 
8640 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
8641                                            ArrayRef<VPValue *> Operands) const {
8642   auto IsVectorizableOpcode = [](unsigned Opcode) {
8643     switch (Opcode) {
8644     case Instruction::Add:
8645     case Instruction::And:
8646     case Instruction::AShr:
8647     case Instruction::BitCast:
8648     case Instruction::FAdd:
8649     case Instruction::FCmp:
8650     case Instruction::FDiv:
8651     case Instruction::FMul:
8652     case Instruction::FNeg:
8653     case Instruction::FPExt:
8654     case Instruction::FPToSI:
8655     case Instruction::FPToUI:
8656     case Instruction::FPTrunc:
8657     case Instruction::FRem:
8658     case Instruction::FSub:
8659     case Instruction::ICmp:
8660     case Instruction::IntToPtr:
8661     case Instruction::LShr:
8662     case Instruction::Mul:
8663     case Instruction::Or:
8664     case Instruction::PtrToInt:
8665     case Instruction::SDiv:
8666     case Instruction::Select:
8667     case Instruction::SExt:
8668     case Instruction::Shl:
8669     case Instruction::SIToFP:
8670     case Instruction::SRem:
8671     case Instruction::Sub:
8672     case Instruction::Trunc:
8673     case Instruction::UDiv:
8674     case Instruction::UIToFP:
8675     case Instruction::URem:
8676     case Instruction::Xor:
8677     case Instruction::ZExt:
8678       return true;
8679     }
8680     return false;
8681   };
8682 
8683   if (!IsVectorizableOpcode(I->getOpcode()))
8684     return nullptr;
8685 
8686   // Success: widen this instruction.
8687   return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end()));
8688 }
8689 
8690 void VPRecipeBuilder::fixHeaderPhis() {
8691   BasicBlock *OrigLatch = OrigLoop->getLoopLatch();
8692   for (VPHeaderPHIRecipe *R : PhisToFix) {
8693     auto *PN = cast<PHINode>(R->getUnderlyingValue());
8694     VPRecipeBase *IncR =
8695         getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch)));
8696     R->addOperand(IncR->getVPSingleValue());
8697   }
8698 }
8699 
8700 VPBasicBlock *VPRecipeBuilder::handleReplication(
8701     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
8702     VPlanPtr &Plan) {
8703   bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
8704       [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
8705       Range);
8706 
8707   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8708       [&](ElementCount VF) { return CM.isPredicatedInst(I, VF, IsUniform); },
8709       Range);
8710 
8711   // Even if the instruction is not marked as uniform, there are certain
8712   // intrinsic calls that can be effectively treated as such, so we check for
8713   // them here. Conservatively, we only do this for scalable vectors, since
8714   // for fixed-width VFs we can always fall back on full scalarization.
8715   if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) {
8716     switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
8717     case Intrinsic::assume:
8718     case Intrinsic::lifetime_start:
8719     case Intrinsic::lifetime_end:
      // For scalable vectors, if one of the operands is variant then we still
      // want to mark the call as uniform, which will generate one instruction
      // for just the first lane of the vector. We can't scalarize the call in
      // the same way as for fixed-width vectors because we don't know how many
      // lanes there are.
8725       //
8726       // The reasons for doing it this way for scalable vectors are:
      //   1. For the assume intrinsic, generating the instruction for the
      //      first lane is still better than not generating any at all. For
8729       //      example, the input may be a splat across all lanes.
8730       //   2. For the lifetime start/end intrinsics the pointer operand only
8731       //      does anything useful when the input comes from a stack object,
8732       //      which suggests it should always be uniform. For non-stack objects
8733       //      the effect is to poison the object, which still allows us to
8734       //      remove the call.
8735       IsUniform = true;
8736       break;
8737     default:
8738       break;
8739     }
8740   }
8741 
8742   auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
8743                                        IsUniform, IsPredicated);
8744   setRecipe(I, Recipe);
8745   Plan->addVPValue(I, Recipe);
8746 
8747   // Find if I uses a predicated instruction. If so, it will use its scalar
8748   // value. Avoid hoisting the insert-element which packs the scalar value into
8749   // a vector value, as that happens iff all users use the vector value.
8750   for (VPValue *Op : Recipe->operands()) {
8751     auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef());
8752     if (!PredR)
8753       continue;
8754     auto *RepR =
8755         cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef());
8756     assert(RepR->isPredicated() &&
8757            "expected Replicate recipe to be predicated");
8758     RepR->setAlsoPack(false);
8759   }
8760 
8761   // Finalize the recipe for Instr, first if it is not predicated.
8762   if (!IsPredicated) {
8763     LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
8764     VPBB->appendRecipe(Recipe);
8765     return VPBB;
8766   }
8767   LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
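  // The predicated recipe is wrapped in a replicate region that is spliced in
  // between VPBB and its single successor, followed by a fresh VPBasicBlock:
  //   VPBB -> [pred.<opcode> region] -> new VPBasicBlock -> SingleSucc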
8768 
8769   VPBlockBase *SingleSucc = VPBB->getSingleSuccessor();
8770   assert(SingleSucc && "VPBB must have a single successor when handling "
8771                        "predicated replication.");
8772   VPBlockUtils::disconnectBlocks(VPBB, SingleSucc);
8773   // Record predicated instructions for above packing optimizations.
8774   VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
8775   VPBlockUtils::insertBlockAfter(Region, VPBB);
8776   auto *RegSucc = new VPBasicBlock();
8777   VPBlockUtils::insertBlockAfter(RegSucc, Region);
8778   VPBlockUtils::connectBlocks(RegSucc, SingleSucc);
8779   return RegSucc;
8780 }
8781 
8782 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
8783                                                       VPRecipeBase *PredRecipe,
8784                                                       VPlanPtr &Plan) {
8785   // Instructions marked for predication are replicated and placed under an
8786   // if-then construct to prevent side-effects.
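  // The region built below is a triangle (roughly):
  //   <region>.entry:    BranchOnMask(BlockInMask)
  //   <region>.if:       the predicated recipe (PredRecipe)
  //   <region>.continue: optional VPPredInstPHIRecipe merging the result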
8787 
8788   // Generate recipes to compute the block mask for this region.
8789   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
8790 
8791   // Build the triangular if-then region.
8792   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
8793   assert(Instr->getParent() && "Predicated instruction not in any basic block");
8794   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
8795   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
8796   auto *PHIRecipe = Instr->getType()->isVoidTy()
8797                         ? nullptr
8798                         : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr));
8799   if (PHIRecipe) {
8800     Plan->removeVPValueFor(Instr);
8801     Plan->addVPValue(Instr, PHIRecipe);
8802   }
8803   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
8804   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
8805   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
8806 
8807   // Note: first set Entry as region entry and then connect successors starting
8808   // from it in order, to propagate the "parent" of each VPBasicBlock.
8809   VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
8810   VPBlockUtils::connectBlocks(Pred, Exit);
8811 
8812   return Region;
8813 }
8814 
8815 VPRecipeOrVPValueTy
8816 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
8817                                         ArrayRef<VPValue *> Operands,
8818                                         VFRange &Range, VPlanPtr &Plan) {
8819   // First, check for specific widening recipes that deal with calls, memory
8820   // operations, inductions and Phi nodes.
8821   if (auto *CI = dyn_cast<CallInst>(Instr))
8822     return toVPRecipeResult(tryToWidenCall(CI, Operands, Range));
8823 
8824   if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8825     return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan));
8826 
8827   VPRecipeBase *Recipe;
8828   if (auto Phi = dyn_cast<PHINode>(Instr)) {
8829     if (Phi->getParent() != OrigLoop->getHeader())
8830       return tryToBlend(Phi, Operands, Plan);
8831     if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands)))
8832       return toVPRecipeResult(Recipe);
8833 
8834     VPHeaderPHIRecipe *PhiRecipe = nullptr;
8835     if (Legal->isReductionVariable(Phi) || Legal->isFirstOrderRecurrence(Phi)) {
8836       VPValue *StartV = Operands[0];
8837       if (Legal->isReductionVariable(Phi)) {
8838         const RecurrenceDescriptor &RdxDesc =
8839             Legal->getReductionVars().find(Phi)->second;
8840         assert(RdxDesc.getRecurrenceStartValue() ==
8841                Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8842         PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV,
8843                                              CM.isInLoopReduction(Phi),
8844                                              CM.useOrderedReductions(RdxDesc));
8845       } else {
8846         PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV);
8847       }
8848 
8849       // Record the incoming value from the backedge, so we can add the incoming
8850       // value from the backedge after all recipes have been created.
8851       recordRecipeOf(cast<Instruction>(
8852           Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch())));
8853       PhisToFix.push_back(PhiRecipe);
8854     } else {
8855       // TODO: record backedge value for remaining pointer induction phis.
8856       assert(Phi->getType()->isPointerTy() &&
8857              "only pointer phis should be handled here");
8858       assert(Legal->getInductionVars().count(Phi) &&
8859              "Not an induction variable");
8860       InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
8861       VPValue *Start = Plan->getOrAddVPValue(II.getStartValue());
8862       PhiRecipe = new VPWidenPHIRecipe(Phi, Start);
8863     }
8864 
8865     return toVPRecipeResult(PhiRecipe);
8866   }
8867 
8868   if (isa<TruncInst>(Instr) &&
8869       (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands,
8870                                                Range, *Plan)))
8871     return toVPRecipeResult(Recipe);
8872 
8873   if (!shouldWiden(Instr, Range))
8874     return nullptr;
8875 
8876   if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
8877     return toVPRecipeResult(new VPWidenGEPRecipe(
8878         GEP, make_range(Operands.begin(), Operands.end()), OrigLoop));
8879 
8880   if (auto *SI = dyn_cast<SelectInst>(Instr)) {
8881     bool InvariantCond =
8882         PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
8883     return toVPRecipeResult(new VPWidenSelectRecipe(
8884         *SI, make_range(Operands.begin(), Operands.end()), InvariantCond));
8885   }
8886 
8887   return toVPRecipeResult(tryToWiden(Instr, Operands));
8888 }
8889 
8890 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8891                                                         ElementCount MaxVF) {
8892   assert(OrigLoop->isInnermost() && "Inner loop expected.");
8893 
8894   // Collect instructions from the original loop that will become trivially dead
8895   // in the vectorized loop. We don't need to vectorize these instructions. For
8896   // example, original induction update instructions can become dead because we
8897   // separately emit induction "steps" when generating code for the new loop.
8898   // Similarly, we create a new latch condition when setting up the structure
8899   // of the new loop, so the old one can become dead.
8900   SmallPtrSet<Instruction *, 4> DeadInstructions;
8901   collectTriviallyDeadInstructions(DeadInstructions);
8902 
8903   // Add assume instructions we need to drop to DeadInstructions, to prevent
8904   // them from being added to the VPlan.
  // TODO: We only need to drop assumes in blocks that get flattened. If the
8906   // control flow is preserved, we should keep them.
8907   auto &ConditionalAssumes = Legal->getConditionalAssumes();
8908   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
8909 
8910   MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
8911   // Dead instructions do not need sinking. Remove them from SinkAfter.
8912   for (Instruction *I : DeadInstructions)
8913     SinkAfter.erase(I);
8914 
8915   // Cannot sink instructions after dead instructions (there won't be any
8916   // recipes for them). Instead, find the first non-dead previous instruction.
8917   for (auto &P : Legal->getSinkAfter()) {
8918     Instruction *SinkTarget = P.second;
8919     Instruction *FirstInst = &*SinkTarget->getParent()->begin();
8920     (void)FirstInst;
8921     while (DeadInstructions.contains(SinkTarget)) {
8922       assert(
8923           SinkTarget != FirstInst &&
8924           "Must find a live instruction (at least the one feeding the "
8925           "first-order recurrence PHI) before reaching beginning of the block");
8926       SinkTarget = SinkTarget->getPrevNode();
8927       assert(SinkTarget != P.first &&
8928              "sink source equals target, no sinking required");
8929     }
8930     P.second = SinkTarget;
8931   }
8932 
8933   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8934   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8935     VFRange SubRange = {VF, MaxVFPlusOne};
8936     VPlans.push_back(
8937         buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
8938     VF = SubRange.End;
8939   }
8940 }
8941 
8942 // Add a VPCanonicalIVPHIRecipe starting at 0 to the header, a
8943 // CanonicalIVIncrement{NUW} VPInstruction to increment it by VF * UF and a
8944 // BranchOnCount VPInstruction to the latch.
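// Roughly, the header gains "%iv = canonical-induction(0, %iv.next)" and the
// latch gains "%iv.next = %iv + VF * UF" followed by
// "branch-on-count(%iv.next, vector-trip-count)".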
8945 static void addCanonicalIVRecipes(VPlan &Plan, Type *IdxTy, DebugLoc DL,
8946                                   bool HasNUW, bool IsVPlanNative) {
8947   Value *StartIdx = ConstantInt::get(IdxTy, 0);
8948   auto *StartV = Plan.getOrAddVPValue(StartIdx);
8949 
8950   auto *CanonicalIVPHI = new VPCanonicalIVPHIRecipe(StartV, DL);
8951   VPRegionBlock *TopRegion = Plan.getVectorLoopRegion();
8952   VPBasicBlock *Header = TopRegion->getEntryBasicBlock();
8953   if (IsVPlanNative)
8954     Header = cast<VPBasicBlock>(Header->getSingleSuccessor());
8955   Header->insert(CanonicalIVPHI, Header->begin());
8956 
8957   auto *CanonicalIVIncrement =
8958       new VPInstruction(HasNUW ? VPInstruction::CanonicalIVIncrementNUW
8959                                : VPInstruction::CanonicalIVIncrement,
8960                         {CanonicalIVPHI}, DL);
8961   CanonicalIVPHI->addOperand(CanonicalIVIncrement);
8962 
8963   VPBasicBlock *EB = TopRegion->getExitBasicBlock();
8964   if (IsVPlanNative) {
8965     EB = cast<VPBasicBlock>(EB->getSinglePredecessor());
8966     EB->setCondBit(nullptr);
8967   }
8968   EB->appendRecipe(CanonicalIVIncrement);
8969 
8970   auto *BranchOnCount =
8971       new VPInstruction(VPInstruction::BranchOnCount,
8972                         {CanonicalIVIncrement, &Plan.getVectorTripCount()}, DL);
8973   EB->appendRecipe(BranchOnCount);
8974 }
8975 
8976 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
8977     VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
8978     const MapVector<Instruction *, Instruction *> &SinkAfter) {
8979 
8980   SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
8981 
8982   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
8983 
8984   // ---------------------------------------------------------------------------
8985   // Pre-construction: record ingredients whose recipes we'll need to further
8986   // process after constructing the initial VPlan.
8987   // ---------------------------------------------------------------------------
8988 
8989   // Mark instructions we'll need to sink later and their targets as
8990   // ingredients whose recipe we'll need to record.
8991   for (auto &Entry : SinkAfter) {
8992     RecipeBuilder.recordRecipeOf(Entry.first);
8993     RecipeBuilder.recordRecipeOf(Entry.second);
8994   }
8995   for (auto &Reduction : CM.getInLoopReductionChains()) {
8996     PHINode *Phi = Reduction.first;
8997     RecurKind Kind =
8998         Legal->getReductionVars().find(Phi)->second.getRecurrenceKind();
8999     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9000 
9001     RecipeBuilder.recordRecipeOf(Phi);
9002     for (auto &R : ReductionOperations) {
9003       RecipeBuilder.recordRecipeOf(R);
      // For min/max reductions, where we have a pair of icmp/select, we also
9005       // need to record the ICmp recipe, so it can be removed later.
9006       assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
9007              "Only min/max recurrences allowed for inloop reductions");
9008       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
9009         RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
9010     }
9011   }
9012 
9013   // For each interleave group which is relevant for this (possibly trimmed)
9014   // Range, add it to the set of groups to be later applied to the VPlan and add
9015   // placeholders for its members' Recipes which we'll be replacing with a
9016   // single VPInterleaveRecipe.
9017   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
9018     auto applyIG = [IG, this](ElementCount VF) -> bool {
9019       return (VF.isVector() && // Query is illegal for VF == 1
9020               CM.getWideningDecision(IG->getInsertPos(), VF) ==
9021                   LoopVectorizationCostModel::CM_Interleave);
9022     };
9023     if (!getDecisionAndClampRange(applyIG, Range))
9024       continue;
9025     InterleaveGroups.insert(IG);
9026     for (unsigned i = 0; i < IG->getFactor(); i++)
9027       if (Instruction *Member = IG->getMember(i))
9028         RecipeBuilder.recordRecipeOf(Member);
9029   };
9030 
9031   // ---------------------------------------------------------------------------
9032   // Build initial VPlan: Scan the body of the loop in a topological order to
9033   // visit each basic block after having visited its predecessor basic blocks.
9034   // ---------------------------------------------------------------------------
9035 
9036   // Create initial VPlan skeleton, with separate header and latch blocks.
9037   VPBasicBlock *HeaderVPBB = new VPBasicBlock();
9038   VPBasicBlock *LatchVPBB = new VPBasicBlock("vector.latch");
9039   VPBlockUtils::insertBlockAfter(LatchVPBB, HeaderVPBB);
9040   auto *TopRegion = new VPRegionBlock(HeaderVPBB, LatchVPBB, "vector loop");
9041   auto Plan = std::make_unique<VPlan>(TopRegion);
9042 
9043   Instruction *DLInst =
9044       getDebugLocFromInstOrOperands(Legal->getPrimaryInduction());
9045   addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(),
9046                         DLInst ? DLInst->getDebugLoc() : DebugLoc(),
9047                         !CM.foldTailByMasking(), false);
9048 
9049   // Scan the body of the loop in a topological order to visit each basic block
9050   // after having visited its predecessor basic blocks.
9051   LoopBlocksDFS DFS(OrigLoop);
9052   DFS.perform(LI);
9053 
9054   VPBasicBlock *VPBB = HeaderVPBB;
9055   SmallVector<VPWidenIntOrFpInductionRecipe *> InductionsToMove;
9056   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
9057     // Relevant instructions from basic block BB will be grouped into VPRecipe
9058     // ingredients and fill a new VPBasicBlock.
9059     unsigned VPBBsForBB = 0;
9060     VPBB->setName(BB->getName());
9061     Builder.setInsertPoint(VPBB);
9062 
9063     // Introduce each ingredient into VPlan.
    // TODO: Model and preserve debug intrinsics in VPlan.
9065     for (Instruction &I : BB->instructionsWithoutDebug()) {
9066       Instruction *Instr = &I;
9067 
9068       // First filter out irrelevant instructions, to ensure no recipes are
9069       // built for them.
9070       if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
9071         continue;
9072 
9073       SmallVector<VPValue *, 4> Operands;
9074       auto *Phi = dyn_cast<PHINode>(Instr);
9075       if (Phi && Phi->getParent() == OrigLoop->getHeader()) {
9076         Operands.push_back(Plan->getOrAddVPValue(
9077             Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())));
9078       } else {
9079         auto OpRange = Plan->mapToVPValues(Instr->operands());
9080         Operands = {OpRange.begin(), OpRange.end()};
9081       }
9082       if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe(
9083               Instr, Operands, Range, Plan)) {
9084         // If Instr can be simplified to an existing VPValue, use it.
9085         if (RecipeOrValue.is<VPValue *>()) {
9086           auto *VPV = RecipeOrValue.get<VPValue *>();
9087           Plan->addVPValue(Instr, VPV);
9088           // If the re-used value is a recipe, register the recipe for the
9089           // instruction, in case the recipe for Instr needs to be recorded.
9090           if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef()))
9091             RecipeBuilder.setRecipe(Instr, R);
9092           continue;
9093         }
9094         // Otherwise, add the new recipe.
9095         VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>();
9096         for (auto *Def : Recipe->definedValues()) {
9097           auto *UV = Def->getUnderlyingValue();
9098           Plan->addVPValue(UV, Def);
9099         }
9100 
9101         if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) &&
9102             HeaderVPBB->getFirstNonPhi() != VPBB->end()) {
9103           // Keep track of VPWidenIntOrFpInductionRecipes not in the phi section
9104           // of the header block. That can happen for truncates of induction
9105           // variables. Those recipes are moved to the phi section of the header
9106           // block after applying SinkAfter, which relies on the original
9107           // position of the trunc.
9108           assert(isa<TruncInst>(Instr));
9109           InductionsToMove.push_back(
9110               cast<VPWidenIntOrFpInductionRecipe>(Recipe));
9111         }
9112         RecipeBuilder.setRecipe(Instr, Recipe);
9113         VPBB->appendRecipe(Recipe);
9114         continue;
9115       }
9116 
      // Otherwise, if all widening options failed, the instruction is to be
9118       // replicated. This may create a successor for VPBB.
9119       VPBasicBlock *NextVPBB =
9120           RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan);
9121       if (NextVPBB != VPBB) {
9122         VPBB = NextVPBB;
9123         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
9124                                     : "");
9125       }
9126     }
9127 
9128     VPBlockUtils::insertBlockAfter(new VPBasicBlock(), VPBB);
9129     VPBB = cast<VPBasicBlock>(VPBB->getSingleSuccessor());
9130   }
9131 
9132   // Fold the last, empty block into its predecessor.
9133   VPBB = VPBlockUtils::tryToMergeBlockIntoPredecessor(VPBB);
9134   assert(VPBB && "expected to fold last (empty) block");
9135   // After here, VPBB should not be used.
9136   VPBB = nullptr;
9137 
9138   assert(isa<VPRegionBlock>(Plan->getEntry()) &&
9139          !Plan->getEntry()->getEntryBasicBlock()->empty() &&
9140          "entry block must be set to a VPRegionBlock having a non-empty entry "
9141          "VPBasicBlock");
9142   RecipeBuilder.fixHeaderPhis();
9143 
9144   // ---------------------------------------------------------------------------
9145   // Transform initial VPlan: Apply previously taken decisions, in order, to
9146   // bring the VPlan to its final state.
9147   // ---------------------------------------------------------------------------
9148 
9149   // Apply Sink-After legal constraints.
9150   auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * {
9151     auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent());
9152     if (Region && Region->isReplicator()) {
9153       assert(Region->getNumSuccessors() == 1 &&
9154              Region->getNumPredecessors() == 1 && "Expected SESE region!");
9155       assert(R->getParent()->size() == 1 &&
9156              "A recipe in an original replicator region must be the only "
9157              "recipe in its block");
9158       return Region;
9159     }
9160     return nullptr;
9161   };
9162   for (auto &Entry : SinkAfter) {
9163     VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
9164     VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
9165 
9166     auto *TargetRegion = GetReplicateRegion(Target);
9167     auto *SinkRegion = GetReplicateRegion(Sink);
9168     if (!SinkRegion) {
9169       // If the sink source is not a replicate region, sink the recipe directly.
9170       if (TargetRegion) {
9171         // The target is in a replication region, make sure to move Sink to
9172         // the block after it, not into the replication region itself.
9173         VPBasicBlock *NextBlock =
9174             cast<VPBasicBlock>(TargetRegion->getSuccessors().front());
9175         Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi());
9176       } else
9177         Sink->moveAfter(Target);
9178       continue;
9179     }
9180 
9181     // The sink source is in a replicate region. Unhook the region from the CFG.
9182     auto *SinkPred = SinkRegion->getSinglePredecessor();
9183     auto *SinkSucc = SinkRegion->getSingleSuccessor();
9184     VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion);
9185     VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc);
9186     VPBlockUtils::connectBlocks(SinkPred, SinkSucc);
9187 
9188     if (TargetRegion) {
9189       // The target recipe is also in a replicate region, move the sink region
9190       // after the target region.
9191       auto *TargetSucc = TargetRegion->getSingleSuccessor();
9192       VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc);
9193       VPBlockUtils::connectBlocks(TargetRegion, SinkRegion);
9194       VPBlockUtils::connectBlocks(SinkRegion, TargetSucc);
9195     } else {
9196       // The sink source is in a replicate region, we need to move the whole
9197       // replicate region, which should only contain a single recipe in the
9198       // main block.
9199       auto *SplitBlock =
9200           Target->getParent()->splitAt(std::next(Target->getIterator()));
9201 
9202       auto *SplitPred = SplitBlock->getSinglePredecessor();
9203 
9204       VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock);
9205       VPBlockUtils::connectBlocks(SplitPred, SinkRegion);
9206       VPBlockUtils::connectBlocks(SinkRegion, SplitBlock);
9207     }
9208   }
9209 
9210   VPlanTransforms::removeRedundantInductionCasts(*Plan);
9211 
9212   // Now that sink-after is done, move induction recipes for optimized truncates
9213   // to the phi section of the header block.
9214   for (VPWidenIntOrFpInductionRecipe *Ind : InductionsToMove)
9215     Ind->moveBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi());
9216 
9217   // Adjust the recipes for any inloop reductions.
9218   adjustRecipesForReductions(cast<VPBasicBlock>(TopRegion->getExit()), Plan,
9219                              RecipeBuilder, Range.Start);
9220 
9221   // Introduce a recipe to combine the incoming and previous values of a
9222   // first-order recurrence.
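  // Conceptually, the splice of the recurrence phi and its backedge value is a
  // vector.splice at offset -1: the last lane of the previous vector value
  // followed by the first VF-1 lanes of the current one.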
9223   for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) {
9224     auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R);
9225     if (!RecurPhi)
9226       continue;
9227 
9228     VPRecipeBase *PrevRecipe = RecurPhi->getBackedgeRecipe();
9229     VPBasicBlock *InsertBlock = PrevRecipe->getParent();
9230     auto *Region = GetReplicateRegion(PrevRecipe);
9231     if (Region)
9232       InsertBlock = cast<VPBasicBlock>(Region->getSingleSuccessor());
9233     if (Region || PrevRecipe->isPhi())
9234       Builder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi());
9235     else
9236       Builder.setInsertPoint(InsertBlock, std::next(PrevRecipe->getIterator()));
9237 
9238     auto *RecurSplice = cast<VPInstruction>(
9239         Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice,
9240                              {RecurPhi, RecurPhi->getBackedgeValue()}));
9241 
9242     RecurPhi->replaceAllUsesWith(RecurSplice);
9243     // Set the first operand of RecurSplice to RecurPhi again, after replacing
9244     // all users.
9245     RecurSplice->setOperand(0, RecurPhi);
9246   }
9247 
9248   // Interleave memory: for each Interleave Group we marked earlier as relevant
9249   // for this VPlan, replace the Recipes widening its memory instructions with a
9250   // single VPInterleaveRecipe at its insertion point.
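  // For example, member loads of A[2*i] and A[2*i+1] collapse into one
  // interleaved access that later widens to a single wide load plus
  // de-interleaving shuffles.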
9251   for (auto IG : InterleaveGroups) {
9252     auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
9253         RecipeBuilder.getRecipe(IG->getInsertPos()));
9254     SmallVector<VPValue *, 4> StoredValues;
9255     for (unsigned i = 0; i < IG->getFactor(); ++i)
9256       if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) {
9257         auto *StoreR =
9258             cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI));
9259         StoredValues.push_back(StoreR->getStoredValue());
9260       }
9261 
9262     auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues,
9263                                         Recipe->getMask());
9264     VPIG->insertBefore(Recipe);
9265     unsigned J = 0;
9266     for (unsigned i = 0; i < IG->getFactor(); ++i)
9267       if (Instruction *Member = IG->getMember(i)) {
9268         if (!Member->getType()->isVoidTy()) {
9269           VPValue *OriginalV = Plan->getVPValue(Member);
9270           Plan->removeVPValueFor(Member);
9271           Plan->addVPValue(Member, VPIG->getVPValue(J));
9272           OriginalV->replaceAllUsesWith(VPIG->getVPValue(J));
9273           J++;
9274         }
9275         RecipeBuilder.getRecipe(Member)->eraseFromParent();
9276       }
9277   }
9278 
9279   // From this point onwards, VPlan-to-VPlan transformations may change the plan
9280   // in ways that accessing values using original IR values is incorrect.
9281   Plan->disableValue2VPValue();
9282 
9283   VPlanTransforms::sinkScalarOperands(*Plan);
9284   VPlanTransforms::mergeReplicateRegions(*Plan);
9285 
9286   std::string PlanName;
9287   raw_string_ostream RSO(PlanName);
9288   ElementCount VF = Range.Start;
9289   Plan->addVF(VF);
9290   RSO << "Initial VPlan for VF={" << VF;
9291   for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) {
9292     Plan->addVF(VF);
9293     RSO << "," << VF;
9294   }
9295   RSO << "},UF>=1";
9296   RSO.flush();
9297   Plan->setName(PlanName);
9298 
9299   // Fold Exit block into its predecessor if possible.
9300   // TODO: Fold block earlier once all VPlan transforms properly maintain a
9301   // VPBasicBlock as exit.
9302   VPBlockUtils::tryToMergeBlockIntoPredecessor(TopRegion->getExit());
9303 
9304   assert(VPlanVerifier::verifyPlanIsValid(*Plan) && "VPlan is invalid");
9305   return Plan;
9306 }
9307 
9308 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
9309   // Outer loop handling: They may require CFG and instruction level
9310   // transformations before even evaluating whether vectorization is profitable.
9311   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
9312   // the vectorization pipeline.
9313   assert(!OrigLoop->isInnermost());
9314   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
9315 
9316   // Create new empty VPlan
9317   auto Plan = std::make_unique<VPlan>();
9318 
9319   // Build hierarchical CFG
9320   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
9321   HCFGBuilder.buildHierarchicalCFG();
9322 
9323   for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
9324        VF *= 2)
9325     Plan->addVF(VF);
9326 
9327   if (EnableVPlanPredication) {
9328     VPlanPredicator VPP(*Plan);
9329     VPP.predicate();
9330 
9331     // Avoid running transformation to recipes until masked code generation in
9332     // VPlan-native path is in place.
9333     return Plan;
9334   }
9335 
9336   SmallPtrSet<Instruction *, 1> DeadInstructions;
9337   VPlanTransforms::VPInstructionsToVPRecipes(
9338       OrigLoop, Plan,
9339       [this](PHINode *P) { return Legal->getIntOrFpInductionDescriptor(P); },
9340       DeadInstructions, *PSE.getSE());
9341 
9342   addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(), DebugLoc(),
9343                         true, true);
9344   return Plan;
9345 }
9346 
// Adjust the recipes for reductions. For in-loop reductions, the chain of
// instructions leading from the loop exit instr to the phi needs to be converted
9349 // to reductions, with one operand being vector and the other being the scalar
9350 // reduction chain. For other reductions, a select is introduced between the phi
9351 // and live-out recipes when folding the tail.
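// For example (conceptually), an in-loop integer sum '%add = add %phi, %x' is
// replaced by a reduction recipe whose chain operand is the recipe for %phi
// and whose vector operand is the widened %x.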
9352 void LoopVectorizationPlanner::adjustRecipesForReductions(
9353     VPBasicBlock *LatchVPBB, VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder,
9354     ElementCount MinVF) {
9355   for (auto &Reduction : CM.getInLoopReductionChains()) {
9356     PHINode *Phi = Reduction.first;
9357     const RecurrenceDescriptor &RdxDesc =
9358         Legal->getReductionVars().find(Phi)->second;
9359     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9360 
9361     if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc))
9362       continue;
9363 
    // ReductionOperations are ordered top-down from the phi's use to the
    // LoopExitValue. We keep track of the previous item (the Chain) to tell
9366     // which of the two operands will remain scalar and which will be reduced.
9367     // For minmax the chain will be the select instructions.
9368     Instruction *Chain = Phi;
9369     for (Instruction *R : ReductionOperations) {
9370       VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
9371       RecurKind Kind = RdxDesc.getRecurrenceKind();
9372 
9373       VPValue *ChainOp = Plan->getVPValue(Chain);
9374       unsigned FirstOpId;
9375       assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
9376              "Only min/max recurrences allowed for inloop reductions");
9377       // Recognize a call to the llvm.fmuladd intrinsic.
9378       bool IsFMulAdd = (Kind == RecurKind::FMulAdd);
9379       assert((!IsFMulAdd || RecurrenceDescriptor::isFMulAddIntrinsic(R)) &&
9380              "Expected instruction to be a call to the llvm.fmuladd intrinsic");
9381       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9382         assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
9383                "Expected to replace a VPWidenSelectSC");
9384         FirstOpId = 1;
9385       } else {
9386         assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe) ||
9387                 (IsFMulAdd && isa<VPWidenCallRecipe>(WidenRecipe))) &&
9388                "Expected to replace a VPWidenSC");
9389         FirstOpId = 0;
9390       }
9391       unsigned VecOpId =
9392           R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
9393       VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));
9394 
9395       auto *CondOp = CM.foldTailByMasking()
9396                          ? RecipeBuilder.createBlockInMask(R->getParent(), Plan)
9397                          : nullptr;
9398 
9399       if (IsFMulAdd) {
9400         // If the instruction is a call to the llvm.fmuladd intrinsic then we
9401         // need to create an fmul recipe to use as the vector operand for the
9402         // fadd reduction.
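        // For example, 'call fast float @llvm.fmuladd.f32(%a, %b, %sum)' is
        // conceptually split into '%mul = fmul fast %a, %b' feeding the fadd
        // reduction, with %sum remaining the scalar chain operand.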
9403         VPInstruction *FMulRecipe = new VPInstruction(
9404             Instruction::FMul, {VecOp, Plan->getVPValue(R->getOperand(1))});
9405         FMulRecipe->setFastMathFlags(R->getFastMathFlags());
9406         WidenRecipe->getParent()->insert(FMulRecipe,
9407                                          WidenRecipe->getIterator());
9408         VecOp = FMulRecipe;
9409       }
9410       VPReductionRecipe *RedRecipe =
9411           new VPReductionRecipe(&RdxDesc, R, ChainOp, VecOp, CondOp, TTI);
9412       WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
9413       Plan->removeVPValueFor(R);
9414       Plan->addVPValue(R, RedRecipe);
9415       WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator());
9416       WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
9417       WidenRecipe->eraseFromParent();
9418 
9419       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9420         VPRecipeBase *CompareRecipe =
9421             RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
9422         assert(isa<VPWidenRecipe>(CompareRecipe) &&
9423                "Expected to replace a VPWidenSC");
9424         assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 &&
9425                "Expected no remaining users");
9426         CompareRecipe->eraseFromParent();
9427       }
9428       Chain = R;
9429     }
9430   }
9431 
9432   // If tail is folded by masking, introduce selects between the phi
9433   // and the live-out instruction of each reduction, at the beginning of the
9434   // dedicated latch block.
9435   if (CM.foldTailByMasking()) {
9436     Builder.setInsertPoint(LatchVPBB, LatchVPBB->begin());
9437     for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) {
9438       VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
9439       if (!PhiR || PhiR->isInLoop())
9440         continue;
9441       VPValue *Cond =
9442           RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
9443       VPValue *Red = PhiR->getBackedgeValue();
9444       assert(cast<VPRecipeBase>(Red->getDef())->getParent() != LatchVPBB &&
9445              "reduction recipe must be defined before latch");
9446       Builder.createNaryOp(Instruction::Select, {Cond, Red, PhiR});
9447     }
9448   }
9449 }
9450 
9451 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
9452 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
9453                                VPSlotTracker &SlotTracker) const {
9454   O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
9455   IG->getInsertPos()->printAsOperand(O, false);
9456   O << ", ";
9457   getAddr()->printAsOperand(O, SlotTracker);
9458   VPValue *Mask = getMask();
9459   if (Mask) {
9460     O << ", ";
9461     Mask->printAsOperand(O, SlotTracker);
9462   }
9463 
9464   unsigned OpIdx = 0;
9465   for (unsigned i = 0; i < IG->getFactor(); ++i) {
9466     if (!IG->getMember(i))
9467       continue;
9468     if (getNumStoreOperands() > 0) {
9469       O << "\n" << Indent << "  store ";
9470       getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker);
9471       O << " to index " << i;
9472     } else {
9473       O << "\n" << Indent << "  ";
9474       getVPValue(OpIdx)->printAsOperand(O, SlotTracker);
9475       O << " = load from index " << i;
9476     }
9477     ++OpIdx;
9478   }
9479 }
9480 #endif
9481 
9482 void VPWidenCallRecipe::execute(VPTransformState &State) {
9483   State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this,
9484                                   *this, State);
9485 }
9486 
9487 void VPWidenSelectRecipe::execute(VPTransformState &State) {
9488   auto &I = *cast<SelectInst>(getUnderlyingInstr());
9489   State.ILV->setDebugLocFromInst(&I);
9490 
  // The condition can be loop invariant but still defined inside the
9492   // loop. This means that we can't just use the original 'cond' value.
9493   // We have to take the 'vectorized' value and pick the first lane.
9494   // Instcombine will make this a no-op.
9495   auto *InvarCond =
9496       InvariantCond ? State.get(getOperand(0), VPIteration(0, 0)) : nullptr;
9497 
9498   for (unsigned Part = 0; Part < State.UF; ++Part) {
9499     Value *Cond = InvarCond ? InvarCond : State.get(getOperand(0), Part);
9500     Value *Op0 = State.get(getOperand(1), Part);
9501     Value *Op1 = State.get(getOperand(2), Part);
9502     Value *Sel = State.Builder.CreateSelect(Cond, Op0, Op1);
9503     State.set(this, Sel, Part);
9504     State.ILV->addMetadata(Sel, &I);
9505   }
9506 }
9507 
9508 void VPWidenRecipe::execute(VPTransformState &State) {
9509   auto &I = *cast<Instruction>(getUnderlyingValue());
9510   auto &Builder = State.Builder;
9511   switch (I.getOpcode()) {
9512   case Instruction::Call:
9513   case Instruction::Br:
9514   case Instruction::PHI:
9515   case Instruction::GetElementPtr:
9516   case Instruction::Select:
9517     llvm_unreachable("This instruction is handled by a different recipe.");
9518   case Instruction::UDiv:
9519   case Instruction::SDiv:
9520   case Instruction::SRem:
9521   case Instruction::URem:
9522   case Instruction::Add:
9523   case Instruction::FAdd:
9524   case Instruction::Sub:
9525   case Instruction::FSub:
9526   case Instruction::FNeg:
9527   case Instruction::Mul:
9528   case Instruction::FMul:
9529   case Instruction::FDiv:
9530   case Instruction::FRem:
9531   case Instruction::Shl:
9532   case Instruction::LShr:
9533   case Instruction::AShr:
9534   case Instruction::And:
9535   case Instruction::Or:
9536   case Instruction::Xor: {
9537     // Just widen unops and binops.
9538     State.ILV->setDebugLocFromInst(&I);
9539 
9540     for (unsigned Part = 0; Part < State.UF; ++Part) {
9541       SmallVector<Value *, 2> Ops;
9542       for (VPValue *VPOp : operands())
9543         Ops.push_back(State.get(VPOp, Part));
9544 
9545       Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
9546 
9547       if (auto *VecOp = dyn_cast<Instruction>(V)) {
9548         VecOp->copyIRFlags(&I);
9549 
9550         // If the instruction is vectorized and was in a basic block that needed
9551         // predication, we can't propagate poison-generating flags (nuw/nsw,
9552         // exact, etc.). The control flow has been linearized and the
        // instruction is no longer guarded by the predicate, which could cause
        // the flag properties to no longer hold.
9555         if (State.MayGeneratePoisonRecipes.contains(this))
9556           VecOp->dropPoisonGeneratingFlags();
9557       }
9558 
9559       // Use this vector value for all users of the original instruction.
9560       State.set(this, V, Part);
9561       State.ILV->addMetadata(V, &I);
9562     }
9563 
9564     break;
9565   }
9566   case Instruction::ICmp:
9567   case Instruction::FCmp: {
9568     // Widen compares. Generate vector compares.
9569     bool FCmp = (I.getOpcode() == Instruction::FCmp);
9570     auto *Cmp = cast<CmpInst>(&I);
9571     State.ILV->setDebugLocFromInst(Cmp);
9572     for (unsigned Part = 0; Part < State.UF; ++Part) {
9573       Value *A = State.get(getOperand(0), Part);
9574       Value *B = State.get(getOperand(1), Part);
9575       Value *C = nullptr;
9576       if (FCmp) {
9577         // Propagate fast math flags.
9578         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
9579         Builder.setFastMathFlags(Cmp->getFastMathFlags());
9580         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
9581       } else {
9582         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
9583       }
9584       State.set(this, C, Part);
9585       State.ILV->addMetadata(C, &I);
9586     }
9587 
9588     break;
9589   }
9590 
9591   case Instruction::ZExt:
9592   case Instruction::SExt:
9593   case Instruction::FPToUI:
9594   case Instruction::FPToSI:
9595   case Instruction::FPExt:
9596   case Instruction::PtrToInt:
9597   case Instruction::IntToPtr:
9598   case Instruction::SIToFP:
9599   case Instruction::UIToFP:
9600   case Instruction::Trunc:
9601   case Instruction::FPTrunc:
9602   case Instruction::BitCast: {
9603     auto *CI = cast<CastInst>(&I);
9604     State.ILV->setDebugLocFromInst(CI);
9605 
9606     /// Vectorize casts.
9607     Type *DestTy = (State.VF.isScalar())
9608                        ? CI->getType()
9609                        : VectorType::get(CI->getType(), State.VF);
9610 
9611     for (unsigned Part = 0; Part < State.UF; ++Part) {
9612       Value *A = State.get(getOperand(0), Part);
9613       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
9614       State.set(this, Cast, Part);
9615       State.ILV->addMetadata(Cast, &I);
9616     }
9617     break;
9618   }
9619   default:
9620     // This instruction is not vectorized by simple widening.
9621     LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
9622     llvm_unreachable("Unhandled instruction!");
9623   } // end of switch.
9624 }
9625 
9626 void VPWidenGEPRecipe::execute(VPTransformState &State) {
9627   auto *GEP = cast<GetElementPtrInst>(getUnderlyingInstr());
9628   // Construct a vector GEP by widening the operands of the scalar GEP as
9629   // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
9630   // results in a vector of pointers when at least one operand of the GEP
9631   // is vector-typed. Thus, to keep the representation compact, we only use
9632   // vector-typed operands for loop-varying values.
9633 
9634   if (State.VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
9635     // If we are vectorizing, but the GEP has only loop-invariant operands,
9636     // the GEP we build (by only using vector-typed operands for
9637     // loop-varying values) would be a scalar pointer. Thus, to ensure we
9638     // produce a vector of pointers, we need to either arbitrarily pick an
9639     // operand to broadcast, or broadcast a clone of the original GEP.
9640     // Here, we broadcast a clone of the original.
9641     //
9642     // TODO: If at some point we decide to scalarize instructions having
9643     //       loop-invariant operands, this special case will no longer be
9644     //       required. We would add the scalarization decision to
9645     //       collectLoopScalars() and teach getVectorValue() to broadcast
9646     //       the lane-zero scalar value.
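    // For illustration only (fixed VF = 4): this clones the scalar GEP once
    // and then broadcasts the clone with an insertelement + shufflevector
    // splat, producing one <4 x pointer> value per unroll part.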
9647     auto *Clone = State.Builder.Insert(GEP->clone());
9648     for (unsigned Part = 0; Part < State.UF; ++Part) {
9649       Value *EntryPart = State.Builder.CreateVectorSplat(State.VF, Clone);
9650       State.set(this, EntryPart, Part);
9651       State.ILV->addMetadata(EntryPart, GEP);
9652     }
9653   } else {
9654     // If the GEP has at least one loop-varying operand, we are sure to
9655     // produce a vector of pointers. But if we are only unrolling, we want
9656     // to produce a scalar GEP for each unroll part. Thus, the GEP we
9657     // produce with the code below will be scalar (if VF == 1) or vector
    // (otherwise). Note that for the unroll-only case, we still maintain
    // values in the vector mapping (via State.set) for all unroll parts, as
    // we do for other instructions.
9661     for (unsigned Part = 0; Part < State.UF; ++Part) {
9662       // The pointer operand of the new GEP. If it's loop-invariant, we
9663       // won't broadcast it.
9664       auto *Ptr = IsPtrLoopInvariant
9665                       ? State.get(getOperand(0), VPIteration(0, 0))
9666                       : State.get(getOperand(0), Part);
9667 
9668       // Collect all the indices for the new GEP. If any index is
9669       // loop-invariant, we won't broadcast it.
9670       SmallVector<Value *, 4> Indices;
9671       for (unsigned I = 1, E = getNumOperands(); I < E; I++) {
9672         VPValue *Operand = getOperand(I);
9673         if (IsIndexLoopInvariant[I - 1])
9674           Indices.push_back(State.get(Operand, VPIteration(0, 0)));
9675         else
9676           Indices.push_back(State.get(Operand, Part));
9677       }
9678 
      // If the GEP instruction is vectorized and was in a basic block that
      // needed predication, we can't propagate the poison-generating
      // 'inbounds' flag. The control flow has been linearized and the GEP is
      // no longer guarded by the predicate, so the 'inbounds' property may
      // no longer hold.
9684       bool IsInBounds =
9685           GEP->isInBounds() && State.MayGeneratePoisonRecipes.count(this) == 0;
9686 
9687       // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
9688       // but it should be a vector, otherwise.
9689       auto *NewGEP = IsInBounds
9690                          ? State.Builder.CreateInBoundsGEP(
9691                                GEP->getSourceElementType(), Ptr, Indices)
9692                          : State.Builder.CreateGEP(GEP->getSourceElementType(),
9693                                                    Ptr, Indices);
9694       assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
9695              "NewGEP is not a pointer vector");
9696       State.set(this, NewGEP, Part);
9697       State.ILV->addMetadata(NewGEP, GEP);
9698     }
9699   }
9700 }
9701 
9702 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
9703   assert(!State.Instance && "Int or FP induction being replicated.");
9704   auto *CanonicalIV = State.get(getParent()->getPlan()->getCanonicalIV(), 0);
9705   State.ILV->widenIntOrFpInduction(IV, getInductionDescriptor(),
9706                                    getStartValue()->getLiveInIRValue(),
9707                                    getTruncInst(), this, State, CanonicalIV);
9708 }
9709 
9710 void VPWidenPHIRecipe::execute(VPTransformState &State) {
9711   State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), this,
9712                                  State);
9713 }
9714 
9715 void VPBlendRecipe::execute(VPTransformState &State) {
9716   State.ILV->setDebugLocFromInst(Phi, &State.Builder);
9717   // We know that all PHIs in non-header blocks are converted into
9718   // selects, so we don't have to worry about the insertion order and we
9719   // can just use the builder.
9720   // At this point we generate the predication tree. There may be
9721   // duplications since this is a simple recursive scan, but future
9722   // optimizations will clean it up.
9723 
9724   unsigned NumIncoming = getNumIncomingValues();
9725 
9726   // Generate a sequence of selects of the form:
9727   // SELECT(Mask3, In3,
9728   //        SELECT(Mask2, In2,
9729   //               SELECT(Mask1, In1,
9730   //                      In0)))
  // Note that Mask0 is never used: lanes for which no path reaches this phi
  // are essentially undef and take their value from In0.
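  // For illustration only (operand names are hypothetical), with three
  // incoming values the loop below emits, per unroll part, roughly:
  //   %predphi  = select <VF x i1> %mask1, <VF x Ty> %in1, <VF x Ty> %in0
  //   %predphi1 = select <VF x i1> %mask2, <VF x Ty> %in2, <VF x Ty> %predphi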
9733   InnerLoopVectorizer::VectorParts Entry(State.UF);
9734   for (unsigned In = 0; In < NumIncoming; ++In) {
9735     for (unsigned Part = 0; Part < State.UF; ++Part) {
9736       // We might have single edge PHIs (blocks) - use an identity
9737       // 'select' for the first PHI operand.
9738       Value *In0 = State.get(getIncomingValue(In), Part);
9739       if (In == 0)
9740         Entry[Part] = In0; // Initialize with the first incoming value.
9741       else {
9742         // Select between the current value and the previous incoming edge
9743         // based on the incoming mask.
9744         Value *Cond = State.get(getMask(In), Part);
9745         Entry[Part] =
9746             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
9747       }
9748     }
9749   }
9750   for (unsigned Part = 0; Part < State.UF; ++Part)
9751     State.set(this, Entry[Part], Part);
9752 }
9753 
9754 void VPInterleaveRecipe::execute(VPTransformState &State) {
9755   assert(!State.Instance && "Interleave group being replicated.");
9756   State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(),
9757                                       getStoredValues(), getMask());
9758 }
9759 
9760 void VPReductionRecipe::execute(VPTransformState &State) {
9761   assert(!State.Instance && "Reduction being replicated.");
9762   Value *PrevInChain = State.get(getChainOp(), 0);
9763   RecurKind Kind = RdxDesc->getRecurrenceKind();
9764   bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc);
9765   // Propagate the fast-math flags carried by the underlying instruction.
9766   IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
9767   State.Builder.setFastMathFlags(RdxDesc->getFastMathFlags());
9768   for (unsigned Part = 0; Part < State.UF; ++Part) {
9769     Value *NewVecOp = State.get(getVecOp(), Part);
9770     if (VPValue *Cond = getCondOp()) {
9771       Value *NewCond = State.get(Cond, Part);
9772       VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
9773       Value *Iden = RdxDesc->getRecurrenceIdentity(
9774           Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags());
9775       Value *IdenVec =
9776           State.Builder.CreateVectorSplat(VecTy->getElementCount(), Iden);
9777       Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
9778       NewVecOp = Select;
9779     }
9780     Value *NewRed;
9781     Value *NextInChain;
9782     if (IsOrdered) {
9783       if (State.VF.isVector())
9784         NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp,
9785                                         PrevInChain);
9786       else
9787         NewRed = State.Builder.CreateBinOp(
9788             (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), PrevInChain,
9789             NewVecOp);
9790       PrevInChain = NewRed;
9791     } else {
9792       PrevInChain = State.get(getChainOp(), Part);
9793       NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
9794     }
9795     if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9796       NextInChain =
9797           createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
9798                          NewRed, PrevInChain);
9799     } else if (IsOrdered)
9800       NextInChain = NewRed;
9801     else
9802       NextInChain = State.Builder.CreateBinOp(
9803           (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), NewRed,
9804           PrevInChain);
9805     State.set(this, NextInChain, Part);
9806   }
9807 }
9808 
9809 void VPReplicateRecipe::execute(VPTransformState &State) {
9810   if (State.Instance) { // Generate a single instance.
9811     assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
9812     State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *State.Instance,
9813                                     IsPredicated, State);
9814     // Insert scalar instance packing it into a vector.
9815     if (AlsoPack && State.VF.isVector()) {
9816       // If we're constructing lane 0, initialize to start from poison.
9817       if (State.Instance->Lane.isFirstLane()) {
9818         assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
9819         Value *Poison = PoisonValue::get(
9820             VectorType::get(getUnderlyingValue()->getType(), State.VF));
9821         State.set(this, Poison, State.Instance->Part);
9822       }
9823       State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
9824     }
9825     return;
9826   }
9827 
  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane for
  // each of the UF parts.
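  // For example, with UF = 2 and VF = 4 this creates 8 scalar clones of the
  // instruction, or just 2 when it is uniform.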
9831   unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
9832   assert((!State.VF.isScalable() || IsUniform) &&
9833          "Can't scalarize a scalable vector");
9834   for (unsigned Part = 0; Part < State.UF; ++Part)
9835     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
9836       State.ILV->scalarizeInstruction(getUnderlyingInstr(), this,
9837                                       VPIteration(Part, Lane), IsPredicated,
9838                                       State);
9839 }
9840 
9841 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
9842   assert(State.Instance && "Branch on Mask works only on single instance.");
9843 
9844   unsigned Part = State.Instance->Part;
9845   unsigned Lane = State.Instance->Lane.getKnownLane();
9846 
9847   Value *ConditionBit = nullptr;
9848   VPValue *BlockInMask = getMask();
9849   if (BlockInMask) {
9850     ConditionBit = State.get(BlockInMask, Part);
9851     if (ConditionBit->getType()->isVectorTy())
9852       ConditionBit = State.Builder.CreateExtractElement(
9853           ConditionBit, State.Builder.getInt32(Lane));
9854   } else // Block in mask is all-one.
9855     ConditionBit = State.Builder.getTrue();
9856 
9857   // Replace the temporary unreachable terminator with a new conditional branch,
9858   // whose two destinations will be set later when they are created.
9859   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
9860   assert(isa<UnreachableInst>(CurrentTerminator) &&
9861          "Expected to replace unreachable terminator with conditional branch.");
9862   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
9863   CondBr->setSuccessor(0, nullptr);
9864   ReplaceInstWithInst(CurrentTerminator, CondBr);
9865 }
9866 
9867 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
9868   assert(State.Instance && "Predicated instruction PHI works per instance.");
9869   Instruction *ScalarPredInst =
9870       cast<Instruction>(State.get(getOperand(0), *State.Instance));
9871   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
9872   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
9873   assert(PredicatingBB && "Predicated block has no single predecessor.");
9874   assert(isa<VPReplicateRecipe>(getOperand(0)) &&
9875          "operand must be VPReplicateRecipe");
9876 
9877   // By current pack/unpack logic we need to generate only a single phi node: if
9878   // a vector value for the predicated instruction exists at this point it means
9879   // the instruction has vector users only, and a phi for the vector value is
9880   // needed. In this case the recipe of the predicated instruction is marked to
9881   // also do that packing, thereby "hoisting" the insert-element sequence.
9882   // Otherwise, a phi node for the scalar value is needed.
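  // For illustration only (value names are hypothetical), the vector case
  // below produces:
  //   %vphi = phi <VF x Ty> [ %unmodified.vec, %predicating.bb ],
  //                         [ %vec.with.insert, %predicated.bb ]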
9883   unsigned Part = State.Instance->Part;
9884   if (State.hasVectorValue(getOperand(0), Part)) {
9885     Value *VectorValue = State.get(getOperand(0), Part);
9886     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
9887     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
9888     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
9889     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
9890     if (State.hasVectorValue(this, Part))
9891       State.reset(this, VPhi, Part);
9892     else
9893       State.set(this, VPhi, Part);
9894     // NOTE: Currently we need to update the value of the operand, so the next
9895     // predicated iteration inserts its generated value in the correct vector.
9896     State.reset(getOperand(0), VPhi, Part);
9897   } else {
9898     Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType();
9899     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
9900     Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
9901                      PredicatingBB);
9902     Phi->addIncoming(ScalarPredInst, PredicatedBB);
9903     if (State.hasScalarValue(this, *State.Instance))
9904       State.reset(this, Phi, *State.Instance);
9905     else
9906       State.set(this, Phi, *State.Instance);
9907     // NOTE: Currently we need to update the value of the operand, so the next
9908     // predicated iteration inserts its generated value in the correct vector.
9909     State.reset(getOperand(0), Phi, *State.Instance);
9910   }
9911 }
9912 
9913 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
9914   VPValue *StoredValue = isStore() ? getStoredValue() : nullptr;
9915 
9916   // Attempt to issue a wide load.
9917   LoadInst *LI = dyn_cast<LoadInst>(&Ingredient);
9918   StoreInst *SI = dyn_cast<StoreInst>(&Ingredient);
9919 
9920   assert((LI || SI) && "Invalid Load/Store instruction");
9921   assert((!SI || StoredValue) && "No stored value provided for widened store");
9922   assert((!LI || !StoredValue) && "Stored value provided for widened load");
9923 
9924   Type *ScalarDataTy = getLoadStoreType(&Ingredient);
9925 
9926   auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
9927   const Align Alignment = getLoadStoreAlignment(&Ingredient);
9928   bool CreateGatherScatter = !Consecutive;
9929 
9930   auto &Builder = State.Builder;
9931   InnerLoopVectorizer::VectorParts BlockInMaskParts(State.UF);
9932   bool isMaskRequired = getMask();
9933   if (isMaskRequired)
9934     for (unsigned Part = 0; Part < State.UF; ++Part)
9935       BlockInMaskParts[Part] = State.get(getMask(), Part);
9936 
9937   const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
9938     // Calculate the pointer for the specific unroll-part.
9939     GetElementPtrInst *PartPtr = nullptr;
9940 
9941     bool InBounds = false;
9942     if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
9943       InBounds = gep->isInBounds();
9944     if (Reverse) {
      // If the address is consecutive but reversed, then the wide store needs
      // to start at the last vector element.
      // RunTimeVF = VScale * VF.getKnownMinValue(); for fixed-width vectors
      // VScale is 1, so RunTimeVF = VF.getKnownMinValue().
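      // For example, with a fixed VF of 4 (so RunTimeVF = 4): Part 0 uses
      // NumElt = 0 and LastLane = -3, making the part pointer &Ptr[-3];
      // Part 1 uses NumElt = -4 and points at &Ptr[-7], and so on.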
9949       Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), State.VF);
9950       // NumElt = -Part * RunTimeVF
9951       Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF);
9952       // LastLane = 1 - RunTimeVF
9953       Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF);
9954       PartPtr =
9955           cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt));
9956       PartPtr->setIsInBounds(InBounds);
9957       PartPtr = cast<GetElementPtrInst>(
9958           Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane));
9959       PartPtr->setIsInBounds(InBounds);
9960       if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
9961         BlockInMaskParts[Part] =
9962             Builder.CreateVectorReverse(BlockInMaskParts[Part], "reverse");
9963     } else {
9964       Value *Increment =
9965           createStepForVF(Builder, Builder.getInt32Ty(), State.VF, Part);
9966       PartPtr = cast<GetElementPtrInst>(
9967           Builder.CreateGEP(ScalarDataTy, Ptr, Increment));
9968       PartPtr->setIsInBounds(InBounds);
9969     }
9970 
9971     unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
9972     return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
9973   };
9974 
9975   // Handle Stores:
9976   if (SI) {
9977     State.ILV->setDebugLocFromInst(SI);
9978 
9979     for (unsigned Part = 0; Part < State.UF; ++Part) {
9980       Instruction *NewSI = nullptr;
9981       Value *StoredVal = State.get(StoredValue, Part);
9982       if (CreateGatherScatter) {
9983         Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
9984         Value *VectorGep = State.get(getAddr(), Part);
9985         NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
9986                                             MaskPart);
9987       } else {
9988         if (Reverse) {
9989           // If we store to reverse consecutive memory locations, then we need
9990           // to reverse the order of elements in the stored value.
9991           StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse");
9992           // We don't want to update the value in the map as it might be used in
9993           // another expression. So don't call resetVectorValue(StoredVal).
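          // E.g., a stored value <a, b, c, d> becomes <d, c, b, a> so that,
          // together with the reversed part pointer computed above, each
          // element lands at the address the scalar loop would have used.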
9994         }
9995         auto *VecPtr =
9996             CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0)));
9997         if (isMaskRequired)
9998           NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
9999                                             BlockInMaskParts[Part]);
10000         else
10001           NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
10002       }
10003       State.ILV->addMetadata(NewSI, SI);
10004     }
10005     return;
10006   }
10007 
10008   // Handle loads.
10009   assert(LI && "Must have a load instruction");
10010   State.ILV->setDebugLocFromInst(LI);
10011   for (unsigned Part = 0; Part < State.UF; ++Part) {
10012     Value *NewLI;
10013     if (CreateGatherScatter) {
10014       Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
10015       Value *VectorGep = State.get(getAddr(), Part);
10016       NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart,
10017                                          nullptr, "wide.masked.gather");
10018       State.ILV->addMetadata(NewLI, LI);
10019     } else {
10020       auto *VecPtr =
10021           CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0)));
10022       if (isMaskRequired)
10023         NewLI = Builder.CreateMaskedLoad(
10024             DataTy, VecPtr, Alignment, BlockInMaskParts[Part],
10025             PoisonValue::get(DataTy), "wide.masked.load");
10026       else
10027         NewLI =
10028             Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
10029 
      // Add metadata to the load, but set the mapped vector value to the
      // reverse shuffle.
10031       State.ILV->addMetadata(NewLI, LI);
10032       if (Reverse)
10033         NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
10034     }
10035 
10036     State.set(this, NewLI, Part);
10037   }
10038 }
10039 
// Determine how to lower the scalar epilogue, which depends on 1) optimizing
// for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
// predication, and 4) a TTI hook that analyzes whether the loop is suitable
// for predication.
10044 static ScalarEpilogueLowering getScalarEpilogueLowering(
10045     Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
10046     BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
10047     AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
10048     LoopVectorizationLegality &LVL) {
10049   // 1) OptSize takes precedence over all other options, i.e. if this is set,
10050   // don't look at hints or options, and don't request a scalar epilogue.
10051   // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
10052   // LoopAccessInfo (due to code dependency and not being able to reliably get
10053   // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
10054   // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
10055   // versioning when the vectorization is forced, unlike hasOptSize. So revert
10056   // back to the old way and vectorize with versioning when forced. See D81345.)
10057   if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
10058                                                       PGSOQueryType::IRPass) &&
10059                           Hints.getForce() != LoopVectorizeHints::FK_Enabled))
10060     return CM_ScalarEpilogueNotAllowedOptSize;
10061 
10062   // 2) If set, obey the directives
10063   if (PreferPredicateOverEpilogue.getNumOccurrences()) {
10064     switch (PreferPredicateOverEpilogue) {
10065     case PreferPredicateTy::ScalarEpilogue:
10066       return CM_ScalarEpilogueAllowed;
10067     case PreferPredicateTy::PredicateElseScalarEpilogue:
10068       return CM_ScalarEpilogueNotNeededUsePredicate;
10069     case PreferPredicateTy::PredicateOrDontVectorize:
10070       return CM_ScalarEpilogueNotAllowedUsePredicate;
10071     };
10072   }
10073 
10074   // 3) If set, obey the hints
10075   switch (Hints.getPredicate()) {
10076   case LoopVectorizeHints::FK_Enabled:
10077     return CM_ScalarEpilogueNotNeededUsePredicate;
10078   case LoopVectorizeHints::FK_Disabled:
10079     return CM_ScalarEpilogueAllowed;
10080   };
10081 
  // 4) If the TTI hook indicates this is profitable, request predication.
10083   if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
10084                                        LVL.getLAI()))
10085     return CM_ScalarEpilogueNotNeededUsePredicate;
10086 
10087   return CM_ScalarEpilogueAllowed;
10088 }
10089 
10090 Value *VPTransformState::get(VPValue *Def, unsigned Part) {
  // If Values have been set for this Def, return the one relevant for
  // \p Part.
10092   if (hasVectorValue(Def, Part))
10093     return Data.PerPartOutput[Def][Part];
10094 
10095   if (!hasScalarValue(Def, {Part, 0})) {
10096     Value *IRV = Def->getLiveInIRValue();
10097     Value *B = ILV->getBroadcastInstrs(IRV);
10098     set(Def, B, Part);
10099     return B;
10100   }
10101 
10102   Value *ScalarValue = get(Def, {Part, 0});
10103   // If we aren't vectorizing, we can just copy the scalar map values over
10104   // to the vector map.
10105   if (VF.isScalar()) {
10106     set(Def, ScalarValue, Part);
10107     return ScalarValue;
10108   }
10109 
10110   auto *RepR = dyn_cast<VPReplicateRecipe>(Def);
10111   bool IsUniform = RepR && RepR->isUniform();
10112 
10113   unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
10114   // Check if there is a scalar value for the selected lane.
10115   if (!hasScalarValue(Def, {Part, LastLane})) {
10116     // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform.
10117     assert(isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) &&
10118            "unexpected recipe found to be invariant");
10119     IsUniform = true;
10120     LastLane = 0;
10121   }
10122 
10123   auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
10124   // Set the insert point after the last scalarized instruction or after the
10125   // last PHI, if LastInst is a PHI. This ensures the insertelement sequence
10126   // will directly follow the scalar definitions.
10127   auto OldIP = Builder.saveIP();
10128   auto NewIP =
10129       isa<PHINode>(LastInst)
10130           ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI())
10131           : std::next(BasicBlock::iterator(LastInst));
10132   Builder.SetInsertPoint(&*NewIP);
10133 
10134   // However, if we are vectorizing, we need to construct the vector values.
10135   // If the value is known to be uniform after vectorization, we can just
10136   // broadcast the scalar value corresponding to lane zero for each unroll
10137   // iteration. Otherwise, we construct the vector values using
10138   // insertelement instructions. Since the resulting vectors are stored in
10139   // State, we will only generate the insertelements once.
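  // For illustration only (fixed VF = 4, names hypothetical), the non-uniform
  // case below emits a chain of insertelements starting from poison:
  //   %v0 = insertelement <4 x Ty> poison, Ty %s0, i32 0
  //   %v1 = insertelement <4 x Ty> %v0, Ty %s1, i32 1
  //   ... up to lane 3.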
10140   Value *VectorValue = nullptr;
10141   if (IsUniform) {
10142     VectorValue = ILV->getBroadcastInstrs(ScalarValue);
10143     set(Def, VectorValue, Part);
10144   } else {
    // Initialize packing with insertelements to start from poison.
    assert(!VF.isScalable() && "VF is assumed to be non scalable.");
    Value *Poison = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
    set(Def, Poison, Part);
10149     for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
10150       ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
10151     VectorValue = get(Def, Part);
10152   }
10153   Builder.restoreIP(OldIP);
10154   return VectorValue;
10155 }
10156 
10157 // Process the loop in the VPlan-native vectorization path. This path builds
10158 // VPlan upfront in the vectorization pipeline, which allows to apply
10159 // VPlan-to-VPlan transformations from the very beginning without modifying the
10160 // input LLVM IR.
10161 static bool processLoopInVPlanNativePath(
10162     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
10163     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
10164     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
10165     OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
10166     ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints,
10167     LoopVectorizationRequirements &Requirements) {
10168 
10169   if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
10170     LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
10171     return false;
10172   }
10173   assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
10174   Function *F = L->getHeader()->getParent();
10175   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
10176 
10177   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
10178       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
10179 
10180   LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
10181                                 &Hints, IAI);
10182   // Use the planner for outer loop vectorization.
10183   // TODO: CM is not used at this point inside the planner. Turn CM into an
10184   // optional argument if we don't need it in the future.
10185   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints,
10186                                Requirements, ORE);
10187 
10188   // Get user vectorization factor.
10189   ElementCount UserVF = Hints.getWidth();
10190 
10191   CM.collectElementTypesForWidening();
10192 
10193   // Plan how to best vectorize, return the best VF and its cost.
10194   const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
10195 
10196   // If we are stress testing VPlan builds, do not attempt to generate vector
10197   // code. Masked vector code generation support will follow soon.
10198   // Also, do not attempt to vectorize if no vector code will be produced.
10199   if (VPlanBuildStressTest || EnableVPlanPredication ||
10200       VectorizationFactor::Disabled() == VF)
10201     return false;
10202 
10203   VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10204 
10205   {
10206     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10207                              F->getParent()->getDataLayout());
10208     InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
10209                            &CM, BFI, PSI, Checks);
10210     LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
10211                       << L->getHeader()->getParent()->getName() << "\"\n");
10212     LVP.executePlan(VF.Width, 1, BestPlan, LB, DT);
10213   }
10214 
10215   // Mark the loop as already vectorized to avoid vectorizing again.
10216   Hints.setAlreadyVectorized();
10217   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10218   return true;
10219 }
10220 
10221 // Emit a remark if there are stores to floats that required a floating point
10222 // extension. If the vectorized loop was generated with floating point there
10223 // will be a performance penalty from the conversion overhead and the change in
10224 // the vector width.
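// For illustration only (a hypothetical C-level example):
//   float A[N]; ... A[i] = B[i] + 1.0;
// extends the float operand to double before the truncating store, so the
// vectorized loop has to operate at half the effective vector width.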
10225 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
10226   SmallVector<Instruction *, 4> Worklist;
10227   for (BasicBlock *BB : L->getBlocks()) {
10228     for (Instruction &Inst : *BB) {
10229       if (auto *S = dyn_cast<StoreInst>(&Inst)) {
10230         if (S->getValueOperand()->getType()->isFloatTy())
10231           Worklist.push_back(S);
10232       }
10233     }
10234   }
10235 
  // Traverse upwards from the floating point stores, searching for floating
  // point conversions.
10238   SmallPtrSet<const Instruction *, 4> Visited;
10239   SmallPtrSet<const Instruction *, 4> EmittedRemark;
10240   while (!Worklist.empty()) {
10241     auto *I = Worklist.pop_back_val();
10242     if (!L->contains(I))
10243       continue;
10244     if (!Visited.insert(I).second)
10245       continue;
10246 
10247     // Emit a remark if the floating point store required a floating
10248     // point conversion.
10249     // TODO: More work could be done to identify the root cause such as a
10250     // constant or a function return type and point the user to it.
10251     if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
10252       ORE->emit([&]() {
10253         return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
10254                                           I->getDebugLoc(), L->getHeader())
10255                << "floating point conversion changes vector width. "
10256                << "Mixed floating point precision requires an up/down "
10257                << "cast that will negatively impact performance.";
10258       });
10259 
10260     for (Use &Op : I->operands())
10261       if (auto *OpI = dyn_cast<Instruction>(Op))
10262         Worklist.push_back(OpI);
10263   }
10264 }
10265 
10266 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
10267     : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
10268                                !EnableLoopInterleaving),
10269       VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
10270                               !EnableLoopVectorization) {}
10271 
10272 bool LoopVectorizePass::processLoop(Loop *L) {
10273   assert((EnableVPlanNativePath || L->isInnermost()) &&
10274          "VPlan-native path is not enabled. Only process inner loops.");
10275 
10276 #ifndef NDEBUG
10277   const std::string DebugLocStr = getDebugLocString(L);
10278 #endif /* NDEBUG */
10279 
10280   LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
10281                     << L->getHeader()->getParent()->getName() << "\" from "
10282                     << DebugLocStr << "\n");
10283 
10284   LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI);
10285 
10286   LLVM_DEBUG(
10287       dbgs() << "LV: Loop hints:"
10288              << " force="
10289              << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
10290                      ? "disabled"
10291                      : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
10292                             ? "enabled"
10293                             : "?"))
10294              << " width=" << Hints.getWidth()
10295              << " interleave=" << Hints.getInterleave() << "\n");
10296 
10297   // Function containing loop
10298   Function *F = L->getHeader()->getParent();
10299 
10300   // Looking at the diagnostic output is the only way to determine if a loop
10301   // was vectorized (other than looking at the IR or machine code), so it
10302   // is important to generate an optimization remark for each loop. Most of
  // these messages are generated as OptimizationRemarkAnalysis. Remarks
  // generated as OptimizationRemark and OptimizationRemarkMissed are less
  // verbose, reporting vectorized loops and unvectorized loops that may
  // benefit from vectorization, respectively.
10307 
10308   if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
10309     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
10310     return false;
10311   }
10312 
10313   PredicatedScalarEvolution PSE(*SE, *L);
10314 
10315   // Check if it is legal to vectorize the loop.
10316   LoopVectorizationRequirements Requirements;
10317   LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
10318                                 &Requirements, &Hints, DB, AC, BFI, PSI);
10319   if (!LVL.canVectorize(EnableVPlanNativePath)) {
10320     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
10321     Hints.emitRemarkWithHints();
10322     return false;
10323   }
10324 
10325   // Check the function attributes and profiles to find out if this function
10326   // should be optimized for size.
10327   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
10328       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);
10329 
10330   // Entrance to the VPlan-native vectorization path. Outer loops are processed
10331   // here. They may require CFG and instruction level transformations before
10332   // even evaluating whether vectorization is profitable. Since we cannot modify
10333   // the incoming IR, we need to build VPlan upfront in the vectorization
10334   // pipeline.
10335   if (!L->isInnermost())
10336     return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
10337                                         ORE, BFI, PSI, Hints, Requirements);
10338 
10339   assert(L->isInnermost() && "Inner loop expected.");
10340 
10341   // Check the loop for a trip count threshold: vectorize loops with a tiny trip
10342   // count by optimizing for size, to minimize overheads.
10343   auto ExpectedTC = getSmallBestKnownTC(*SE, L);
10344   if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
10345     LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
10346                       << "This loop is worth vectorizing only if no scalar "
10347                       << "iteration overheads are incurred.");
10348     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
10349       LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
10350     else {
10351       LLVM_DEBUG(dbgs() << "\n");
10352       SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
10353     }
10354   }
10355 
10356   // Check the function attributes to see if implicit floats are allowed.
10357   // FIXME: This check doesn't seem possibly correct -- what if the loop is
10358   // an integer loop and the vector instructions selected are purely integer
10359   // vector instructions?
10360   if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
10361     reportVectorizationFailure(
10362         "Can't vectorize when the NoImplicitFloat attribute is used",
10363         "loop not vectorized due to NoImplicitFloat attribute",
10364         "NoImplicitFloat", ORE, L);
10365     Hints.emitRemarkWithHints();
10366     return false;
10367   }
10368 
10369   // Check if the target supports potentially unsafe FP vectorization.
10370   // FIXME: Add a check for the type of safety issue (denormal, signaling)
10371   // for the target we're vectorizing for, to make sure none of the
10372   // additional fp-math flags can help.
10373   if (Hints.isPotentiallyUnsafe() &&
10374       TTI->isFPVectorizationPotentiallyUnsafe()) {
10375     reportVectorizationFailure(
10376         "Potentially unsafe FP op prevents vectorization",
10377         "loop not vectorized due to unsafe FP support.",
10378         "UnsafeFP", ORE, L);
10379     Hints.emitRemarkWithHints();
10380     return false;
10381   }
10382 
10383   bool AllowOrderedReductions;
10384   // If the flag is set, use that instead and override the TTI behaviour.
10385   if (ForceOrderedReductions.getNumOccurrences() > 0)
10386     AllowOrderedReductions = ForceOrderedReductions;
10387   else
10388     AllowOrderedReductions = TTI->enableOrderedReductions();
10389   if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) {
10390     ORE->emit([&]() {
10391       auto *ExactFPMathInst = Requirements.getExactFPInst();
10392       return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
10393                                                  ExactFPMathInst->getDebugLoc(),
10394                                                  ExactFPMathInst->getParent())
10395              << "loop not vectorized: cannot prove it is safe to reorder "
10396                 "floating-point operations";
10397     });
10398     LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
10399                          "reorder floating-point operations\n");
10400     Hints.emitRemarkWithHints();
10401     return false;
10402   }
10403 
10404   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
10405   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
10406 
10407   // If an override option has been passed in for interleaved accesses, use it.
10408   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
10409     UseInterleaved = EnableInterleavedMemAccesses;
10410 
10411   // Analyze interleaved memory accesses.
10412   if (UseInterleaved) {
10413     IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
10414   }
10415 
10416   // Use the cost model.
10417   LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
10418                                 F, &Hints, IAI);
10419   CM.collectValuesToIgnore();
10420   CM.collectElementTypesForWidening();
10421 
10422   // Use the planner for vectorization.
10423   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints,
10424                                Requirements, ORE);
10425 
10426   // Get user vectorization factor and interleave count.
10427   ElementCount UserVF = Hints.getWidth();
10428   unsigned UserIC = Hints.getInterleave();
10429 
10430   // Plan how to best vectorize, return the best VF and its cost.
10431   Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
10432 
10433   VectorizationFactor VF = VectorizationFactor::Disabled();
10434   unsigned IC = 1;
10435 
10436   if (MaybeVF) {
10437     VF = *MaybeVF;
10438     // Select the interleave count.
10439     IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue());
10440   }
10441 
10442   // Identify the diagnostic messages that should be produced.
10443   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
10444   bool VectorizeLoop = true, InterleaveLoop = true;
10445   if (VF.Width.isScalar()) {
10446     LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
10447     VecDiagMsg = std::make_pair(
10448         "VectorizationNotBeneficial",
10449         "the cost-model indicates that vectorization is not beneficial");
10450     VectorizeLoop = false;
10451   }
10452 
10453   if (!MaybeVF && UserIC > 1) {
10454     // Tell the user interleaving was avoided up-front, despite being explicitly
10455     // requested.
10456     LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
10457                          "interleaving should be avoided up front\n");
10458     IntDiagMsg = std::make_pair(
10459         "InterleavingAvoided",
10460         "Ignoring UserIC, because interleaving was avoided up front");
10461     InterleaveLoop = false;
10462   } else if (IC == 1 && UserIC <= 1) {
10463     // Tell the user interleaving is not beneficial.
10464     LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
10465     IntDiagMsg = std::make_pair(
10466         "InterleavingNotBeneficial",
10467         "the cost-model indicates that interleaving is not beneficial");
10468     InterleaveLoop = false;
10469     if (UserIC == 1) {
10470       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
10471       IntDiagMsg.second +=
10472           " and is explicitly disabled or interleave count is set to 1";
10473     }
10474   } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly
    // disabled.
10476     LLVM_DEBUG(
10477         dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
10478     IntDiagMsg = std::make_pair(
10479         "InterleavingBeneficialButDisabled",
10480         "the cost-model indicates that interleaving is beneficial "
10481         "but is explicitly disabled or interleave count is set to 1");
10482     InterleaveLoop = false;
10483   }
10484 
10485   // Override IC if user provided an interleave count.
10486   IC = UserIC > 0 ? UserIC : IC;
10487 
10488   // Emit diagnostic messages, if any.
10489   const char *VAPassName = Hints.vectorizeAnalysisPassName();
10490   if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
10492     ORE->emit([&]() {
10493       return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
10494                                       L->getStartLoc(), L->getHeader())
10495              << VecDiagMsg.second;
10496     });
10497     ORE->emit([&]() {
10498       return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
10499                                       L->getStartLoc(), L->getHeader())
10500              << IntDiagMsg.second;
10501     });
10502     return false;
10503   } else if (!VectorizeLoop && InterleaveLoop) {
10504     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10505     ORE->emit([&]() {
10506       return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
10507                                         L->getStartLoc(), L->getHeader())
10508              << VecDiagMsg.second;
10509     });
10510   } else if (VectorizeLoop && !InterleaveLoop) {
10511     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10512                       << ") in " << DebugLocStr << '\n');
10513     ORE->emit([&]() {
10514       return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
10515                                         L->getStartLoc(), L->getHeader())
10516              << IntDiagMsg.second;
10517     });
10518   } else if (VectorizeLoop && InterleaveLoop) {
10519     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10520                       << ") in " << DebugLocStr << '\n');
10521     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10522   }
10523 
10524   bool DisableRuntimeUnroll = false;
10525   MDNode *OrigLoopID = L->getLoopID();
10526   {
10527     // Optimistically generate runtime checks. Drop them if they turn out to not
10528     // be profitable. Limit the scope of Checks, so the cleanup happens
    // immediately after vector code generation is done.
10530     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10531                              F->getParent()->getDataLayout());
10532     if (!VF.Width.isScalar() || IC > 1)
10533       Checks.Create(L, *LVL.getLAI(), PSE.getUnionPredicate());
10534 
10535     using namespace ore;
10536     if (!VectorizeLoop) {
10537       assert(IC > 1 && "interleave count should not be 1 or 0");
10538       // If we decided that it is not legal to vectorize the loop, then
10539       // interleave it.
10540       InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
10541                                  &CM, BFI, PSI, Checks);
10542 
10543       VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10544       LVP.executePlan(VF.Width, IC, BestPlan, Unroller, DT);
10545 
10546       ORE->emit([&]() {
10547         return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
10548                                   L->getHeader())
10549                << "interleaved loop (interleaved count: "
10550                << NV("InterleaveCount", IC) << ")";
10551       });
10552     } else {
10553       // If we decided that it is *legal* to vectorize the loop, then do it.
10554 
10555       // Consider vectorizing the epilogue too if it's profitable.
10556       VectorizationFactor EpilogueVF =
10557           CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
10558       if (EpilogueVF.Width.isVector()) {
10559 
10560         // The first pass vectorizes the main loop and creates a scalar epilogue
10561         // to be vectorized by executing the plan (potentially with a different
10562         // factor) again shortly afterwards.
10563         EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1);
10564         EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE,
10565                                            EPI, &LVL, &CM, BFI, PSI, Checks);
10566 
10567         VPlan &BestMainPlan = LVP.getBestPlanFor(EPI.MainLoopVF);
10568         LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF, BestMainPlan, MainILV,
10569                         DT);
10570         ++LoopsVectorized;
10571 
10572         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10573         formLCSSARecursively(*L, *DT, LI, SE);
10574 
10575         // Second pass vectorizes the epilogue and adjusts the control flow
10576         // edges from the first pass.
10577         EPI.MainLoopVF = EPI.EpilogueVF;
10578         EPI.MainLoopUF = EPI.EpilogueUF;
10579         EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
10580                                                  ORE, EPI, &LVL, &CM, BFI, PSI,
10581                                                  Checks);
10582 
10583         VPlan &BestEpiPlan = LVP.getBestPlanFor(EPI.EpilogueVF);
10584         LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV,
10585                         DT);
10586         ++LoopsEpilogueVectorized;
10587 
10588         if (!MainILV.areSafetyChecksAdded())
10589           DisableRuntimeUnroll = true;
10590       } else {
10591         InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
10592                                &LVL, &CM, BFI, PSI, Checks);
10593 
10594         VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10595         LVP.executePlan(VF.Width, IC, BestPlan, LB, DT);
10596         ++LoopsVectorized;
10597 
        // Add metadata to disable runtime unrolling of the scalar loop when
        // there are no runtime checks about strides and memory. A scalar loop
        // that is rarely used is not worth unrolling.
10601         if (!LB.areSafetyChecksAdded())
10602           DisableRuntimeUnroll = true;
10603       }
10604       // Report the vectorization decision.
10605       ORE->emit([&]() {
10606         return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
10607                                   L->getHeader())
10608                << "vectorized loop (vectorization width: "
10609                << NV("VectorizationFactor", VF.Width)
10610                << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
10611       });
10612     }
10613 
10614     if (ORE->allowExtraAnalysis(LV_NAME))
10615       checkMixedPrecision(L, ORE);
10616   }
10617 
10618   Optional<MDNode *> RemainderLoopID =
10619       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
10620                                       LLVMLoopVectorizeFollowupEpilogue});
10621   if (RemainderLoopID.hasValue()) {
10622     L->setLoopID(RemainderLoopID.getValue());
10623   } else {
10624     if (DisableRuntimeUnroll)
10625       AddRuntimeUnrollDisableMetaData(L);
10626 
10627     // Mark the loop as already vectorized to avoid vectorizing again.
10628     Hints.setAlreadyVectorized();
10629   }
10630 
10631   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10632   return true;
10633 }
10634 
10635 LoopVectorizeResult LoopVectorizePass::runImpl(
10636     Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
10637     DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
10638     DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
10639     std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
10640     OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
10641   SE = &SE_;
10642   LI = &LI_;
10643   TTI = &TTI_;
10644   DT = &DT_;
10645   BFI = &BFI_;
10646   TLI = TLI_;
10647   AA = &AA_;
10648   AC = &AC_;
10649   GetLAA = &GetLAA_;
10650   DB = &DB_;
10651   ORE = &ORE_;
10652   PSI = PSI_;
10653 
10654   // Don't attempt if
10655   // 1. the target claims to have no vector registers, and
10656   // 2. interleaving won't help ILP.
10657   //
10658   // The second condition is necessary because, even if the target has no
10659   // vector registers, loop vectorization may still enable scalar
10660   // interleaving.
10661   if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
10662       TTI->getMaxInterleaveFactor(1) < 2)
10663     return LoopVectorizeResult(false, false);
10664 
10665   bool Changed = false, CFGChanged = false;
10666 
10667   // The vectorizer requires loops to be in simplified form.
10668   // Since simplification may add new inner loops, it has to run before the
10669   // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
10671   // vectorized.
10672   for (auto &L : *LI)
10673     Changed |= CFGChanged |=
10674         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10675 
10676   // Build up a worklist of inner-loops to vectorize. This is necessary as
10677   // the act of vectorizing or partially unrolling a loop creates new loops
10678   // and can invalidate iterators across the loops.
10679   SmallVector<Loop *, 8> Worklist;
10680 
10681   for (Loop *L : *LI)
10682     collectSupportedLoops(*L, LI, ORE, Worklist);
10683 
10684   LoopsAnalyzed += Worklist.size();
10685 
10686   // Now walk the identified inner loops.
10687   while (!Worklist.empty()) {
10688     Loop *L = Worklist.pop_back_val();
10689 
10690     // For the inner loops we actually process, form LCSSA to simplify the
10691     // transform.
10692     Changed |= formLCSSARecursively(*L, *DT, LI, SE);
10693 
10694     Changed |= CFGChanged |= processLoop(L);
10695   }
10696 
10697   // Process each loop nest in the function.
10698   return LoopVectorizeResult(Changed, CFGChanged);
10699 }
10700 
10701 PreservedAnalyses LoopVectorizePass::run(Function &F,
10702                                          FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,      SE,
                                      TLI, TTI, nullptr, nullptr, nullptr};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve loopinfo/dominator analyses with outer loop
  // vectorization. Until this is addressed, mark these analyses as preserved
  // only for non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }

  if (Result.MadeCFGChange) {
    // Making CFG changes likely means a loop got vectorized. Indicate that
    // extra simplification passes should be run.
    // TODO: MadeCFGChange is not a perfect proxy. Extra passes should only
    // be run if runtime checks have been added.
    AM.getResult<ShouldRunExtraVectorPasses>(F);
    PA.preserve<ShouldRunExtraVectorPasses>();
  } else {
    PA.preserveSet<CFGAnalyses>();
  }
  return PA;
10750 }
10751 
10752 void LoopVectorizePass::printPipeline(
10753     raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
10754   static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
10755       OS, MapClassName2PassName);
10756 
10757   OS << "<";
10758   OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
10759   OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
10760   OS << ">";
10761 }
10762