1 //===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
10 // and generates target-independent LLVM-IR.
11 // The vectorizer uses the TargetTransformInfo analysis to estimate the costs
12 // of instructions in order to estimate the profitability of vectorization.
13 //
14 // The loop vectorizer combines consecutive loop iterations into a single
15 // 'wide' iteration. After this transformation the index is incremented
16 // by the SIMD vector width, and not by one.
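//
// For example (purely illustrative), a scalar loop such as
//
//   for (i = 0; i < n; i++)
//     a[i] = b[i] + 42;
//
// is conceptually rewritten for a vectorization factor of 4 so that one
// 'wide' iteration computes a[i..i+3] = b[i..i+3] + 42 with SIMD
// instructions, and a scalar remainder loop (or a predicated tail) handles
// any leftover iterations.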
17 //
// This pass has four parts:
19 // 1. The main loop pass that drives the different parts.
20 // 2. LoopVectorizationLegality - A unit that checks for the legality
21 //    of the vectorization.
22 // 3. InnerLoopVectorizer - A unit that performs the actual
23 //    widening of instructions.
24 // 4. LoopVectorizationCostModel - A unit that checks for the profitability
25 //    of vectorization. It decides on the optimal vector width, which
26 //    can be one, if vectorization is not profitable.
27 //
// There is an ongoing development effort to migrate the loop vectorizer to the
29 // VPlan infrastructure and to introduce outer loop vectorization support (see
30 // docs/Proposal/VectorizationPlan.rst and
31 // http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
32 // purpose, we temporarily introduced the VPlan-native vectorization path: an
33 // alternative vectorization path that is natively implemented on top of the
34 // VPlan infrastructure. See EnableVPlanNativePath for enabling.
35 //
36 //===----------------------------------------------------------------------===//
37 //
38 // The reduction-variable vectorization is based on the paper:
39 //  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
40 //
41 // Variable uniformity checks are inspired by:
42 //  Karrenberg, R. and Hack, S. Whole Function Vectorization.
43 //
44 // The interleaved access vectorization is based on the paper:
45 //  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
46 //  Data for SIMD
47 //
48 // Other ideas/concepts are from:
49 //  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
50 //
51 //  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
52 //  Vectorizing Compilers.
53 //
54 //===----------------------------------------------------------------------===//
55 
56 #include "llvm/Transforms/Vectorize/LoopVectorize.h"
57 #include "LoopVectorizationPlanner.h"
58 #include "VPRecipeBuilder.h"
59 #include "VPlan.h"
60 #include "VPlanHCFGBuilder.h"
61 #include "VPlanPredicator.h"
62 #include "VPlanTransforms.h"
63 #include "llvm/ADT/APInt.h"
64 #include "llvm/ADT/ArrayRef.h"
65 #include "llvm/ADT/DenseMap.h"
66 #include "llvm/ADT/DenseMapInfo.h"
67 #include "llvm/ADT/Hashing.h"
68 #include "llvm/ADT/MapVector.h"
69 #include "llvm/ADT/None.h"
70 #include "llvm/ADT/Optional.h"
71 #include "llvm/ADT/STLExtras.h"
72 #include "llvm/ADT/SmallPtrSet.h"
73 #include "llvm/ADT/SmallSet.h"
74 #include "llvm/ADT/SmallVector.h"
75 #include "llvm/ADT/Statistic.h"
76 #include "llvm/ADT/StringRef.h"
77 #include "llvm/ADT/Twine.h"
78 #include "llvm/ADT/iterator_range.h"
79 #include "llvm/Analysis/AssumptionCache.h"
80 #include "llvm/Analysis/BasicAliasAnalysis.h"
81 #include "llvm/Analysis/BlockFrequencyInfo.h"
82 #include "llvm/Analysis/CFG.h"
83 #include "llvm/Analysis/CodeMetrics.h"
84 #include "llvm/Analysis/DemandedBits.h"
85 #include "llvm/Analysis/GlobalsModRef.h"
86 #include "llvm/Analysis/LoopAccessAnalysis.h"
87 #include "llvm/Analysis/LoopAnalysisManager.h"
88 #include "llvm/Analysis/LoopInfo.h"
89 #include "llvm/Analysis/LoopIterator.h"
90 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
91 #include "llvm/Analysis/ProfileSummaryInfo.h"
92 #include "llvm/Analysis/ScalarEvolution.h"
93 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
94 #include "llvm/Analysis/TargetLibraryInfo.h"
95 #include "llvm/Analysis/TargetTransformInfo.h"
96 #include "llvm/Analysis/VectorUtils.h"
97 #include "llvm/IR/Attributes.h"
98 #include "llvm/IR/BasicBlock.h"
99 #include "llvm/IR/CFG.h"
100 #include "llvm/IR/Constant.h"
101 #include "llvm/IR/Constants.h"
102 #include "llvm/IR/DataLayout.h"
103 #include "llvm/IR/DebugInfoMetadata.h"
104 #include "llvm/IR/DebugLoc.h"
105 #include "llvm/IR/DerivedTypes.h"
106 #include "llvm/IR/DiagnosticInfo.h"
107 #include "llvm/IR/Dominators.h"
108 #include "llvm/IR/Function.h"
109 #include "llvm/IR/IRBuilder.h"
110 #include "llvm/IR/InstrTypes.h"
111 #include "llvm/IR/Instruction.h"
112 #include "llvm/IR/Instructions.h"
113 #include "llvm/IR/IntrinsicInst.h"
114 #include "llvm/IR/Intrinsics.h"
115 #include "llvm/IR/LLVMContext.h"
116 #include "llvm/IR/Metadata.h"
117 #include "llvm/IR/Module.h"
118 #include "llvm/IR/Operator.h"
119 #include "llvm/IR/PatternMatch.h"
120 #include "llvm/IR/Type.h"
121 #include "llvm/IR/Use.h"
122 #include "llvm/IR/User.h"
123 #include "llvm/IR/Value.h"
124 #include "llvm/IR/ValueHandle.h"
125 #include "llvm/IR/Verifier.h"
126 #include "llvm/InitializePasses.h"
127 #include "llvm/Pass.h"
128 #include "llvm/Support/Casting.h"
129 #include "llvm/Support/CommandLine.h"
130 #include "llvm/Support/Compiler.h"
131 #include "llvm/Support/Debug.h"
132 #include "llvm/Support/ErrorHandling.h"
133 #include "llvm/Support/InstructionCost.h"
134 #include "llvm/Support/MathExtras.h"
135 #include "llvm/Support/raw_ostream.h"
136 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
137 #include "llvm/Transforms/Utils/InjectTLIMappings.h"
138 #include "llvm/Transforms/Utils/LoopSimplify.h"
139 #include "llvm/Transforms/Utils/LoopUtils.h"
140 #include "llvm/Transforms/Utils/LoopVersioning.h"
141 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
142 #include "llvm/Transforms/Utils/SizeOpts.h"
143 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
144 #include <algorithm>
145 #include <cassert>
146 #include <cstdint>
147 #include <cstdlib>
148 #include <functional>
149 #include <iterator>
150 #include <limits>
151 #include <memory>
152 #include <string>
153 #include <tuple>
154 #include <utility>
155 
156 using namespace llvm;
157 
158 #define LV_NAME "loop-vectorize"
159 #define DEBUG_TYPE LV_NAME
160 
161 #ifndef NDEBUG
162 const char VerboseDebug[] = DEBUG_TYPE "-verbose";
163 #endif
164 
165 /// @{
166 /// Metadata attribute names
167 const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
168 const char LLVMLoopVectorizeFollowupVectorized[] =
169     "llvm.loop.vectorize.followup_vectorized";
170 const char LLVMLoopVectorizeFollowupEpilogue[] =
171     "llvm.loop.vectorize.followup_epilogue";
172 /// @}
173 
174 STATISTIC(LoopsVectorized, "Number of loops vectorized");
175 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
176 STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");
177 
178 static cl::opt<bool> EnableEpilogueVectorization(
179     "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
180     cl::desc("Enable vectorization of epilogue loops."));
181 
182 static cl::opt<unsigned> EpilogueVectorizationForceVF(
183     "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
184     cl::desc("When epilogue vectorization is enabled, and a value greater than "
185              "1 is specified, forces the given VF for all applicable epilogue "
186              "loops."));
187 
188 static cl::opt<unsigned> EpilogueVectorizationMinVF(
189     "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
190     cl::desc("Only loops with vectorization factor equal to or larger than "
191              "the specified value are considered for epilogue vectorization."));
192 
193 /// Loops with a known constant trip count below this number are vectorized only
194 /// if no scalar iteration overheads are incurred.
195 static cl::opt<unsigned> TinyTripCountVectorThreshold(
196     "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
197     cl::desc("Loops with a constant trip count that is smaller than this "
198              "value are vectorized only if no scalar iteration overheads "
199              "are incurred."));
200 
201 static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
202     "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
203     cl::desc("The maximum allowed number of runtime memory checks with a "
204              "vectorize(enable) pragma."));
205 
206 // Option prefer-predicate-over-epilogue indicates that an epilogue is undesired,
207 // that predication is preferred, and this lists all options. I.e., the
208 // vectorizer will try to fold the tail-loop (epilogue) into the vector body
209 // and predicate the instructions accordingly. If tail-folding fails, there are
210 // different fallback strategies depending on these values:
211 namespace PreferPredicateTy {
212   enum Option {
213     ScalarEpilogue = 0,
214     PredicateElseScalarEpilogue,
215     PredicateOrDontVectorize
216   };
217 } // namespace PreferPredicateTy
218 
219 static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
220     "prefer-predicate-over-epilogue",
221     cl::init(PreferPredicateTy::ScalarEpilogue),
222     cl::Hidden,
223     cl::desc("Tail-folding and predication preferences over creating a scalar "
224              "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "Prefer tail-folding, create scalar epilogue if "
                          "tail-folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "Prefer tail-folding, don't attempt vectorization "
                          "if tail-folding fails.")));
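
// For example (illustrative), tail folding with a scalar-epilogue fallback can
// be requested when invoking opt with:
//   -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue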
236 
237 static cl::opt<bool> MaximizeBandwidth(
238     "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
239     cl::desc("Maximize bandwidth when selecting vectorization factor which "
240              "will be determined by the smallest type in loop."));
241 
242 static cl::opt<bool> EnableInterleavedMemAccesses(
243     "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
244     cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
245 
246 /// An interleave-group may need masking if it resides in a block that needs
247 /// predication, or in order to mask away gaps.
248 static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
249     "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
250     cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));
251 
252 static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
253     "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
255              "below this number"));
256 
257 static cl::opt<unsigned> ForceTargetNumScalarRegs(
258     "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
259     cl::desc("A flag that overrides the target's number of scalar registers."));
260 
261 static cl::opt<unsigned> ForceTargetNumVectorRegs(
262     "force-target-num-vector-regs", cl::init(0), cl::Hidden,
263     cl::desc("A flag that overrides the target's number of vector registers."));
264 
265 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
266     "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
267     cl::desc("A flag that overrides the target's max interleave factor for "
268              "scalar loops."));
269 
270 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
271     "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
272     cl::desc("A flag that overrides the target's max interleave factor for "
273              "vectorized loops."));
274 
275 static cl::opt<unsigned> ForceTargetInstructionCost(
276     "force-target-instruction-cost", cl::init(0), cl::Hidden,
277     cl::desc("A flag that overrides the target's expected cost for "
278              "an instruction to a single constant value. Mostly "
279              "useful for getting consistent testing."));
280 
281 static cl::opt<bool> ForceTargetSupportsScalableVectors(
282     "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
283     cl::desc(
284         "Pretend that scalable vectors are supported, even if the target does "
285         "not support them. This flag should only be used for testing."));
286 
287 static cl::opt<unsigned> SmallLoopCost(
288     "small-loop-cost", cl::init(20), cl::Hidden,
289     cl::desc(
290         "The cost of a loop that is considered 'small' by the interleaver."));
291 
292 static cl::opt<bool> LoopVectorizeWithBlockFrequency(
293     "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
294     cl::desc("Enable the use of the block frequency analysis to access PGO "
295              "heuristics minimizing code growth in cold regions and being more "
296              "aggressive in hot regions."));
297 
298 // Runtime interleave loops for load/store throughput.
299 static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
300     "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
301     cl::desc(
302         "Enable runtime interleaving until load/store ports are saturated"));
303 
304 /// Interleave small loops with scalar reductions.
305 static cl::opt<bool> InterleaveSmallLoopScalarReduction(
306     "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
307     cl::desc("Enable interleaving for loops with small iteration counts that "
308              "contain scalar reductions to expose ILP."));
309 
310 static cl::opt<bool> EnableIndVarRegisterHeur(
311     "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
312     cl::desc("Count the induction variable only once when interleaving"));
313 
314 static cl::opt<bool> EnableCondStoresVectorization(
315     "enable-cond-stores-vec", cl::init(true), cl::Hidden,
316     cl::desc("Enable if predication of stores during vectorization."));
317 
318 static cl::opt<unsigned> MaxNestedScalarReductionIC(
319     "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
320     cl::desc("The maximum interleave count to use when interleaving a scalar "
321              "reduction in a nested loop."));
322 
323 static cl::opt<bool>
324     PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
325                            cl::Hidden,
326                            cl::desc("Prefer in-loop vector reductions, "
                                    "overriding the target's preference."));
328 
329 static cl::opt<bool> ForceOrderedReductions(
330     "force-ordered-reductions", cl::init(false), cl::Hidden,
    cl::desc("Enable the vectorization of loops with in-order (strict) "
332              "FP reductions"));
333 
334 static cl::opt<bool> PreferPredicatedReductionSelect(
335     "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
336     cl::desc(
337         "Prefer predicating a reduction operation over an after loop select."));
338 
339 cl::opt<bool> EnableVPlanNativePath(
340     "enable-vplan-native-path", cl::init(false), cl::Hidden,
341     cl::desc("Enable VPlan-native vectorization path with "
342              "support for outer loop vectorization."));
343 
344 // FIXME: Remove this switch once we have divergence analysis. Currently we
345 // assume divergent non-backedge branches when this switch is true.
346 cl::opt<bool> EnableVPlanPredication(
347     "enable-vplan-predication", cl::init(false), cl::Hidden,
348     cl::desc("Enable VPlan-native vectorization path predicator with "
349              "support for outer loop vectorization."));
350 
351 // This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
353 // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
354 // verification of the H-CFGs built.
355 static cl::opt<bool> VPlanBuildStressTest(
356     "vplan-build-stress-test", cl::init(false), cl::Hidden,
357     cl::desc(
358         "Build VPlan for every supported loop nest in the function and bail "
359         "out right after the build (stress test the VPlan H-CFG construction "
360         "in the VPlan-native vectorization path)."));
361 
362 cl::opt<bool> llvm::EnableLoopInterleaving(
363     "interleave-loops", cl::init(true), cl::Hidden,
364     cl::desc("Enable loop interleaving in Loop vectorization passes"));
365 cl::opt<bool> llvm::EnableLoopVectorization(
366     "vectorize-loops", cl::init(true), cl::Hidden,
367     cl::desc("Run the Loop vectorization passes"));
368 
369 cl::opt<bool> PrintVPlansInDotFormat(
370     "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
371     cl::desc("Use dot format instead of plain text when dumping VPlans"));
372 
373 /// A helper function that returns true if the given type is irregular. The
374 /// type is irregular if its allocated size doesn't equal the store size of an
375 /// element of the corresponding vector type.
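/// For example, with a typical x86-64 data layout (given as an illustration),
/// x86_fp80 occupies 80 bits of storage but is allocated as 128 bits, so it
/// is considered irregular.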
376 static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
377   // Determine if an array of N elements of type Ty is "bitcast compatible"
378   // with a <N x Ty> vector.
379   // This is only true if there is no padding between the array elements.
380   return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
381 }
382 
383 /// A helper function that returns the reciprocal of the block probability of
384 /// predicated blocks. If we return X, we are assuming the predicated block
385 /// will execute once for every X iterations of the loop header.
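/// For example, with the current return value of 2, a predicated block is
/// assumed to execute on every other iteration, so its cost is halved when
/// accumulating the expected cost of one loop iteration.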
386 ///
387 /// TODO: We should use actual block probability here, if available. Currently,
388 ///       we always assume predicated blocks have a 50% chance of executing.
389 static unsigned getReciprocalPredBlockProb() { return 2; }
390 
391 /// A helper function that returns an integer or floating-point constant with
392 /// value C.
393 static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
394   return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
395                            : ConstantFP::get(Ty, C);
396 }
397 
398 /// Returns "best known" trip count for the specified loop \p L as defined by
399 /// the following procedure:
400 ///   1) Returns exact trip count if it is known.
401 ///   2) Returns expected trip count according to profile data if any.
402 ///   3) Returns upper bound estimate if it is known.
403 ///   4) Returns None if all of the above failed.
404 static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
405   // Check if exact trip count is known.
406   if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
407     return ExpectedTC;
408 
409   // Check if there is an expected trip count available from profile data.
410   if (LoopVectorizeWithBlockFrequency)
411     if (auto EstimatedTC = getLoopEstimatedTripCount(L))
412       return EstimatedTC;
413 
414   // Check if upper bound estimate is known.
415   if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
416     return ExpectedTC;
417 
418   return None;
419 }
420 
421 // Forward declare GeneratedRTChecks.
422 class GeneratedRTChecks;
423 
424 namespace llvm {
425 
426 AnalysisKey ShouldRunExtraVectorPasses::Key;
427 
428 /// InnerLoopVectorizer vectorizes loops which contain only one basic
429 /// block to a specified vectorization factor (VF).
430 /// This class performs the widening of scalars into vectors, or multiple
431 /// scalars. This class also implements the following features:
432 /// * It inserts an epilogue loop for handling loops that don't have iteration
433 ///   counts that are known to be a multiple of the vectorization factor.
434 /// * It handles the code generation for reduction variables.
435 /// * Scalarization (implementation using scalars) of un-vectorizable
436 ///   instructions.
437 /// InnerLoopVectorizer does not perform any vectorization-legality
438 /// checks, and relies on the caller to check for the different legality
439 /// aspects. The InnerLoopVectorizer relies on the
440 /// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found.
442 class InnerLoopVectorizer {
443 public:
444   InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
445                       LoopInfo *LI, DominatorTree *DT,
446                       const TargetLibraryInfo *TLI,
447                       const TargetTransformInfo *TTI, AssumptionCache *AC,
448                       OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
449                       unsigned UnrollFactor, LoopVectorizationLegality *LVL,
450                       LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
451                       ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
452       : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
453         AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
454         Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
455         PSI(PSI), RTChecks(RTChecks) {
456     // Query this against the original loop and save it here because the profile
457     // of the original loop header may change as the transformation happens.
458     OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
459         OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
460   }
461 
462   virtual ~InnerLoopVectorizer() = default;
463 
464   /// Create a new empty loop that will contain vectorized instructions later
465   /// on, while the old loop will be used as the scalar remainder. Control flow
466   /// is generated around the vectorized (and scalar epilogue) loops consisting
467   /// of various checks and bypasses. Return the pre-header block of the new
468   /// loop and the start value for the canonical induction, if it is != 0. The
469   /// latter is the case when vectorizing the epilogue loop. In the case of
  /// epilogue vectorization, this function is overridden to handle the more
471   /// complex control flow around the loops.
472   virtual std::pair<BasicBlock *, Value *> createVectorizedLoopSkeleton();
473 
474   /// Widen a single call instruction within the innermost loop.
475   void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
476                             VPTransformState &State);
477 
  /// Fix the vectorized code, taking care of header PHIs, live-outs, and more.
479   void fixVectorizedLoop(VPTransformState &State);
480 
481   // Return true if any runtime check is added.
482   bool areSafetyChecksAdded() { return AddedSafetyChecks; }
483 
484   /// A type for vectorized values in the new loop. Each value from the
485   /// original loop, when vectorized, is represented by UF vector values in the
486   /// new unrolled loop, where UF is the unroll factor.
487   using VectorParts = SmallVector<Value *, 2>;
488 
489   /// Vectorize a single first-order recurrence or pointer induction PHINode in
490   /// a block. This method handles the induction variable canonicalization. It
491   /// supports both VF = 1 for unrolled loops and arbitrary length vectors.
492   void widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR,
493                            VPTransformState &State);
494 
495   /// A helper function to scalarize a single Instruction in the innermost loop.
496   /// Generates a sequence of scalar instances for each lane between \p MinLane
497   /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
498   /// inclusive. Uses the VPValue operands from \p RepRecipe instead of \p
499   /// Instr's operands.
500   void scalarizeInstruction(Instruction *Instr, VPReplicateRecipe *RepRecipe,
501                             const VPIteration &Instance, bool IfPredicateInstr,
502                             VPTransformState &State);
503 
504   /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
505   /// is provided, the integer induction variable will first be truncated to
506   /// the corresponding type. \p CanonicalIV is the scalar value generated for
507   /// the canonical induction variable.
508   void widenIntOrFpInduction(PHINode *IV, VPWidenIntOrFpInductionRecipe *Def,
509                              VPTransformState &State, Value *CanonicalIV);
510 
511   /// Construct the vector value of a scalarized value \p V one lane at a time.
512   void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
513                                  VPTransformState &State);
514 
515   /// Try to vectorize interleaved access group \p Group with the base address
516   /// given in \p Addr, optionally masking the vector operations if \p
517   /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
518   /// values in the vectorized loop.
519   void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
520                                 ArrayRef<VPValue *> VPDefs,
521                                 VPTransformState &State, VPValue *Addr,
522                                 ArrayRef<VPValue *> StoredValues,
523                                 VPValue *BlockInMask = nullptr);
524 
  /// Set the debug location in the builder using the debug location in \p V.
  /// If \p CustomBuilder is None, the class member's Builder is used.
527   void setDebugLocFromInst(const Value *V,
528                            Optional<IRBuilder<> *> CustomBuilder = None);
529 
530   /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
531   void fixNonInductionPHIs(VPTransformState &State);
532 
533   /// Returns true if the reordering of FP operations is not allowed, but we are
534   /// able to vectorize with strict in-order reductions for the given RdxDesc.
535   bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc);
536 
537   /// Create a broadcast instruction. This method generates a broadcast
538   /// instruction (shuffle) for loop invariant values and for the induction
539   /// value. If this is the induction variable then we extend it to N, N+1, ...
540   /// this is needed because each iteration in the loop corresponds to a SIMD
541   /// element.
542   virtual Value *getBroadcastInstrs(Value *V);
543 
544   /// Add metadata from one instruction to another.
545   ///
546   /// This includes both the original MDs from \p From and additional ones (\see
547   /// addNewMetadata).  Use this for *newly created* instructions in the vector
548   /// loop.
549   void addMetadata(Instruction *To, Instruction *From);
550 
551   /// Similar to the previous function but it adds the metadata to a
552   /// vector of instructions.
553   void addMetadata(ArrayRef<Value *> To, Instruction *From);
554 
555   // Returns the resume value (bc.merge.rdx) for a reduction as
556   // generated by fixReduction.
557   PHINode *getReductionResumeValue(const RecurrenceDescriptor &RdxDesc);
558 
559 protected:
560   friend class LoopVectorizationPlanner;
561 
562   /// A small list of PHINodes.
563   using PhiVector = SmallVector<PHINode *, 4>;
564 
565   /// A type for scalarized values in the new loop. Each value from the
566   /// original loop, when scalarized, is represented by UF x VF scalar values
567   /// in the new unrolled loop, where UF is the unroll factor and VF is the
568   /// vectorization factor.
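  /// For example, with UF = 2 and VF = 4, a scalarized value is represented
  /// by two groups of four scalar values, one group per unrolled part.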
569   using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;
570 
571   /// Set up the values of the IVs correctly when exiting the vector loop.
572   void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
573                     Value *CountRoundDown, Value *EndValue,
574                     BasicBlock *MiddleBlock);
575 
576   /// Introduce a conditional branch (on true, condition to be set later) at the
577   /// end of the header=latch connecting it to itself (across the backedge) and
578   /// to the exit block of \p L.
579   void createHeaderBranch(Loop *L);
580 
581   /// Handle all cross-iteration phis in the header.
582   void fixCrossIterationPHIs(VPTransformState &State);
583 
584   /// Create the exit value of first order recurrences in the middle block and
585   /// update their users.
586   void fixFirstOrderRecurrence(VPFirstOrderRecurrencePHIRecipe *PhiR,
587                                VPTransformState &State);
588 
589   /// Create code for the loop exit value of the reduction.
590   void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);
591 
592   /// Clear NSW/NUW flags from reduction instructions if necessary.
593   void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
594                                VPTransformState &State);
595 
596   /// Fixup the LCSSA phi nodes in the unique exit block.  This simply
597   /// means we need to add the appropriate incoming value from the middle
598   /// block as exiting edges from the scalar epilogue loop (if present) are
599   /// already in place, and we exit the vector loop exclusively to the middle
600   /// block.
601   void fixLCSSAPHIs(VPTransformState &State);
602 
603   /// Iteratively sink the scalarized operands of a predicated instruction into
604   /// the block that was created for it.
605   void sinkScalarOperands(Instruction *PredInst);
606 
607   /// Shrinks vector element sizes to the smallest bitwidth they can be legally
608   /// represented as.
609   void truncateToMinimalBitwidths(VPTransformState &State);
610 
611   /// Compute scalar induction steps. \p ScalarIV is the scalar induction
612   /// variable on which to base the steps, \p Step is the size of the step, and
613   /// \p EntryVal is the value from the original loop that maps to the steps.
614   /// Note that \p EntryVal doesn't have to be an induction variable - it
615   /// can also be a truncate instruction.
616   void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
617                         const InductionDescriptor &ID, VPValue *Def,
618                         VPTransformState &State);
619 
620   /// Create a vector induction phi node based on an existing scalar one. \p
621   /// EntryVal is the value from the original loop that maps to the vector phi
622   /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
623   /// truncate instruction, instead of widening the original IV, we widen a
624   /// version of the IV truncated to \p EntryVal's type.
625   void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
626                                        Value *Step, Value *Start,
627                                        Instruction *EntryVal, VPValue *Def,
628                                        VPTransformState &State);
629 
630   /// Returns (and creates if needed) the original loop trip count.
631   Value *getOrCreateTripCount(Loop *NewLoop);
632 
633   /// Returns (and creates if needed) the trip count of the widened loop.
634   Value *getOrCreateVectorTripCount(Loop *NewLoop);
635 
636   /// Returns a bitcasted value to the requested vector type.
637   /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
638   Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
639                                 const DataLayout &DL);
640 
641   /// Emit a bypass check to see if the vector trip count is zero, including if
642   /// it overflows.
643   void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
644 
645   /// Emit a bypass check to see if all of the SCEV assumptions we've
646   /// had to make are correct. Returns the block containing the checks or
647   /// nullptr if no checks have been added.
648   BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass);
649 
650   /// Emit bypass checks to check any memory assumptions we may have made.
651   /// Returns the block containing the checks or nullptr if no checks have been
652   /// added.
653   BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);
654 
655   /// Compute the transformed value of Index at offset StartValue using step
656   /// StepValue.
657   /// For integer induction, returns StartValue + Index * StepValue.
658   /// For pointer induction, returns StartValue[Index * StepValue].
659   /// FIXME: The newly created binary instructions should contain nsw/nuw
660   /// flags, which can be found from the original scalar operations.
661   Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
662                               const DataLayout &DL,
663                               const InductionDescriptor &ID,
664                               BasicBlock *VectorHeader) const;
665 
666   /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
667   /// vector loop preheader, middle block and scalar preheader. Also
668   /// allocate a loop object for the new vector loop and return it.
669   Loop *createVectorLoopSkeleton(StringRef Prefix);
670 
671   /// Create new phi nodes for the induction variables to resume iteration count
672   /// in the scalar epilogue, from where the vectorized loop left off.
673   /// In cases where the loop skeleton is more complicated (eg. epilogue
674   /// vectorization) and the resume values can come from an additional bypass
675   /// block, the \p AdditionalBypass pair provides information about the bypass
676   /// block and the end value on the edge from bypass to this loop.
677   void createInductionResumeValues(
678       Loop *L,
679       std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});
680 
681   /// Complete the loop skeleton by adding debug MDs, creating appropriate
682   /// conditional branches in the middle block, preparing the builder and
683   /// running the verifier. Take in the vector loop \p L as argument, and return
684   /// the preheader of the completed vector loop.
685   BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);
686 
687   /// Add additional metadata to \p To that was not present on \p Orig.
688   ///
689   /// Currently this is used to add the noalias annotations based on the
690   /// inserted memchecks.  Use this for instructions that are *cloned* into the
691   /// vector loop.
692   void addNewMetadata(Instruction *To, const Instruction *Orig);
693 
694   /// Collect poison-generating recipes that may generate a poison value that is
695   /// used after vectorization, even when their operands are not poison. Those
696   /// recipes meet the following conditions:
697   ///  * Contribute to the address computation of a recipe generating a widen
698   ///    memory load/store (VPWidenMemoryInstructionRecipe or
699   ///    VPInterleaveRecipe).
700   ///  * Such a widen memory load/store has at least one underlying Instruction
701   ///    that is in a basic block that needs predication and after vectorization
702   ///    the generated instruction won't be predicated.
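  /// For example, an "inbounds" GEP feeding the address of a load in a
  /// predicated block may become unconditional after vectorization; its
  /// inbounds flag must then be dropped, because the GEP may now execute on
  /// iterations where it previously would not have.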
703   void collectPoisonGeneratingRecipes(VPTransformState &State);
704 
705   /// Allow subclasses to override and print debug traces before/after vplan
706   /// execution, when trace information is requested.
707   virtual void printDebugTracesAtStart(){};
708   virtual void printDebugTracesAtEnd(){};
709 
710   /// The original loop.
711   Loop *OrigLoop;
712 
713   /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
714   /// dynamic knowledge to simplify SCEV expressions and converts them to a
715   /// more usable form.
716   PredicatedScalarEvolution &PSE;
717 
718   /// Loop Info.
719   LoopInfo *LI;
720 
721   /// Dominator Tree.
722   DominatorTree *DT;
723 
724   /// Alias Analysis.
725   AAResults *AA;
726 
727   /// Target Library Info.
728   const TargetLibraryInfo *TLI;
729 
730   /// Target Transform Info.
731   const TargetTransformInfo *TTI;
732 
733   /// Assumption Cache.
734   AssumptionCache *AC;
735 
736   /// Interface to emit optimization remarks.
737   OptimizationRemarkEmitter *ORE;
738 
739   /// LoopVersioning.  It's only set up (non-null) if memchecks were
740   /// used.
741   ///
742   /// This is currently only used to add no-alias metadata based on the
  /// memchecks.  The actual versioning is performed manually.
744   std::unique_ptr<LoopVersioning> LVer;
745 
746   /// The vectorization SIMD factor to use. Each vector will have this many
747   /// vector elements.
748   ElementCount VF;
749 
750   /// The vectorization unroll factor to use. Each scalar is vectorized to this
751   /// many different vector instructions.
752   unsigned UF;
753 
754   /// The builder that we use
755   IRBuilder<> Builder;
756 
757   // --- Vectorization state ---
758 
759   /// The vector-loop preheader.
760   BasicBlock *LoopVectorPreHeader;
761 
762   /// The scalar-loop preheader.
763   BasicBlock *LoopScalarPreHeader;
764 
765   /// Middle Block between the vector and the scalar.
766   BasicBlock *LoopMiddleBlock;
767 
768   /// The unique ExitBlock of the scalar loop if one exists.  Note that
769   /// there can be multiple exiting edges reaching this block.
770   BasicBlock *LoopExitBlock;
771 
772   /// The vector loop body.
773   BasicBlock *LoopVectorBody;
774 
775   /// The scalar loop body.
776   BasicBlock *LoopScalarBody;
777 
778   /// A list of all bypass blocks. The first block is the entry of the loop.
779   SmallVector<BasicBlock *, 4> LoopBypassBlocks;
780 
781   /// Store instructions that were predicated.
782   SmallVector<Instruction *, 4> PredicatedInstructions;
783 
784   /// Trip count of the original loop.
785   Value *TripCount = nullptr;
786 
787   /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
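  /// For example, with TripCount = 37, VF = 4 and UF = 2 this is 32, leaving
  /// 5 iterations to the scalar remainder loop.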
788   Value *VectorTripCount = nullptr;
789 
790   /// The legality analysis.
791   LoopVectorizationLegality *Legal;
792 
  /// The profitability analysis.
794   LoopVectorizationCostModel *Cost;
795 
796   // Record whether runtime checks are added.
797   bool AddedSafetyChecks = false;
798 
799   // Holds the end values for each induction variable. We save the end values
800   // so we can later fix-up the external users of the induction variables.
801   DenseMap<PHINode *, Value *> IVEndValues;
802 
803   // Vector of original scalar PHIs whose corresponding widened PHIs need to be
804   // fixed up at the end of vector code generation.
805   SmallVector<PHINode *, 8> OrigPHIsToFix;
806 
807   /// BFI and PSI are used to check for profile guided size optimizations.
808   BlockFrequencyInfo *BFI;
809   ProfileSummaryInfo *PSI;
810 
  // Whether this loop should be optimized for size based on profile-guided
  // size optimizations.
813   bool OptForSizeBasedOnProfile;
814 
815   /// Structure to hold information about generated runtime checks, responsible
816   /// for cleaning the checks, if vectorization turns out unprofitable.
817   GeneratedRTChecks &RTChecks;
818 
819   // Holds the resume values for reductions in the loops, used to set the
820   // correct start value of reduction PHIs when vectorizing the epilogue.
821   SmallMapVector<const RecurrenceDescriptor *, PHINode *, 4>
822       ReductionResumeValues;
823 };
824 
825 class InnerLoopUnroller : public InnerLoopVectorizer {
826 public:
827   InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
828                     LoopInfo *LI, DominatorTree *DT,
829                     const TargetLibraryInfo *TLI,
830                     const TargetTransformInfo *TTI, AssumptionCache *AC,
831                     OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
832                     LoopVectorizationLegality *LVL,
833                     LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
834                     ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
835       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
836                             ElementCount::getFixed(1), UnrollFactor, LVL, CM,
837                             BFI, PSI, Check) {}
838 
839 private:
840   Value *getBroadcastInstrs(Value *V) override;
841 };
842 
843 /// Encapsulate information regarding vectorization of a loop and its epilogue.
844 /// This information is meant to be updated and used across two stages of
845 /// epilogue vectorization.
846 struct EpilogueLoopVectorizationInfo {
847   ElementCount MainLoopVF = ElementCount::getFixed(0);
848   unsigned MainLoopUF = 0;
849   ElementCount EpilogueVF = ElementCount::getFixed(0);
850   unsigned EpilogueUF = 0;
851   BasicBlock *MainLoopIterationCountCheck = nullptr;
852   BasicBlock *EpilogueIterationCountCheck = nullptr;
853   BasicBlock *SCEVSafetyCheck = nullptr;
854   BasicBlock *MemSafetyCheck = nullptr;
855   Value *TripCount = nullptr;
856   Value *VectorTripCount = nullptr;
857 
858   EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF,
859                                 ElementCount EVF, unsigned EUF)
860       : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF) {
861     assert(EUF == 1 &&
862            "A high UF for the epilogue loop is likely not beneficial.");
863   }
864 };
865 
866 /// An extension of the inner loop vectorizer that creates a skeleton for a
867 /// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the VPlan on a given loop twice, first to set up the
/// skeleton and vectorize the main loop, and second to complete the skeleton
/// from the first step and vectorize the epilogue.  This is achieved by
871 /// deriving two concrete strategy classes from this base class and invoking
872 /// them in succession from the loop vectorizer planner.
873 class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
874 public:
875   InnerLoopAndEpilogueVectorizer(
876       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
877       DominatorTree *DT, const TargetLibraryInfo *TLI,
878       const TargetTransformInfo *TTI, AssumptionCache *AC,
879       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
880       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
881       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
882       GeneratedRTChecks &Checks)
883       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
884                             EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
885                             Checks),
886         EPI(EPI) {}
887 
888   // Override this function to handle the more complex control flow around the
889   // three loops.
890   std::pair<BasicBlock *, Value *>
891   createVectorizedLoopSkeleton() final override {
892     return createEpilogueVectorizedLoopSkeleton();
893   }
894 
895   /// The interface for creating a vectorized skeleton using one of two
896   /// different strategies, each corresponding to one execution of the vplan
897   /// as described above.
898   virtual std::pair<BasicBlock *, Value *>
899   createEpilogueVectorizedLoopSkeleton() = 0;
900 
901   /// Holds and updates state information required to vectorize the main loop
902   /// and its epilogue in two separate passes. This setup helps us avoid
903   /// regenerating and recomputing runtime safety checks. It also helps us to
904   /// shorten the iteration-count-check path length for the cases where the
905   /// iteration count of the loop is so small that the main vector loop is
906   /// completely skipped.
907   EpilogueLoopVectorizationInfo &EPI;
908 };
909 
910 /// A specialized derived class of inner loop vectorizer that performs
911 /// vectorization of *main* loops in the process of vectorizing loops and their
912 /// epilogues.
913 class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
914 public:
915   EpilogueVectorizerMainLoop(
916       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
917       DominatorTree *DT, const TargetLibraryInfo *TLI,
918       const TargetTransformInfo *TTI, AssumptionCache *AC,
919       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
920       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
921       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
922       GeneratedRTChecks &Check)
923       : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
924                                        EPI, LVL, CM, BFI, PSI, Check) {}
925   /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e. the first pass of VPlan execution).
927   std::pair<BasicBlock *, Value *>
928   createEpilogueVectorizedLoopSkeleton() final override;
929 
930 protected:
931   /// Emits an iteration count bypass check once for the main loop (when \p
932   /// ForEpilogue is false) and once for the epilogue loop (when \p
933   /// ForEpilogue is true).
934   BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
935                                              bool ForEpilogue);
936   void printDebugTracesAtStart() override;
937   void printDebugTracesAtEnd() override;
938 };
939 
/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *epilogue* loops in the process of vectorizing loops and
/// their epilogues.
943 class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
944 public:
945   EpilogueVectorizerEpilogueLoop(
946       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
947       DominatorTree *DT, const TargetLibraryInfo *TLI,
948       const TargetTransformInfo *TTI, AssumptionCache *AC,
949       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
950       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
951       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
952       GeneratedRTChecks &Checks)
953       : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
954                                        EPI, LVL, CM, BFI, PSI, Checks) {}
955   /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e. the second pass of VPlan execution).
957   std::pair<BasicBlock *, Value *>
958   createEpilogueVectorizedLoopSkeleton() final override;
959 
960 protected:
961   /// Emits an iteration count bypass check after the main vector loop has
962   /// finished to see if there are any iterations left to execute by either
963   /// the vector epilogue or the scalar epilogue.
964   BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
965                                                       BasicBlock *Bypass,
966                                                       BasicBlock *Insert);
967   void printDebugTracesAtStart() override;
968   void printDebugTracesAtEnd() override;
969 };
970 } // end namespace llvm
971 
/// Look for a meaningful debug location on the instruction or its
/// operands.
974 static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
975   if (!I)
976     return I;
977 
978   DebugLoc Empty;
979   if (I->getDebugLoc() != Empty)
980     return I;
981 
982   for (Use &Op : I->operands()) {
983     if (Instruction *OpInst = dyn_cast<Instruction>(Op))
984       if (OpInst->getDebugLoc() != Empty)
985         return OpInst;
986   }
987 
988   return I;
989 }
990 
991 void InnerLoopVectorizer::setDebugLocFromInst(
992     const Value *V, Optional<IRBuilder<> *> CustomBuilder) {
993   IRBuilder<> *B = (CustomBuilder == None) ? &Builder : *CustomBuilder;
994   if (const Instruction *Inst = dyn_cast_or_null<Instruction>(V)) {
995     const DILocation *DIL = Inst->getDebugLoc();
996 
    // When an FSDiscriminator is enabled, we don't need to add the multiply
    // factors to the discriminators.
999     if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
1000         !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
1001       // FIXME: For scalable vectors, assume vscale=1.
1002       auto NewDIL =
1003           DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
1004       if (NewDIL)
1005         B->SetCurrentDebugLocation(NewDIL.getValue());
1006       else
1007         LLVM_DEBUG(dbgs()
1008                    << "Failed to create new discriminator: "
1009                    << DIL->getFilename() << " Line: " << DIL->getLine());
1010     } else
1011       B->SetCurrentDebugLocation(DIL);
1012   } else
1013     B->SetCurrentDebugLocation(DebugLoc());
1014 }
1015 
1016 /// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
1017 /// is passed, the message relates to that particular instruction.
1018 #ifndef NDEBUG
1019 static void debugVectorizationMessage(const StringRef Prefix,
1020                                       const StringRef DebugMsg,
1021                                       Instruction *I) {
1022   dbgs() << "LV: " << Prefix << DebugMsg;
1023   if (I != nullptr)
1024     dbgs() << " " << *I;
1025   else
1026     dbgs() << '.';
1027   dbgs() << '\n';
1028 }
1029 #endif
1030 
1031 /// Create an analysis remark that explains why vectorization failed
1032 ///
1033 /// \p PassName is the name of the pass (e.g. can be AlwaysPrint).  \p
1034 /// RemarkName is the identifier for the remark.  If \p I is passed it is an
1035 /// instruction that prevents vectorization.  Otherwise \p TheLoop is used for
1036 /// the location of the remark.  \return the remark object that can be
1037 /// streamed to.
1038 static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
1039     StringRef RemarkName, Loop *TheLoop, Instruction *I) {
1040   Value *CodeRegion = TheLoop->getHeader();
1041   DebugLoc DL = TheLoop->getStartLoc();
1042 
1043   if (I) {
1044     CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back to
    // using the loop's.
1047     if (I->getDebugLoc())
1048       DL = I->getDebugLoc();
1049   }
1050 
1051   return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
1052 }
1053 
1054 namespace llvm {
1055 
1056 /// Return a value for Step multiplied by VF.
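/// For example (illustrative), with an i64 type, Step = 2 and a fixed VF of 4
/// this returns the constant 8; with a scalable VF of <vscale x 4> it returns
/// a runtime value equivalent to vscale * 8.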
1057 Value *createStepForVF(IRBuilder<> &B, Type *Ty, ElementCount VF,
1058                        int64_t Step) {
1059   assert(Ty->isIntegerTy() && "Expected an integer step");
1060   Constant *StepVal = ConstantInt::get(Ty, Step * VF.getKnownMinValue());
1061   return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
1062 }
1063 
1064 /// Return the runtime value for VF.
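/// For a fixed VF this is simply a constant (e.g. 8 for VF = 8); for a
/// scalable VF it is the known minimum multiplied by vscale at runtime.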
1065 Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF) {
1066   Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
1067   return VF.isScalable() ? B.CreateVScale(EC) : EC;
1068 }
1069 
1070 static Value *getRuntimeVFAsFloat(IRBuilder<> &B, Type *FTy, ElementCount VF) {
1071   assert(FTy->isFloatingPointTy() && "Expected floating point type!");
1072   Type *IntTy = IntegerType::get(FTy->getContext(), FTy->getScalarSizeInBits());
1073   Value *RuntimeVF = getRuntimeVF(B, IntTy, VF);
1074   return B.CreateUIToFP(RuntimeVF, FTy);
1075 }
1076 
1077 void reportVectorizationFailure(const StringRef DebugMsg,
1078                                 const StringRef OREMsg, const StringRef ORETag,
1079                                 OptimizationRemarkEmitter *ORE, Loop *TheLoop,
1080                                 Instruction *I) {
1081   LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
1082   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1083   ORE->emit(
1084       createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
1085       << "loop not vectorized: " << OREMsg);
1086 }
1087 
1088 void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
1089                              OptimizationRemarkEmitter *ORE, Loop *TheLoop,
1090                              Instruction *I) {
1091   LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
1092   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1093   ORE->emit(
1094       createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
1095       << Msg);
1096 }
1097 
1098 } // end namespace llvm
1099 
1100 #ifndef NDEBUG
1101 /// \return string containing a file name and a line # for the given loop.
1102 static std::string getDebugLocString(const Loop *L) {
1103   std::string Result;
1104   if (L) {
1105     raw_string_ostream OS(Result);
1106     if (const DebugLoc LoopDbgLoc = L->getStartLoc())
1107       LoopDbgLoc.print(OS);
1108     else
1109       // Just print the module name.
1110       OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
1111     OS.flush();
1112   }
1113   return Result;
1114 }
1115 #endif
1116 
1117 void InnerLoopVectorizer::addNewMetadata(Instruction *To,
1118                                          const Instruction *Orig) {
1119   // If the loop was versioned with memchecks, add the corresponding no-alias
1120   // metadata.
1121   if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
1122     LVer->annotateInstWithNoAlias(To, Orig);
1123 }
1124 
1125 void InnerLoopVectorizer::collectPoisonGeneratingRecipes(
1126     VPTransformState &State) {
1127 
1128   // Collect recipes in the backward slice of `Root` that may generate a poison
1129   // value that is used after vectorization.
1130   SmallPtrSet<VPRecipeBase *, 16> Visited;
1131   auto collectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
1132     SmallVector<VPRecipeBase *, 16> Worklist;
1133     Worklist.push_back(Root);
1134 
1135     // Traverse the backward slice of Root through its use-def chain.
1136     while (!Worklist.empty()) {
1137       VPRecipeBase *CurRec = Worklist.back();
1138       Worklist.pop_back();
1139 
1140       if (!Visited.insert(CurRec).second)
1141         continue;
1142 
1143       // Prune search if we find another recipe generating a widen memory
1144       // instruction. Widen memory instructions involved in address computation
1145       // will lead to gather/scatter instructions, which don't need to be
1146       // handled.
1147       if (isa<VPWidenMemoryInstructionRecipe>(CurRec) ||
1148           isa<VPInterleaveRecipe>(CurRec) ||
1149           isa<VPCanonicalIVPHIRecipe>(CurRec))
1150         continue;
1151 
1152       // This recipe contributes to the address computation of a widen
1153       // load/store. Collect recipe if its underlying instruction has
1154       // poison-generating flags.
1155       Instruction *Instr = CurRec->getUnderlyingInstr();
1156       if (Instr && Instr->hasPoisonGeneratingFlags())
1157         State.MayGeneratePoisonRecipes.insert(CurRec);
1158 
1159       // Add new definitions to the worklist.
1160       for (VPValue *operand : CurRec->operands())
1161         if (VPDef *OpDef = operand->getDef())
1162           Worklist.push_back(cast<VPRecipeBase>(OpDef));
1163     }
1164   });
1165 
1166   // Traverse all the recipes in the VPlan and collect the poison-generating
  // recipes in the backward slice starting at the address of a
  // VPWidenMemoryInstructionRecipe or VPInterleaveRecipe.
1169   auto Iter = depth_first(
1170       VPBlockRecursiveTraversalWrapper<VPBlockBase *>(State.Plan->getEntry()));
1171   for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
1172     for (VPRecipeBase &Recipe : *VPBB) {
1173       if (auto *WidenRec = dyn_cast<VPWidenMemoryInstructionRecipe>(&Recipe)) {
1174         Instruction *UnderlyingInstr = WidenRec->getUnderlyingInstr();
1175         VPDef *AddrDef = WidenRec->getAddr()->getDef();
1176         if (AddrDef && WidenRec->isConsecutive() && UnderlyingInstr &&
1177             Legal->blockNeedsPredication(UnderlyingInstr->getParent()))
1178           collectPoisonGeneratingInstrsInBackwardSlice(
1179               cast<VPRecipeBase>(AddrDef));
1180       } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
1181         VPDef *AddrDef = InterleaveRec->getAddr()->getDef();
1182         if (AddrDef) {
1183           // Check if any member of the interleave group needs predication.
1184           const InterleaveGroup<Instruction> *InterGroup =
1185               InterleaveRec->getInterleaveGroup();
1186           bool NeedPredication = false;
1187           for (int I = 0, NumMembers = InterGroup->getNumMembers();
1188                I < NumMembers; ++I) {
1189             Instruction *Member = InterGroup->getMember(I);
1190             if (Member)
1191               NeedPredication |=
1192                   Legal->blockNeedsPredication(Member->getParent());
1193           }
1194 
1195           if (NeedPredication)
1196             collectPoisonGeneratingInstrsInBackwardSlice(
1197                 cast<VPRecipeBase>(AddrDef));
1198         }
1199       }
1200     }
1201   }
1202 }
1203 
1204 void InnerLoopVectorizer::addMetadata(Instruction *To,
1205                                       Instruction *From) {
1206   propagateMetadata(To, From);
1207   addNewMetadata(To, From);
1208 }
1209 
1210 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
1211                                       Instruction *From) {
1212   for (Value *V : To) {
1213     if (Instruction *I = dyn_cast<Instruction>(V))
1214       addMetadata(I, From);
1215   }
1216 }
1217 
1218 PHINode *InnerLoopVectorizer::getReductionResumeValue(
1219     const RecurrenceDescriptor &RdxDesc) {
1220   auto It = ReductionResumeValues.find(&RdxDesc);
1221   assert(It != ReductionResumeValues.end() &&
1222          "Expected to find a resume value for the reduction.");
1223   return It->second;
1224 }
1225 
1226 namespace llvm {
1227 
1228 // Loop vectorization cost-model hints how the scalar epilogue loop should be
1229 // lowered.
1230 enum ScalarEpilogueLowering {
1231 
1232   // The default: allowing scalar epilogues.
1233   CM_ScalarEpilogueAllowed,
1234 
1235   // Vectorization with OptForSize: don't allow epilogues.
1236   CM_ScalarEpilogueNotAllowedOptSize,
1237 
  // A special case of vectorization with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant and free of runtime
  // guards and scalar iteration overheads.
1242   CM_ScalarEpilogueNotAllowedLowTripLoop,
1243 
1244   // Loop hint predicate indicating an epilogue is undesired.
1245   CM_ScalarEpilogueNotNeededUsePredicate,
1246 
1247   // Directive indicating we must either tail fold or not vectorize
1248   CM_ScalarEpilogueNotAllowedUsePredicate
1249 };
1250 
1251 /// ElementCountComparator creates a total ordering for ElementCount
1252 /// for the purposes of using it in a set structure.
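/// For example (illustrative), the induced order places fixed VFs before
/// scalable ones and sorts each group by the known minimum element count:
/// 2 < 4 < 8 < vscale x 2 < vscale x 4.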
1253 struct ElementCountComparator {
1254   bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
1255     return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
1256            std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
1257   }
1258 };
1259 using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;
1260 
1261 /// LoopVectorizationCostModel - estimates the expected speedups due to
1262 /// vectorization.
/// In many cases vectorization is not profitable. This can happen for a
/// number of reasons. In this class we mainly attempt to predict the
1265 /// expected speedup/slowdowns due to the supported instruction set. We use the
1266 /// TargetTransformInfo to query the different backends for the cost of
1267 /// different operations.
1268 class LoopVectorizationCostModel {
1269 public:
1270   LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
1271                              PredicatedScalarEvolution &PSE, LoopInfo *LI,
1272                              LoopVectorizationLegality *Legal,
1273                              const TargetTransformInfo &TTI,
1274                              const TargetLibraryInfo *TLI, DemandedBits *DB,
1275                              AssumptionCache *AC,
1276                              OptimizationRemarkEmitter *ORE, const Function *F,
1277                              const LoopVectorizeHints *Hints,
1278                              InterleavedAccessInfo &IAI)
1279       : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
1280         TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
1281         Hints(Hints), InterleaveInfo(IAI) {}
1282 
1283   /// \return An upper bound for the vectorization factors (both fixed and
1284   /// scalable). If the factors are 0, vectorization and interleaving should be
1285   /// avoided up front.
1286   FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);
1287 
1288   /// \return True if runtime checks are required for vectorization, and false
1289   /// otherwise.
1290   bool runtimeChecksRequired();
1291 
  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every VF in \p CandidateVFs. If UserVF is not zero
  /// then that vectorization factor will be selected, provided vectorization
  /// is possible.
1296   VectorizationFactor
1297   selectVectorizationFactor(const ElementCountSet &CandidateVFs);
1298 
1299   VectorizationFactor
1300   selectEpilogueVectorizationFactor(const ElementCount MaxVF,
1301                                     const LoopVectorizationPlanner &LVP);
1302 
1303   /// Setup cost-based decisions for user vectorization factor.
1304   /// \return true if the UserVF is a feasible VF to be chosen.
1305   bool selectUserVectorizationFactor(ElementCount UserVF) {
1306     collectUniformsAndScalars(UserVF);
1307     collectInstsToScalarize(UserVF);
1308     return expectedCost(UserVF).first.isValid();
1309   }
1310 
1311   /// \return The size (in bits) of the smallest and widest types in the code
1312   /// that needs to be vectorized. We ignore values that remain scalar such as
1313   /// 64 bit loop indices.
1314   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1315 
1316   /// \return The desired interleave count.
1317   /// If interleave count has been specified by metadata it will be returned.
1318   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1319   /// are the selected vectorization factor and the cost of the selected VF.
1320   unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);
1321 
  /// A memory access instruction may be vectorized in more than one way.
  /// The form of the instruction after vectorization depends on its cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decision map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
  /// avoid redundant calculations.
1329   void setCostBasedWideningDecision(ElementCount VF);
1330 
1331   /// A struct that represents some properties of the register usage
1332   /// of a loop.
1333   struct RegisterUsage {
1334     /// Holds the number of loop invariant values that are used in the loop.
1335     /// The key is ClassID of target-provided register class.
1336     SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
1337     /// Holds the maximum number of concurrent live intervals in the loop.
1338     /// The key is ClassID of target-provided register class.
1339     SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
1340   };
1341 
1342   /// \return Returns information about the register usages of the loop for the
1343   /// given vectorization factors.
1344   SmallVector<RegisterUsage, 8>
1345   calculateRegisterUsage(ArrayRef<ElementCount> VFs);
1346 
1347   /// Collect values we want to ignore in the cost model.
1348   void collectValuesToIgnore();
1349 
1350   /// Collect all element types in the loop for which widening is needed.
1351   void collectElementTypesForWidening();
1352 
  /// Split reductions into those that happen in the loop, and those that happen
  /// outside. In-loop reductions are collected into InLoopReductionChains.
1355   void collectInLoopReductions();
1356 
1357   /// Returns true if we should use strict in-order reductions for the given
1358   /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
1359   /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
1360   /// of FP operations.
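  /// For example (illustrative only), a strict FP sum such as
  ///   float S = 0.f; for (...) S += A[i];
  /// compiled without permission to reorder FP operations must be vectorized
  /// as an in-order (ordered) reduction to preserve the sequential rounding
  /// behaviour.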
1361   bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) {
1362     return !Hints->allowReordering() && RdxDesc.isOrdered();
1363   }
1364 
1365   /// \returns The smallest bitwidth each instruction can be represented with.
1366   /// The vector equivalents of these instructions should be truncated to this
1367   /// type.
1368   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1369     return MinBWs;
1370   }
1371 
1372   /// \returns True if it is more profitable to scalarize instruction \p I for
1373   /// vectorization factor \p VF.
1374   bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
1375     assert(VF.isVector() &&
1376            "Profitable to scalarize relevant only for VF > 1.");
1377 
1378     // Cost model is not run in the VPlan-native path - return conservative
1379     // result until this changes.
1380     if (EnableVPlanNativePath)
1381       return false;
1382 
1383     auto Scalars = InstsToScalarize.find(VF);
1384     assert(Scalars != InstsToScalarize.end() &&
1385            "VF not yet analyzed for scalarization profitability");
1386     return Scalars->second.find(I) != Scalars->second.end();
1387   }
1388 
1389   /// Returns true if \p I is known to be uniform after vectorization.
1390   bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
1391     if (VF.isScalar())
1392       return true;
1393 
1394     // Cost model is not run in the VPlan-native path - return conservative
1395     // result until this changes.
1396     if (EnableVPlanNativePath)
1397       return false;
1398 
1399     auto UniformsPerVF = Uniforms.find(VF);
1400     assert(UniformsPerVF != Uniforms.end() &&
1401            "VF not yet analyzed for uniformity");
1402     return UniformsPerVF->second.count(I);
1403   }
1404 
1405   /// Returns true if \p I is known to be scalar after vectorization.
1406   bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
1407     if (VF.isScalar())
1408       return true;
1409 
1410     // Cost model is not run in the VPlan-native path - return conservative
1411     // result until this changes.
1412     if (EnableVPlanNativePath)
1413       return false;
1414 
1415     auto ScalarsPerVF = Scalars.find(VF);
1416     assert(ScalarsPerVF != Scalars.end() &&
1417            "Scalar values are not calculated for VF");
1418     return ScalarsPerVF->second.count(I);
1419   }
1420 
1421   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1422   /// for vectorization factor \p VF.
1423   bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
1424     return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
1425            !isProfitableToScalarize(I, VF) &&
1426            !isScalarAfterVectorization(I, VF);
1427   }
1428 
  /// Decision that was taken during cost calculation for a memory instruction.
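  /// For example (illustrative only): a consecutive load of A[i] (stride +1)
  /// is typically CM_Widen, a reversed access A[N - i] is CM_Widen_Reverse,
  /// members of an interleave group such as A[2*i] and A[2*i+1] map to
  /// CM_Interleave, and an indexed access like A[B[i]] becomes
  /// CM_GatherScatter or CM_Scalarize depending on target support and cost.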
1430   enum InstWidening {
1431     CM_Unknown,
1432     CM_Widen,         // For consecutive accesses with stride +1.
1433     CM_Widen_Reverse, // For consecutive accesses with stride -1.
1434     CM_Interleave,
1435     CM_GatherScatter,
1436     CM_Scalarize
1437   };
1438 
1439   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1440   /// instruction \p I and vector width \p VF.
1441   void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
1442                            InstructionCost Cost) {
1443     assert(VF.isVector() && "Expected VF >=2");
1444     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1445   }
1446 
1447   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1448   /// interleaving group \p Grp and vector width \p VF.
1449   void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
1450                            ElementCount VF, InstWidening W,
1451                            InstructionCost Cost) {
1452     assert(VF.isVector() && "Expected VF >=2");
    /// Broadcast this decision to all instructions inside the group.
    /// But the cost will be assigned to one instruction only.
1455     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1456       if (auto *I = Grp->getMember(i)) {
1457         if (Grp->getInsertPos() == I)
1458           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1459         else
1460           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1461       }
1462     }
1463   }
1464 
1465   /// Return the cost model decision for the given instruction \p I and vector
1466   /// width \p VF. Return CM_Unknown if this instruction did not pass
1467   /// through the cost modeling.
1468   InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
1469     assert(VF.isVector() && "Expected VF to be a vector VF");
1470     // Cost model is not run in the VPlan-native path - return conservative
1471     // result until this changes.
1472     if (EnableVPlanNativePath)
1473       return CM_GatherScatter;
1474 
1475     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1476     auto Itr = WideningDecisions.find(InstOnVF);
1477     if (Itr == WideningDecisions.end())
1478       return CM_Unknown;
1479     return Itr->second.first;
1480   }
1481 
1482   /// Return the vectorization cost for the given instruction \p I and vector
1483   /// width \p VF.
1484   InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
1485     assert(VF.isVector() && "Expected VF >=2");
1486     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1487     assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1488            "The cost is not calculated");
1489     return WideningDecisions[InstOnVF].second;
1490   }
1491 
1492   /// Return True if instruction \p I is an optimizable truncate whose operand
1493   /// is an induction variable. Such a truncate will be removed by adding a new
1494   /// induction variable with the destination type.
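  /// For example (illustrative), 'trunc i64 %iv to i32' where %iv is a loop
  /// induction variable can usually be optimized away by introducing a new
  /// i32 induction variable instead of truncating on every iteration.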
1495   bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
1496     // If the instruction is not a truncate, return false.
1497     auto *Trunc = dyn_cast<TruncInst>(I);
1498     if (!Trunc)
1499       return false;
1500 
1501     // Get the source and destination types of the truncate.
1502     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1503     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1504 
1505     // If the truncate is free for the given types, return false. Replacing a
1506     // free truncate with an induction variable would add an induction variable
1507     // update instruction to each iteration of the loop. We exclude from this
1508     // check the primary induction variable since it will need an update
1509     // instruction regardless.
1510     Value *Op = Trunc->getOperand(0);
1511     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1512       return false;
1513 
1514     // If the truncated value is not an induction variable, return false.
1515     return Legal->isInductionPhi(Op);
1516   }
1517 
1518   /// Collects the instructions to scalarize for each predicated instruction in
1519   /// the loop.
1520   void collectInstsToScalarize(ElementCount VF);
1521 
1522   /// Collect Uniform and Scalar values for the given \p VF.
1523   /// The sets depend on CM decision for Load/Store instructions
1524   /// that may be vectorized as interleave, gather-scatter or scalarized.
1525   void collectUniformsAndScalars(ElementCount VF) {
1526     // Do the analysis once.
1527     if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
1528       return;
1529     setCostBasedWideningDecision(VF);
1530     collectLoopUniforms(VF);
1531     collectLoopScalars(VF);
1532   }
1533 
1534   /// Returns true if the target machine supports masked store operation
1535   /// for the given \p DataType and kind of access to \p Ptr.
1536   bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const {
1537     return Legal->isConsecutivePtr(DataType, Ptr) &&
1538            TTI.isLegalMaskedStore(DataType, Alignment);
1539   }
1540 
1541   /// Returns true if the target machine supports masked load operation
1542   /// for the given \p DataType and kind of access to \p Ptr.
1543   bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const {
1544     return Legal->isConsecutivePtr(DataType, Ptr) &&
1545            TTI.isLegalMaskedLoad(DataType, Alignment);
1546   }
1547 
1548   /// Returns true if the target machine can represent \p V as a masked gather
1549   /// or scatter operation.
1550   bool isLegalGatherOrScatter(Value *V,
1551                               ElementCount VF = ElementCount::getFixed(1)) {
1552     bool LI = isa<LoadInst>(V);
1553     bool SI = isa<StoreInst>(V);
1554     if (!LI && !SI)
1555       return false;
1556     auto *Ty = getLoadStoreType(V);
1557     Align Align = getLoadStoreAlignment(V);
1558     if (VF.isVector())
1559       Ty = VectorType::get(Ty, VF);
1560     return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
1561            (SI && TTI.isLegalMaskedScatter(Ty, Align));
1562   }
1563 
1564   /// Returns true if the target machine supports all of the reduction
1565   /// variables found for the given VF.
1566   bool canVectorizeReductions(ElementCount VF) const {
1567     return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
1568       const RecurrenceDescriptor &RdxDesc = Reduction.second;
1569       return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
1570     }));
1571   }
1572 
1573   /// Returns true if \p I is an instruction that will be scalarized with
1574   /// predication when vectorizing \p I with vectorization factor \p VF. Such
1575   /// instructions include conditional stores and instructions that may divide
1576   /// by zero.
1577   bool isScalarWithPredication(Instruction *I, ElementCount VF) const;
1578 
1579   // Returns true if \p I is an instruction that will be predicated either
1580   // through scalar predication or masked load/store or masked gather/scatter.
1581   // \p VF is the vectorization factor that will be used to vectorize \p I.
1582   // Superset of instructions that return true for isScalarWithPredication.
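  // For example (illustrative), a store executed only under an
  // 'if (Cond[i])' inside the loop body is a predicated instruction: after
  // vectorization it must either be masked or be scalarized and guarded.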
1583   bool isPredicatedInst(Instruction *I, ElementCount VF,
1584                         bool IsKnownUniform = false) {
1585     // When we know the load is uniform and the original scalar loop was not
1586     // predicated we don't need to mark it as a predicated instruction. Any
1587     // vectorised blocks created when tail-folding are something artificial we
1588     // have introduced and we know there is always at least one active lane.
1589     // That's why we call Legal->blockNeedsPredication here because it doesn't
1590     // query tail-folding.
1591     if (IsKnownUniform && isa<LoadInst>(I) &&
1592         !Legal->blockNeedsPredication(I->getParent()))
1593       return false;
1594     if (!blockNeedsPredicationForAnyReason(I->getParent()))
1595       return false;
1596     // Loads and stores that need some form of masked operation are predicated
1597     // instructions.
1598     if (isa<LoadInst>(I) || isa<StoreInst>(I))
1599       return Legal->isMaskRequired(I);
1600     return isScalarWithPredication(I, VF);
1601   }
1602 
1603   /// Returns true if \p I is a memory instruction with consecutive memory
1604   /// access that can be widened.
1605   bool
1606   memoryInstructionCanBeWidened(Instruction *I,
1607                                 ElementCount VF = ElementCount::getFixed(1));
1608 
1609   /// Returns true if \p I is a memory instruction in an interleaved-group
1610   /// of memory accesses that can be vectorized with wide vector loads/stores
1611   /// and shuffles.
1612   bool
1613   interleavedAccessCanBeWidened(Instruction *I,
1614                                 ElementCount VF = ElementCount::getFixed(1));
1615 
1616   /// Check if \p Instr belongs to any interleaved access group.
1617   bool isAccessInterleaved(Instruction *Instr) {
1618     return InterleaveInfo.isInterleaved(Instr);
1619   }
1620 
1621   /// Get the interleaved access group that \p Instr belongs to.
1622   const InterleaveGroup<Instruction> *
1623   getInterleavedAccessGroup(Instruction *Instr) {
1624     return InterleaveInfo.getInterleaveGroup(Instr);
1625   }
1626 
1627   /// Returns true if we're required to use a scalar epilogue for at least
1628   /// the final iteration of the original loop.
1629   bool requiresScalarEpilogue(ElementCount VF) const {
1630     if (!isScalarEpilogueAllowed())
1631       return false;
    // If we might exit from anywhere but the latch, we must run the exiting
    // iteration in scalar form.
1634     if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
1635       return true;
1636     return VF.isVector() && InterleaveInfo.requiresScalarEpilogue();
1637   }
1638 
  /// Returns true if a scalar epilogue is allowed, i.e. it has not been
  /// disallowed due to optsize or a loop hint annotation.
1641   bool isScalarEpilogueAllowed() const {
1642     return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1643   }
1644 
  /// Returns true if all loop blocks should be masked to fold the tail of the
  /// loop.
1646   bool foldTailByMasking() const { return FoldTailByMasking; }
1647 
  /// Returns true if the instructions in this block require predication
  /// for any reason, e.g. because tail folding now requires a predicate
  /// or because the block in the original loop was predicated.
1651   bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const {
1652     return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1653   }
1654 
1655   /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
1656   /// nodes to the chain of instructions representing the reductions. Uses a
1657   /// MapVector to ensure deterministic iteration order.
1658   using ReductionChainMap =
1659       SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;
1660 
1661   /// Return the chain of instructions representing an inloop reduction.
1662   const ReductionChainMap &getInLoopReductionChains() const {
1663     return InLoopReductionChains;
1664   }
1665 
1666   /// Returns true if the Phi is part of an inloop reduction.
1667   bool isInLoopReduction(PHINode *Phi) const {
1668     return InLoopReductionChains.count(Phi);
1669   }
1670 
1671   /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1672   /// with factor VF.  Return the cost of the instruction, including
1673   /// scalarization overhead if it's needed.
1674   InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;
1675 
1676   /// Estimate cost of a call instruction CI if it were vectorized with factor
1677   /// VF. Return the cost of the instruction, including scalarization overhead
  /// if it's needed. The flag NeedToScalarize shows if the call needs to be
  /// scalarized - i.e. either a vector version isn't available, or it is too
  /// expensive.
1681   InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
1682                                     bool &NeedToScalarize) const;
1683 
1684   /// Returns true if the per-lane cost of VectorizationFactor A is lower than
1685   /// that of B.
1686   bool isMoreProfitable(const VectorizationFactor &A,
1687                         const VectorizationFactor &B) const;
1688 
1689   /// Invalidates decisions already taken by the cost model.
1690   void invalidateCostModelingDecisions() {
1691     WideningDecisions.clear();
1692     Uniforms.clear();
1693     Scalars.clear();
1694   }
1695 
1696 private:
1697   unsigned NumPredStores = 0;
1698 
  /// Convenience function that returns the value of vscale_range if
  /// vscale_range.min == vscale_range.max, or otherwise the value returned by
  /// the corresponding TTI method.
1702   Optional<unsigned> getVScaleForTuning() const;
1703 
1704   /// \return An upper bound for the vectorization factors for both
1705   /// fixed and scalable vectorization, where the minimum-known number of
1706   /// elements is a power-of-2 larger than zero. If scalable vectorization is
1707   /// disabled or unsupported, then the scalable part will be equal to
1708   /// ElementCount::getScalable(0).
1709   FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
1710                                            ElementCount UserVF,
1711                                            bool FoldTailByMasking);
1712 
  /// \return the maximized element count based on the target's vector
  /// registers and the loop trip count, but limited to a maximum safe VF.
1715   /// This is a helper function of computeFeasibleMaxVF.
1716   /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
1717   /// issue that occurred on one of the buildbots which cannot be reproduced
  /// without having access to the proprietary compiler (see comments on
1719   /// D98509). The issue is currently under investigation and this workaround
1720   /// will be removed as soon as possible.
1721   ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
1722                                        unsigned SmallestType,
1723                                        unsigned WidestType,
1724                                        const ElementCount &MaxSafeVF,
1725                                        bool FoldTailByMasking);
1726 
1727   /// \return the maximum legal scalable VF, based on the safe max number
1728   /// of elements.
1729   ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1730 
1731   /// The vectorization cost is a combination of the cost itself and a boolean
1732   /// indicating whether any of the contributing operations will actually
1733   /// operate on vector values after type legalization in the backend. If this
1734   /// latter value is false, then all operations will be scalarized (i.e. no
1735   /// vectorization has actually taken place).
1736   using VectorizationCostTy = std::pair<InstructionCost, bool>;
1737 
1738   /// Returns the expected execution cost. The unit of the cost does
1739   /// not matter because we use the 'cost' units to compare different
1740   /// vector widths. The cost that is returned is *not* normalized by
1741   /// the factor width. If \p Invalid is not nullptr, this function
1742   /// will add a pair(Instruction*, ElementCount) to \p Invalid for
1743   /// each instruction that has an Invalid cost for the given VF.
1744   using InstructionVFPair = std::pair<Instruction *, ElementCount>;
1745   VectorizationCostTy
1746   expectedCost(ElementCount VF,
1747                SmallVectorImpl<InstructionVFPair> *Invalid = nullptr);
1748 
1749   /// Returns the execution time cost of an instruction for a given vector
1750   /// width. Vector width of one means scalar.
1751   VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1752 
1753   /// The cost-computation logic from getInstructionCost which provides
1754   /// the vector type as an output parameter.
1755   InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
1756                                      Type *&VectorTy);
1757 
1758   /// Return the cost of instructions in an inloop reduction pattern, if I is
1759   /// part of that pattern.
1760   Optional<InstructionCost>
1761   getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy,
1762                           TTI::TargetCostKind CostKind);
1763 
1764   /// Calculate vectorization cost of memory instruction \p I.
1765   InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1766 
1767   /// The cost computation for scalarized memory instruction.
1768   InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1769 
1770   /// The cost computation for interleaving group of memory instructions.
1771   InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1772 
1773   /// The cost computation for Gather/Scatter instruction.
1774   InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1775 
1776   /// The cost computation for widening instruction \p I with consecutive
1777   /// memory access.
1778   InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1779 
1780   /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1781   /// Load: scalar load + broadcast.
1782   /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1783   /// element)
1784   InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1785 
1786   /// Estimate the overhead of scalarizing an instruction. This is a
1787   /// convenience wrapper for the type-based getScalarizationOverhead API.
1788   InstructionCost getScalarizationOverhead(Instruction *I,
1789                                            ElementCount VF) const;
1790 
  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
1793   bool isConsecutiveLoadOrStore(Instruction *I);
1794 
1795   /// Map of scalar integer values to the smallest bitwidth they can be legally
1796   /// represented as. The vector equivalents of these values should be truncated
1797   /// to this type.
1798   MapVector<Instruction *, uint64_t> MinBWs;
1799 
1800   /// A type representing the costs for instructions if they were to be
1801   /// scalarized rather than vectorized. The entries are Instruction-Cost
1802   /// pairs.
1803   using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1804 
  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
1807   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1808 
1809   /// Records whether it is allowed to have the original scalar loop execute at
1810   /// least once. This may be needed as a fallback loop in case runtime
1811   /// aliasing/dependence checks fail, or to handle the tail/remainder
  /// iterations when the trip count is unknown or not a multiple of the VF,
1813   /// or as a peel-loop to handle gaps in interleave-groups.
1814   /// Under optsize and when the trip count is very small we don't allow any
1815   /// iterations to execute in the scalar loop.
1816   ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1817 
  /// All blocks of the loop are to be masked to fold the tail of the scalar
  /// iterations.
1819   bool FoldTailByMasking = false;
1820 
1821   /// A map holding scalar costs for different vectorization factors. The
1822   /// presence of a cost for an instruction in the mapping indicates that the
1823   /// instruction will be scalarized when vectorizing with the associated
1824   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1825   DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1826 
1827   /// Holds the instructions known to be uniform after vectorization.
1828   /// The data is collected per VF.
1829   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1830 
1831   /// Holds the instructions known to be scalar after vectorization.
1832   /// The data is collected per VF.
1833   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1834 
1835   /// Holds the instructions (address computations) that are forced to be
1836   /// scalarized.
1837   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1838 
1839   /// PHINodes of the reductions that should be expanded in-loop along with
1840   /// their associated chains of reduction operations, in program order from top
1841   /// (PHI) to bottom
1842   ReductionChainMap InLoopReductionChains;
1843 
1844   /// A Map of inloop reduction operations and their immediate chain operand.
1845   /// FIXME: This can be removed once reductions can be costed correctly in
1846   /// vplan. This was added to allow quick lookup to the inloop operations,
1847   /// without having to loop through InLoopReductionChains.
1848   DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1849 
1850   /// Returns the expected difference in cost from scalarizing the expression
1851   /// feeding a predicated instruction \p PredInst. The instructions to
1852   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1853   /// non-negative return value implies the expression will be scalarized.
1854   /// Currently, only single-use chains are considered for scalarization.
1855   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1856                               ElementCount VF);
1857 
1858   /// Collect the instructions that are uniform after vectorization. An
1859   /// instruction is uniform if we represent it with a single scalar value in
1860   /// the vectorized loop corresponding to each vector iteration. Examples of
1861   /// uniform instructions include pointer operands of consecutive or
1862   /// interleaved memory accesses. Note that although uniformity implies an
1863   /// instruction will be scalar, the reverse is not true. In general, a
1864   /// scalarized instruction will be represented by VF scalar values in the
1865   /// vectorized loop, each corresponding to an iteration of the original
1866   /// scalar loop.
1867   void collectLoopUniforms(ElementCount VF);
1868 
1869   /// Collect the instructions that are scalar after vectorization. An
1870   /// instruction is scalar if it is known to be uniform or will be scalarized
1871   /// during vectorization. collectLoopScalars should only add non-uniform nodes
1872   /// to the list if they are used by a load/store instruction that is marked as
1873   /// CM_Scalarize. Non-uniform scalarized instructions will be represented by
1874   /// VF values in the vectorized loop, each corresponding to an iteration of
1875   /// the original scalar loop.
1876   void collectLoopScalars(ElementCount VF);
1877 
1878   /// Keeps cost model vectorization decision and cost for instructions.
1879   /// Right now it is used for memory instructions only.
1880   using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1881                                 std::pair<InstWidening, InstructionCost>>;
1882 
1883   DecisionList WideningDecisions;
1884 
1885   /// Returns true if \p V is expected to be vectorized and it needs to be
1886   /// extracted.
1887   bool needsExtract(Value *V, ElementCount VF) const {
1888     Instruction *I = dyn_cast<Instruction>(V);
1889     if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1890         TheLoop->isLoopInvariant(I))
1891       return false;
1892 
1893     // Assume we can vectorize V (and hence we need extraction) if the
1894     // scalars are not computed yet. This can happen, because it is called
1895     // via getScalarizationOverhead from setCostBasedWideningDecision, before
1896     // the scalars are collected. That should be a safe assumption in most
1897     // cases, because we check if the operands have vectorizable types
1898     // beforehand in LoopVectorizationLegality.
1899     return Scalars.find(VF) == Scalars.end() ||
1900            !isScalarAfterVectorization(I, VF);
1901   };
1902 
1903   /// Returns a range containing only operands needing to be extracted.
1904   SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1905                                                    ElementCount VF) const {
1906     return SmallVector<Value *, 4>(make_filter_range(
1907         Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
1908   }
1909 
1910   /// Determines if we have the infrastructure to vectorize loop \p L and its
1911   /// epilogue, assuming the main loop is vectorized by \p VF.
1912   bool isCandidateForEpilogueVectorization(const Loop &L,
1913                                            const ElementCount VF) const;
1914 
1915   /// Returns true if epilogue vectorization is considered profitable, and
1916   /// false otherwise.
1917   /// \p VF is the vectorization factor chosen for the original loop.
1918   bool isEpilogueVectorizationProfitable(const ElementCount VF) const;
1919 
1920 public:
1921   /// The loop that we evaluate.
1922   Loop *TheLoop;
1923 
1924   /// Predicated scalar evolution analysis.
1925   PredicatedScalarEvolution &PSE;
1926 
1927   /// Loop Info analysis.
1928   LoopInfo *LI;
1929 
1930   /// Vectorization legality.
1931   LoopVectorizationLegality *Legal;
1932 
1933   /// Vector target information.
1934   const TargetTransformInfo &TTI;
1935 
1936   /// Target Library Info.
1937   const TargetLibraryInfo *TLI;
1938 
1939   /// Demanded bits analysis.
1940   DemandedBits *DB;
1941 
1942   /// Assumption cache.
1943   AssumptionCache *AC;
1944 
1945   /// Interface to emit optimization remarks.
1946   OptimizationRemarkEmitter *ORE;
1947 
1948   const Function *TheFunction;
1949 
1950   /// Loop Vectorize Hint.
1951   const LoopVectorizeHints *Hints;
1952 
  /// The interleaved access information contains groups of interleaved accesses
  /// with the same stride and close to each other.
1955   InterleavedAccessInfo &InterleaveInfo;
1956 
1957   /// Values to ignore in the cost model.
1958   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1959 
1960   /// Values to ignore in the cost model when VF > 1.
1961   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1962 
1963   /// All element types found in the loop.
1964   SmallPtrSet<Type *, 16> ElementTypesInLoop;
1965 
1966   /// Profitable vector factors.
1967   SmallVector<VectorizationFactor, 8> ProfitableVFs;
1968 };
1969 } // end namespace llvm
1970 
1971 /// Helper struct to manage generating runtime checks for vectorization.
1972 ///
/// The runtime checks are created up-front in temporary blocks, un-linked from
/// the existing IR, to allow better estimation of their cost. After deciding to
/// vectorize, the checks are moved back. If deciding not to vectorize, the
1976 /// temporary blocks are completely removed.
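///
/// A rough usage sketch (assuming the surrounding vectorizer driver): call
/// Create() to materialize the candidate SCEV and memory checks, cost them,
/// and then either wire them into the final CFG via emitSCEVChecks() /
/// emitMemRuntimeChecks() or let the destructor remove the unused blocks.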
1977 class GeneratedRTChecks {
1978   /// Basic block which contains the generated SCEV checks, if any.
1979   BasicBlock *SCEVCheckBlock = nullptr;
1980 
1981   /// The value representing the result of the generated SCEV checks. If it is
1982   /// nullptr, either no SCEV checks have been generated or they have been used.
1983   Value *SCEVCheckCond = nullptr;
1984 
1985   /// Basic block which contains the generated memory runtime checks, if any.
1986   BasicBlock *MemCheckBlock = nullptr;
1987 
1988   /// The value representing the result of the generated memory runtime checks.
1989   /// If it is nullptr, either no memory runtime checks have been generated or
1990   /// they have been used.
1991   Value *MemRuntimeCheckCond = nullptr;
1992 
1993   DominatorTree *DT;
1994   LoopInfo *LI;
1995 
1996   SCEVExpander SCEVExp;
1997   SCEVExpander MemCheckExp;
1998 
1999 public:
2000   GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI,
2001                     const DataLayout &DL)
2002       : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"),
2003         MemCheckExp(SE, DL, "scev.check") {}
2004 
2005   /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
2006   /// accurately estimate the cost of the runtime checks. The blocks are
  /// un-linked from the IR and are added back during vector code generation. If
2008   /// there is no vector code generation, the check blocks are removed
2009   /// completely.
2010   void Create(Loop *L, const LoopAccessInfo &LAI,
2011               const SCEVUnionPredicate &UnionPred) {
2012 
2013     BasicBlock *LoopHeader = L->getHeader();
2014     BasicBlock *Preheader = L->getLoopPreheader();
2015 
2016     // Use SplitBlock to create blocks for SCEV & memory runtime checks to
2017     // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
2018     // may be used by SCEVExpander. The blocks will be un-linked from their
2019     // predecessors and removed from LI & DT at the end of the function.
2020     if (!UnionPred.isAlwaysTrue()) {
2021       SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
2022                                   nullptr, "vector.scevcheck");
2023 
2024       SCEVCheckCond = SCEVExp.expandCodeForPredicate(
2025           &UnionPred, SCEVCheckBlock->getTerminator());
2026     }
2027 
2028     const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
2029     if (RtPtrChecking.Need) {
2030       auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
2031       MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
2032                                  "vector.memcheck");
2033 
2034       MemRuntimeCheckCond =
2035           addRuntimeChecks(MemCheckBlock->getTerminator(), L,
2036                            RtPtrChecking.getChecks(), MemCheckExp);
2037       assert(MemRuntimeCheckCond &&
2038              "no RT checks generated although RtPtrChecking "
2039              "claimed checks are required");
2040     }
2041 
2042     if (!MemCheckBlock && !SCEVCheckBlock)
2043       return;
2044 
    // Unhook the temporary blocks with the checks and update various places
    // accordingly.
2047     if (SCEVCheckBlock)
2048       SCEVCheckBlock->replaceAllUsesWith(Preheader);
2049     if (MemCheckBlock)
2050       MemCheckBlock->replaceAllUsesWith(Preheader);
2051 
2052     if (SCEVCheckBlock) {
2053       SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
2054       new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
2055       Preheader->getTerminator()->eraseFromParent();
2056     }
2057     if (MemCheckBlock) {
2058       MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
2059       new UnreachableInst(Preheader->getContext(), MemCheckBlock);
2060       Preheader->getTerminator()->eraseFromParent();
2061     }
2062 
2063     DT->changeImmediateDominator(LoopHeader, Preheader);
2064     if (MemCheckBlock) {
2065       DT->eraseNode(MemCheckBlock);
2066       LI->removeBlock(MemCheckBlock);
2067     }
2068     if (SCEVCheckBlock) {
2069       DT->eraseNode(SCEVCheckBlock);
2070       LI->removeBlock(SCEVCheckBlock);
2071     }
2072   }
2073 
2074   /// Remove the created SCEV & memory runtime check blocks & instructions, if
2075   /// unused.
2076   ~GeneratedRTChecks() {
2077     SCEVExpanderCleaner SCEVCleaner(SCEVExp);
2078     SCEVExpanderCleaner MemCheckCleaner(MemCheckExp);
2079     if (!SCEVCheckCond)
2080       SCEVCleaner.markResultUsed();
2081 
2082     if (!MemRuntimeCheckCond)
2083       MemCheckCleaner.markResultUsed();
2084 
2085     if (MemRuntimeCheckCond) {
2086       auto &SE = *MemCheckExp.getSE();
2087       // Memory runtime check generation creates compares that use expanded
2088       // values. Remove them before running the SCEVExpanderCleaners.
2089       for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
2090         if (MemCheckExp.isInsertedInstruction(&I))
2091           continue;
2092         SE.forgetValue(&I);
2093         I.eraseFromParent();
2094       }
2095     }
2096     MemCheckCleaner.cleanup();
2097     SCEVCleaner.cleanup();
2098 
2099     if (SCEVCheckCond)
2100       SCEVCheckBlock->eraseFromParent();
2101     if (MemRuntimeCheckCond)
2102       MemCheckBlock->eraseFromParent();
2103   }
2104 
2105   /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and
2106   /// adjusts the branches to branch to the vector preheader or \p Bypass,
2107   /// depending on the generated condition.
2108   BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass,
2109                              BasicBlock *LoopVectorPreHeader,
2110                              BasicBlock *LoopExitBlock) {
2111     if (!SCEVCheckCond)
2112       return nullptr;
2113     if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond))
2114       if (C->isZero())
2115         return nullptr;
2116 
2117     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2118 
2119     BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock);
2120     // Create new preheader for vector loop.
2121     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2122       PL->addBasicBlockToLoop(SCEVCheckBlock, *LI);
2123 
2124     SCEVCheckBlock->getTerminator()->eraseFromParent();
2125     SCEVCheckBlock->moveBefore(LoopVectorPreHeader);
2126     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2127                                                 SCEVCheckBlock);
2128 
2129     DT->addNewBlock(SCEVCheckBlock, Pred);
2130     DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock);
2131 
2132     ReplaceInstWithInst(
2133         SCEVCheckBlock->getTerminator(),
2134         BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond));
2135     // Mark the check as used, to prevent it from being removed during cleanup.
2136     SCEVCheckCond = nullptr;
2137     return SCEVCheckBlock;
2138   }
2139 
2140   /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts
2141   /// the branches to branch to the vector preheader or \p Bypass, depending on
2142   /// the generated condition.
2143   BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass,
2144                                    BasicBlock *LoopVectorPreHeader) {
    // Check if we generated code that checks at runtime whether arrays overlap.
2146     if (!MemRuntimeCheckCond)
2147       return nullptr;
2148 
2149     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2150     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2151                                                 MemCheckBlock);
2152 
2153     DT->addNewBlock(MemCheckBlock, Pred);
2154     DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock);
2155     MemCheckBlock->moveBefore(LoopVectorPreHeader);
2156 
2157     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2158       PL->addBasicBlockToLoop(MemCheckBlock, *LI);
2159 
2160     ReplaceInstWithInst(
2161         MemCheckBlock->getTerminator(),
2162         BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond));
2163     MemCheckBlock->getTerminator()->setDebugLoc(
2164         Pred->getTerminator()->getDebugLoc());
2165 
2166     // Mark the check as used, to prevent it from being removed during cleanup.
2167     MemRuntimeCheckCond = nullptr;
2168     return MemCheckBlock;
2169   }
2170 };
2171 
2172 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
2173 // vectorization. The loop needs to be annotated with #pragma omp simd
2174 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the
2175 // vector length information is not provided, vectorization is not considered
2176 // explicit. Interleave hints are not allowed either. These limitations will be
2177 // relaxed in the future.
// Please note that we are currently forced to abuse the pragma 'clang
2179 // vectorize' semantics. This pragma provides *auto-vectorization hints*
2180 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
2181 // provides *explicit vectorization hints* (LV can bypass legal checks and
2182 // assume that vectorization is legal). However, both hints are implemented
2183 // using the same metadata (llvm.loop.vectorize, processed by
2184 // LoopVectorizeHints). This will be fixed in the future when the native IR
2185 // representation for pragma 'omp simd' is introduced.
2186 static bool isExplicitVecOuterLoop(Loop *OuterLp,
2187                                    OptimizationRemarkEmitter *ORE) {
2188   assert(!OuterLp->isInnermost() && "This is not an outer loop");
2189   LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
2190 
2191   // Only outer loops with an explicit vectorization hint are supported.
2192   // Unannotated outer loops are ignored.
2193   if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
2194     return false;
2195 
2196   Function *Fn = OuterLp->getHeader()->getParent();
2197   if (!Hints.allowVectorization(Fn, OuterLp,
2198                                 true /*VectorizeOnlyWhenForced*/)) {
2199     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
2200     return false;
2201   }
2202 
2203   if (Hints.getInterleave() > 1) {
2204     // TODO: Interleave support is future work.
2205     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
2206                          "outer loops.\n");
2207     Hints.emitRemarkWithHints();
2208     return false;
2209   }
2210 
2211   return true;
2212 }
2213 
2214 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
2215                                   OptimizationRemarkEmitter *ORE,
2216                                   SmallVectorImpl<Loop *> &V) {
2217   // Collect inner loops and outer loops without irreducible control flow. For
2218   // now, only collect outer loops that have explicit vectorization hints. If we
2219   // are stress testing the VPlan H-CFG construction, we collect the outermost
2220   // loop of every loop nest.
2221   if (L.isInnermost() || VPlanBuildStressTest ||
2222       (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
2223     LoopBlocksRPO RPOT(&L);
2224     RPOT.perform(LI);
2225     if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
2226       V.push_back(&L);
2227       // TODO: Collect inner loops inside marked outer loops in case
2228       // vectorization fails for the outer loop. Do not invoke
2229       // 'containsIrreducibleCFG' again for inner loops when the outer loop is
2230       // already known to be reducible. We can use an inherited attribute for
2231       // that.
2232       return;
2233     }
2234   }
2235   for (Loop *InnerL : L)
2236     collectSupportedLoops(*InnerL, LI, ORE, V);
2237 }
2238 
2239 namespace {
2240 
2241 /// The LoopVectorize Pass.
2242 struct LoopVectorize : public FunctionPass {
2243   /// Pass identification, replacement for typeid
2244   static char ID;
2245 
2246   LoopVectorizePass Impl;
2247 
2248   explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
2249                          bool VectorizeOnlyWhenForced = false)
2250       : FunctionPass(ID),
2251         Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
2252     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
2253   }
2254 
2255   bool runOnFunction(Function &F) override {
2256     if (skipFunction(F))
2257       return false;
2258 
2259     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2260     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2261     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
2262     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2263     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
2264     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
2265     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
2266     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2267     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2268     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
2269     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
2270     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2271     auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
2272 
2273     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
2274         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
2275 
2276     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
2277                         GetLAA, *ORE, PSI).MadeAnyChange;
2278   }
2279 
2280   void getAnalysisUsage(AnalysisUsage &AU) const override {
2281     AU.addRequired<AssumptionCacheTracker>();
2282     AU.addRequired<BlockFrequencyInfoWrapperPass>();
2283     AU.addRequired<DominatorTreeWrapperPass>();
2284     AU.addRequired<LoopInfoWrapperPass>();
2285     AU.addRequired<ScalarEvolutionWrapperPass>();
2286     AU.addRequired<TargetTransformInfoWrapperPass>();
2287     AU.addRequired<AAResultsWrapperPass>();
2288     AU.addRequired<LoopAccessLegacyAnalysis>();
2289     AU.addRequired<DemandedBitsWrapperPass>();
2290     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2291     AU.addRequired<InjectTLIMappingsLegacy>();
2292 
2293     // We currently do not preserve loopinfo/dominator analyses with outer loop
2294     // vectorization. Until this is addressed, mark these analyses as preserved
2295     // only for non-VPlan-native path.
2296     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
2297     if (!EnableVPlanNativePath) {
2298       AU.addPreserved<LoopInfoWrapperPass>();
2299       AU.addPreserved<DominatorTreeWrapperPass>();
2300     }
2301 
2302     AU.addPreserved<BasicAAWrapperPass>();
2303     AU.addPreserved<GlobalsAAWrapperPass>();
2304     AU.addRequired<ProfileSummaryInfoWrapperPass>();
2305   }
2306 };
2307 
2308 } // end anonymous namespace
2309 
2310 //===----------------------------------------------------------------------===//
2311 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
2312 // LoopVectorizationCostModel and LoopVectorizationPlanner.
2313 //===----------------------------------------------------------------------===//
2314 
2315 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
  // We need to place the broadcast of invariant variables outside the loop,
  // but only if it's proven safe to do so. Otherwise, the broadcast will be
  // placed inside the vector loop body.
2319   Instruction *Instr = dyn_cast<Instruction>(V);
2320   bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
2321                      (!Instr ||
2322                       DT->dominates(Instr->getParent(), LoopVectorPreHeader));
2323   // Place the code for broadcasting invariant variables in the new preheader.
2324   IRBuilder<>::InsertPointGuard Guard(Builder);
2325   if (SafeToHoist)
2326     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2327 
2328   // Broadcast the scalar into all locations in the vector.
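  // For a fixed VF this typically expands to an insertelement of V into lane
  // 0 followed by a zero-mask shufflevector (a sketch of what
  // CreateVectorSplat emits; the exact IR can differ for scalable VFs and
  // constant operands).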
2329   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
2330 
2331   return Shuf;
2332 }
2333 
/// This function adds
/// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
/// to each vector element of Val. The sequence starts at StartIdx.
/// \p BinOp is only relevant for FP induction variables.
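/// For example (illustrative), with StartIdx = 0, Step = 2 and VF = 4 the
/// added sequence is <0, 2, 4, 6>, so an integer Val of <n, n, n, n> becomes
/// <n, n + 2, n + 4, n + 6>.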
2338 static Value *getStepVector(Value *Val, Value *StartIdx, Value *Step,
2339                             Instruction::BinaryOps BinOp, ElementCount VF,
2340                             IRBuilder<> &Builder) {
2341   assert(VF.isVector() && "only vector VFs are supported");
2342 
2343   // Create and check the types.
2344   auto *ValVTy = cast<VectorType>(Val->getType());
2345   ElementCount VLen = ValVTy->getElementCount();
2346 
2347   Type *STy = Val->getType()->getScalarType();
2348   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
2349          "Induction Step must be an integer or FP");
2350   assert(Step->getType() == STy && "Step has wrong type");
2351 
2352   SmallVector<Constant *, 8> Indices;
2353 
2354   // Create a vector of consecutive numbers from zero to VF.
2355   VectorType *InitVecValVTy = ValVTy;
2356   Type *InitVecValSTy = STy;
2357   if (STy->isFloatingPointTy()) {
2358     InitVecValSTy =
2359         IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
2360     InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
2361   }
2362   Value *InitVec = Builder.CreateStepVector(InitVecValVTy);
2363 
2364   // Splat the StartIdx
2365   Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx);
2366 
2367   if (STy->isIntegerTy()) {
2368     InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);
2369     Step = Builder.CreateVectorSplat(VLen, Step);
2370     assert(Step->getType() == Val->getType() && "Invalid step vec");
2371     // FIXME: The newly created binary instructions should contain nsw/nuw
2372     // flags, which can be found from the original scalar operations.
2373     Step = Builder.CreateMul(InitVec, Step);
2374     return Builder.CreateAdd(Val, Step, "induction");
2375   }
2376 
2377   // Floating point induction.
2378   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
2379          "Binary Opcode should be specified for FP induction");
2380   InitVec = Builder.CreateUIToFP(InitVec, ValVTy);
2381   InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat);
2382 
2383   Step = Builder.CreateVectorSplat(VLen, Step);
2384   Value *MulOp = Builder.CreateFMul(InitVec, Step);
2385   return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
2386 }
2387 
2388 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
2389     const InductionDescriptor &II, Value *Step, Value *Start,
2390     Instruction *EntryVal, VPValue *Def, VPTransformState &State) {
2391   IRBuilder<> &Builder = State.Builder;
2392   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
2393          "Expected either an induction phi-node or a truncate of it!");
2394 
  // Construct the initial value of the vector IV in the vector loop preheader.
2396   auto CurrIP = Builder.saveIP();
2397   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2398   if (isa<TruncInst>(EntryVal)) {
2399     assert(Start->getType()->isIntegerTy() &&
2400            "Truncation requires an integer type");
2401     auto *TruncType = cast<IntegerType>(EntryVal->getType());
2402     Step = Builder.CreateTrunc(Step, TruncType);
2403     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
2404   }
2405 
2406   Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0);
2407   Value *SplatStart = Builder.CreateVectorSplat(State.VF, Start);
2408   Value *SteppedStart = getStepVector(
2409       SplatStart, Zero, Step, II.getInductionOpcode(), State.VF, State.Builder);
2410 
2411   // We create vector phi nodes for both integer and floating-point induction
2412   // variables. Here, we determine the kind of arithmetic we will perform.
2413   Instruction::BinaryOps AddOp;
2414   Instruction::BinaryOps MulOp;
2415   if (Step->getType()->isIntegerTy()) {
2416     AddOp = Instruction::Add;
2417     MulOp = Instruction::Mul;
2418   } else {
2419     AddOp = II.getInductionOpcode();
2420     MulOp = Instruction::FMul;
2421   }
2422 
2423   // Multiply the vectorization factor by the step using integer or
2424   // floating-point arithmetic as appropriate.
2425   Type *StepType = Step->getType();
2426   Value *RuntimeVF;
2427   if (Step->getType()->isFloatingPointTy())
2428     RuntimeVF = getRuntimeVFAsFloat(Builder, StepType, State.VF);
2429   else
2430     RuntimeVF = getRuntimeVF(Builder, StepType, State.VF);
2431   Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF);
2432 
2433   // Create a vector splat to use in the induction update.
2434   //
2435   // FIXME: If the step is non-constant, we create the vector splat with
2436   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
2437   //        handle a constant vector splat.
2438   Value *SplatVF = isa<Constant>(Mul)
2439                        ? ConstantVector::getSplat(State.VF, cast<Constant>(Mul))
2440                        : Builder.CreateVectorSplat(State.VF, Mul);
2441   Builder.restoreIP(CurrIP);
2442 
2443   // We may need to add the step a number of times, depending on the unroll
2444   // factor. The last of those goes into the PHI.
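  // For example (illustrative): with VF = 4, UF = 2 and a unit step, part 0
  // uses vec.ind = <i, i+1, i+2, i+3>, part 1 uses vec.ind + <4, 4, 4, 4>,
  // and vec.ind.next = vec.ind + <8, 8, 8, 8> feeds back into the phi.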
2445   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
2446                                     &*LoopVectorBody->getFirstInsertionPt());
2447   VecInd->setDebugLoc(EntryVal->getDebugLoc());
2448   Instruction *LastInduction = VecInd;
2449   for (unsigned Part = 0; Part < UF; ++Part) {
2450     State.set(Def, LastInduction, Part);
2451 
2452     if (isa<TruncInst>(EntryVal))
2453       addMetadata(LastInduction, EntryVal);
2454 
2455     LastInduction = cast<Instruction>(
2456         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"));
2457     LastInduction->setDebugLoc(EntryVal->getDebugLoc());
2458   }
2459 
2460   // Move the last step to the end of the latch block. This ensures consistent
2461   // placement of all induction updates.
2462   auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
2463   auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
2464   LastInduction->moveBefore(Br);
2465   LastInduction->setName("vec.ind.next");
2466 
2467   VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
2468   VecInd->addIncoming(LastInduction, LoopVectorLatch);
2469 }
2470 
2471 void InnerLoopVectorizer::widenIntOrFpInduction(
2472     PHINode *IV, VPWidenIntOrFpInductionRecipe *Def, VPTransformState &State,
2473     Value *CanonicalIV) {
2474   Value *Start = Def->getStartValue()->getLiveInIRValue();
2475   const InductionDescriptor &ID = Def->getInductionDescriptor();
2476   TruncInst *Trunc = Def->getTruncInst();
2477   IRBuilder<> &Builder = State.Builder;
2478   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
2479   assert(!State.VF.isZero() && "VF must be non-zero");
2480 
2481   // The value from the original loop to which we are mapping the new induction
2482   // variable.
2483   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
2484 
2485   auto &DL = EntryVal->getModule()->getDataLayout();
2486 
  // Generate code for the induction step. Note that induction steps are
  // required to be loop-invariant.
2489   auto CreateStepValue = [&](const SCEV *Step) -> Value * {
2490     assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) &&
2491            "Induction step should be loop invariant");
2492     if (PSE.getSE()->isSCEVable(IV->getType())) {
2493       SCEVExpander Exp(*PSE.getSE(), DL, "induction");
2494       return Exp.expandCodeFor(Step, Step->getType(),
2495                                State.CFG.VectorPreHeader->getTerminator());
2496     }
2497     return cast<SCEVUnknown>(Step)->getValue();
2498   };
2499 
2500   // The scalar value to broadcast. This is derived from the canonical
2501   // induction variable. If a truncation type is given, truncate the canonical
2502   // induction variable and step. Otherwise, derive these values from the
2503   // induction descriptor.
2504   auto CreateScalarIV = [&](Value *&Step) -> Value * {
2505     Value *ScalarIV = CanonicalIV;
2506     Type *NeededType = IV->getType();
2507     if (!Def->isCanonical() || ScalarIV->getType() != NeededType) {
2508       ScalarIV =
2509           NeededType->isIntegerTy()
2510               ? Builder.CreateSExtOrTrunc(ScalarIV, NeededType)
2511               : Builder.CreateCast(Instruction::SIToFP, ScalarIV, NeededType);
2512       ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID,
2513                                       State.CFG.PrevBB);
2514       ScalarIV->setName("offset.idx");
2515     }
2516     if (Trunc) {
2517       auto *TruncType = cast<IntegerType>(Trunc->getType());
2518       assert(Step->getType()->isIntegerTy() &&
2519              "Truncation requires an integer step");
2520       ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
2521       Step = Builder.CreateTrunc(Step, TruncType);
2522     }
2523     return ScalarIV;
2524   };
2525 
2526   // Fast-math-flags propagate from the original induction instruction.
2527   IRBuilder<>::FastMathFlagGuard FMFG(Builder);
2528   if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp()))
2529     Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags());
2530 
2531   // Now do the actual transformations, and start with creating the step value.
2532   Value *Step = CreateStepValue(ID.getStep());
2533   if (State.VF.isScalar()) {
2534     Value *ScalarIV = CreateScalarIV(Step);
2535     Type *ScalarTy = IntegerType::get(ScalarIV->getContext(),
2536                                       Step->getType()->getScalarSizeInBits());
2537 
2538     Instruction::BinaryOps IncOp = ID.getInductionOpcode();
2539     if (IncOp == Instruction::BinaryOpsEnd)
2540       IncOp = Instruction::Add;
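    // For example (illustrative): with UF = 2 and an integer IV, part 0
    // computes ScalarIV + 0 * Step and part 1 computes ScalarIV + 1 * Step.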
2541     for (unsigned Part = 0; Part < UF; ++Part) {
2542       Value *StartIdx = ConstantInt::get(ScalarTy, Part);
2543       Instruction::BinaryOps MulOp = Instruction::Mul;
2544       if (Step->getType()->isFloatingPointTy()) {
2545         StartIdx = Builder.CreateUIToFP(StartIdx, Step->getType());
2546         MulOp = Instruction::FMul;
2547       }
2548 
2549       Value *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
2550       Value *EntryPart = Builder.CreateBinOp(IncOp, ScalarIV, Mul, "induction");
2551       State.set(Def, EntryPart, Part);
2552       if (Trunc) {
2553         assert(!Step->getType()->isFloatingPointTy() &&
2554                "fp inductions shouldn't be truncated");
2555         addMetadata(EntryPart, Trunc);
2556       }
2557     }
2558     return;
2559   }
2560 
2561   // Create a new independent vector induction variable, if one is needed.
2562   if (Def->needsVectorIV())
2563     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, State);
2564 
2565   if (Def->needsScalarIV()) {
2566     // Create scalar steps that can be used by instructions we will later
2567     // scalarize. Note that the addition of the scalar steps will not increase
2568     // the number of instructions in the loop in the common case prior to
2569     // InstCombine. We will be trading one vector extract for each scalar step.
2570     Value *ScalarIV = CreateScalarIV(Step);
2571     buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, State);
2572   }
2573 }
2574 
2575 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
2576                                            Instruction *EntryVal,
2577                                            const InductionDescriptor &ID,
2578                                            VPValue *Def,
2579                                            VPTransformState &State) {
2580   IRBuilder<> &Builder = State.Builder;
2581   // We shouldn't have to build scalar steps if we aren't vectorizing.
2582   assert(State.VF.isVector() && "VF should be greater than one");
  // Get the value type and ensure it and the step have the same type.
2584   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
2585   assert(ScalarIVTy == Step->getType() &&
2586          "Val and Step should have the same type");
2587 
2588   // We build scalar steps for both integer and floating-point induction
2589   // variables. Here, we determine the kind of arithmetic we will perform.
2590   Instruction::BinaryOps AddOp;
2591   Instruction::BinaryOps MulOp;
2592   if (ScalarIVTy->isIntegerTy()) {
2593     AddOp = Instruction::Add;
2594     MulOp = Instruction::Mul;
2595   } else {
2596     AddOp = ID.getInductionOpcode();
2597     MulOp = Instruction::FMul;
2598   }
2599 
2600   // Determine the number of scalars we need to generate for each unroll
2601   // iteration.
2602   bool FirstLaneOnly = vputils::onlyFirstLaneUsed(Def);
2603   unsigned Lanes = FirstLaneOnly ? 1 : State.VF.getKnownMinValue();
2604   // Compute the scalar steps and save the results in State.
2605   Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
2606                                      ScalarIVTy->getScalarSizeInBits());
2607   Type *VecIVTy = nullptr;
2608   Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr;
2609   if (!FirstLaneOnly && State.VF.isScalable()) {
2610     VecIVTy = VectorType::get(ScalarIVTy, State.VF);
2611     UnitStepVec =
2612         Builder.CreateStepVector(VectorType::get(IntStepTy, State.VF));
2613     SplatStep = Builder.CreateVectorSplat(State.VF, Step);
2614     SplatIV = Builder.CreateVectorSplat(State.VF, ScalarIV);
2615   }
2616 
2617   for (unsigned Part = 0; Part < State.UF; ++Part) {
2618     Value *StartIdx0 = createStepForVF(Builder, IntStepTy, State.VF, Part);
2619 
2620     if (!FirstLaneOnly && State.VF.isScalable()) {
2621       auto *SplatStartIdx = Builder.CreateVectorSplat(State.VF, StartIdx0);
2622       auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec);
2623       if (ScalarIVTy->isFloatingPointTy())
2624         InitVec = Builder.CreateSIToFP(InitVec, VecIVTy);
2625       auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep);
2626       auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul);
2627       State.set(Def, Add, Part);
      // It's useful to also record the lane values for the known minimum
      // number of elements, so we generate those below as well. This improves
      // the code quality when extracting the first element, for example.
2631     }
2632 
2633     if (ScalarIVTy->isFloatingPointTy())
2634       StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy);
2635 
2636     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2637       Value *StartIdx = Builder.CreateBinOp(
2638           AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane));
2639       // The step returned by `createStepForVF` is a runtime-evaluated value
2640       // when VF is scalable. Otherwise, it should be folded into a Constant.
2641       assert((State.VF.isScalable() || isa<Constant>(StartIdx)) &&
2642              "Expected StartIdx to be folded to a constant when VF is not "
2643              "scalable");
2644       auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
2645       auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul);
2646       State.set(Def, Add, VPIteration(Part, Lane));
2647     }
2648   }
2649 }
2650 
2651 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def,
2652                                                     const VPIteration &Instance,
2653                                                     VPTransformState &State) {
2654   Value *ScalarInst = State.get(Def, Instance);
2655   Value *VectorValue = State.get(Def, Instance.Part);
2656   VectorValue = Builder.CreateInsertElement(
2657       VectorValue, ScalarInst,
2658       Instance.Lane.getAsRuntimeExpr(State.Builder, VF));
2659   State.set(Def, VectorValue, Instance.Part);
2660 }
2661 
2662 // Return whether we allow using masked interleave-groups (for dealing with
2663 // strided loads/stores that reside in predicated blocks, or for dealing
2664 // with gaps).
2665 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2666   // If an override option has been passed in for interleaved accesses, use it.
2667   if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2668     return EnableMaskedInterleavedMemAccesses;
2669 
2670   return TTI.enableMaskedInterleavedAccessVectorization();
2671 }
2672 
2673 // Try to vectorize the interleave group that \p Instr belongs to.
2674 //
// E.g., translate the following interleaved load group (factor = 3):
2676 //   for (i = 0; i < N; i+=3) {
2677 //     R = Pic[i];             // Member of index 0
2678 //     G = Pic[i+1];           // Member of index 1
2679 //     B = Pic[i+2];           // Member of index 2
2680 //     ... // do something to R, G, B
2681 //   }
2682 // To:
2683 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2684 //   %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9>   ; R elements
2685 //   %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10>  ; G elements
2686 //   %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11>  ; B elements
2687 //
// Or translate the following interleaved store group (factor = 3):
2689 //   for (i = 0; i < N; i+=3) {
2690 //     ... do something to R, G, B
2691 //     Pic[i]   = R;           // Member of index 0
2692 //     Pic[i+1] = G;           // Member of index 1
2693 //     Pic[i+2] = B;           // Member of index 2
2694 //   }
2695 // To:
2696 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2697 //   %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
2698 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2699 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2700 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2701 void InnerLoopVectorizer::vectorizeInterleaveGroup(
2702     const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
2703     VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
2704     VPValue *BlockInMask) {
2705   Instruction *Instr = Group->getInsertPos();
2706   const DataLayout &DL = Instr->getModule()->getDataLayout();
2707 
2708   // Prepare for the vector type of the interleaved load/store.
2709   Type *ScalarTy = getLoadStoreType(Instr);
2710   unsigned InterleaveFactor = Group->getFactor();
2711   assert(!VF.isScalable() && "scalable vectors not yet supported.");
2712   auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);
2713 
2714   // Prepare for the new pointers.
2715   SmallVector<Value *, 2> AddrParts;
2716   unsigned Index = Group->getIndex(Instr);
2717 
2718   // TODO: extend the masked interleaved-group support to reversed access.
2719   assert((!BlockInMask || !Group->isReverse()) &&
2720          "Reversed masked interleave-group not supported.");
2721 
2722   // If the group is reverse, adjust the index to refer to the last vector lane
2723   // instead of the first. We adjust the index from the first vector lane,
2724   // rather than directly getting the pointer for lane VF - 1, because the
2725   // pointer operand of the interleaved access is supposed to be uniform. For
2726   // uniform instructions, we're only required to generate a value for the
2727   // first vector lane in each unroll iteration.
2728   if (Group->isReverse())
2729     Index += (VF.getKnownMinValue() - 1) * Group->getFactor();
2730 
2731   for (unsigned Part = 0; Part < UF; Part++) {
2732     Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
2733     setDebugLocFromInst(AddrPart);
2734 
    // Note that the current instruction could be at any member index, so the
    // address needs to be adjusted to the member of index 0.
    //
    // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
    //       b = A[i];       // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
    //
    // E.g.  A[i+1] = a;     // Member of index 1
    //       A[i]   = b;     // Member of index 0
    //       A[i+2] = c;     // Member of index 2 (Current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].
2746 
    bool InBounds = false;
    if (auto *GEP = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
      InBounds = GEP->isInBounds();
2750     AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2751     cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2752 
2753     // Cast to the vector pointer type.
2754     unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
2755     Type *PtrTy = VecTy->getPointerTo(AddressSpace);
2756     AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
2757   }
2758 
2759   setDebugLocFromInst(Instr);
2760   Value *PoisonVec = PoisonValue::get(VecTy);
2761 
2762   Value *MaskForGaps = nullptr;
2763   if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
2764     MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2765     assert(MaskForGaps && "Mask for Gaps is required but it is null");
2766   }
2767 
2768   // Vectorize the interleaved load group.
2769   if (isa<LoadInst>(Instr)) {
2770     // For each unroll part, create a wide load for the group.
2771     SmallVector<Value *, 2> NewLoads;
2772     for (unsigned Part = 0; Part < UF; Part++) {
2773       Instruction *NewLoad;
2774       if (BlockInMask || MaskForGaps) {
2775         assert(useMaskedInterleavedAccesses(*TTI) &&
2776                "masked interleaved groups are not allowed.");
2777         Value *GroupMask = MaskForGaps;
2778         if (BlockInMask) {
2779           Value *BlockInMaskPart = State.get(BlockInMask, Part);
2780           Value *ShuffledMask = Builder.CreateShuffleVector(
2781               BlockInMaskPart,
2782               createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2783               "interleaved.mask");
2784           GroupMask = MaskForGaps
2785                           ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2786                                                 MaskForGaps)
2787                           : ShuffledMask;
2788         }
2789         NewLoad =
2790             Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(),
2791                                      GroupMask, PoisonVec, "wide.masked.vec");
2792       }
2793       else
2794         NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
2795                                             Group->getAlign(), "wide.vec");
2796       Group->addMetadata(NewLoad);
2797       NewLoads.push_back(NewLoad);
2798     }
2799 
2800     // For each member in the group, shuffle out the appropriate data from the
2801     // wide loads.
2802     unsigned J = 0;
2803     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2804       Instruction *Member = Group->getMember(I);
2805 
2806       // Skip the gaps in the group.
2807       if (!Member)
2808         continue;
2809 
2810       auto StrideMask =
2811           createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
2812       for (unsigned Part = 0; Part < UF; Part++) {
2813         Value *StridedVec = Builder.CreateShuffleVector(
2814             NewLoads[Part], StrideMask, "strided.vec");
2815 
        // If this member has a different type, cast the result type.
2817         if (Member->getType() != ScalarTy) {
2818           assert(!VF.isScalable() && "VF is assumed to be non scalable.");
2819           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2820           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2821         }
2822 
2823         if (Group->isReverse())
2824           StridedVec = Builder.CreateVectorReverse(StridedVec, "reverse");
2825 
2826         State.set(VPDefs[J], StridedVec, Part);
2827       }
2828       ++J;
2829     }
2830     return;
2831   }
2832 
  // The sub vector type for the current instruction.
2834   auto *SubVT = VectorType::get(ScalarTy, VF);
2835 
2836   // Vectorize the interleaved store group.
2837   MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2838   assert((!MaskForGaps || useMaskedInterleavedAccesses(*TTI)) &&
2839          "masked interleaved groups are not allowed.");
2840   assert((!MaskForGaps || !VF.isScalable()) &&
2841          "masking gaps for scalable vectors is not yet supported.");
2842   for (unsigned Part = 0; Part < UF; Part++) {
2843     // Collect the stored vector from each member.
2844     SmallVector<Value *, 4> StoredVecs;
2845     for (unsigned i = 0; i < InterleaveFactor; i++) {
      assert((Group->getMember(i) || MaskForGaps) &&
             "Failed to get a member from an interleaved store group");
2848       Instruction *Member = Group->getMember(i);
2849 
2850       // Skip the gaps in the group.
      if (!Member) {
        Value *Poison = PoisonValue::get(SubVT);
        StoredVecs.push_back(Poison);
2854         continue;
2855       }
2856 
2857       Value *StoredVec = State.get(StoredValues[i], Part);
2858 
2859       if (Group->isReverse())
2860         StoredVec = Builder.CreateVectorReverse(StoredVec, "reverse");
2861 
      // If this member has a different type, cast it to a unified type.
      if (StoredVec->getType() != SubVT)
2865         StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2866 
2867       StoredVecs.push_back(StoredVec);
2868     }
2869 
2870     // Concatenate all vectors into a wide vector.
2871     Value *WideVec = concatenateVectors(Builder, StoredVecs);
2872 
2873     // Interleave the elements in the wide vector.
2874     Value *IVec = Builder.CreateShuffleVector(
2875         WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor),
2876         "interleaved.vec");
2877 
2878     Instruction *NewStoreInstr;
2879     if (BlockInMask || MaskForGaps) {
2880       Value *GroupMask = MaskForGaps;
2881       if (BlockInMask) {
2882         Value *BlockInMaskPart = State.get(BlockInMask, Part);
2883         Value *ShuffledMask = Builder.CreateShuffleVector(
2884             BlockInMaskPart,
2885             createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2886             "interleaved.mask");
2887         GroupMask = MaskForGaps ? Builder.CreateBinOp(Instruction::And,
2888                                                       ShuffledMask, MaskForGaps)
2889                                 : ShuffledMask;
2890       }
2891       NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part],
2892                                                 Group->getAlign(), GroupMask);
2893     } else
2894       NewStoreInstr =
2895           Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());
2896 
2897     Group->addMetadata(NewStoreInstr);
2898   }
2899 }
2900 
2901 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
2902                                                VPReplicateRecipe *RepRecipe,
2903                                                const VPIteration &Instance,
2904                                                bool IfPredicateInstr,
2905                                                VPTransformState &State) {
2906   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
2907 
2908   // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for
2909   // the first lane and part.
2910   if (isa<NoAliasScopeDeclInst>(Instr))
2911     if (!Instance.isFirstIteration())
2912       return;
2913 
2914   setDebugLocFromInst(Instr);
2915 
  // Does this instruction return a value?
2917   bool IsVoidRetTy = Instr->getType()->isVoidTy();
2918 
2919   Instruction *Cloned = Instr->clone();
2920   if (!IsVoidRetTy)
2921     Cloned->setName(Instr->getName() + ".cloned");
2922 
  // If the scalarized instruction contributes to the address computation of a
  // widened masked load/store which was in a basic block that needed
  // predication and is not predicated after vectorization, we can't propagate
  // poison-generating flags (nuw/nsw, exact, inbounds, etc.). The scalarized
  // instruction could feed a poison value to the base address of the widened
  // load/store.
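  // For example (illustrative): a scalarized getelementptr that was
  // "inbounds" inside a predicated block must be cloned without the flag
  // here, since after vectorization it also executes for lanes whose
  // pointer may lie out of bounds.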
2929   if (State.MayGeneratePoisonRecipes.contains(RepRecipe))
2930     Cloned->dropPoisonGeneratingFlags();
2931 
2932   State.Builder.SetInsertPoint(Builder.GetInsertBlock(),
2933                                Builder.GetInsertPoint());
2934   // Replace the operands of the cloned instructions with their scalar
2935   // equivalents in the new loop.
2936   for (auto &I : enumerate(RepRecipe->operands())) {
2937     auto InputInstance = Instance;
2938     VPValue *Operand = I.value();
2939     VPReplicateRecipe *OperandR = dyn_cast<VPReplicateRecipe>(Operand);
2940     if (OperandR && OperandR->isUniform())
2941       InputInstance.Lane = VPLane::getFirstLane();
2942     Cloned->setOperand(I.index(), State.get(Operand, InputInstance));
2943   }
2944   addNewMetadata(Cloned, Instr);
2945 
2946   // Place the cloned scalar in the new loop.
2947   Builder.Insert(Cloned);
2948 
2949   State.set(RepRecipe, Cloned, Instance);
2950 
  // If we just cloned a new assumption, add it to the assumption cache.
2952   if (auto *II = dyn_cast<AssumeInst>(Cloned))
2953     AC->registerAssumption(II);
2954 
2955   // End if-block.
2956   if (IfPredicateInstr)
2957     PredicatedInstructions.push_back(Cloned);
2958 }
2959 
2960 void InnerLoopVectorizer::createHeaderBranch(Loop *L) {
2961   BasicBlock *Header = L->getHeader();
2962   assert(!L->getLoopLatch() && "loop should not have a latch at this point");
2963 
2964   IRBuilder<> B(Header->getTerminator());
2965   Instruction *OldInst =
2966       getDebugLocFromInstOrOperands(Legal->getPrimaryInduction());
2967   setDebugLocFromInst(OldInst, &B);
2968 
2969   // Connect the header to the exit and header blocks and replace the old
2970   // terminator.
2971   B.CreateCondBr(B.getTrue(), L->getUniqueExitBlock(), Header);
2972 
2973   // Now we have two terminators. Remove the old one from the block.
2974   Header->getTerminator()->eraseFromParent();
2975 }
2976 
2977 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
2978   if (TripCount)
2979     return TripCount;
2980 
2981   assert(L && "Create Trip Count for null loop.");
2982   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2983   // Find the loop boundaries.
2984   ScalarEvolution *SE = PSE.getSE();
2985   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
2986   assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
2987          "Invalid loop count");
2988 
2989   Type *IdxTy = Legal->getWidestInductionType();
2990   assert(IdxTy && "No type for induction");
2991 
  // The exit count might have type i64 while the phi has type i32. This can
  // happen if we have an induction variable that is sign extended before the
  // compare. The only way we can get a backedge-taken count in that case is
  // if the induction variable was signed and as such will not overflow, in
  // which case truncation is legal.
2997   if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
2998       IdxTy->getPrimitiveSizeInBits())
2999     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
3000   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
3001 
  // Get the total trip count by adding 1 to the backedge-taken count.
3003   const SCEV *ExitCount = SE->getAddExpr(
3004       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
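  // For example (illustrative): for a loop running i = 0 .. n-1, the
  // backedge-taken count is n - 1 and the trip count is n.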
3005 
3006   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
3007 
3008   // Expand the trip count and place the new instructions in the preheader.
3009   // Notice that the pre-header does not change, only the loop body.
3010   SCEVExpander Exp(*SE, DL, "induction");
3011 
3012   // Count holds the overall loop count (N).
3013   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
3014                                 L->getLoopPreheader()->getTerminator());
3015 
3016   if (TripCount->getType()->isPointerTy())
3017     TripCount =
3018         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
3019                                     L->getLoopPreheader()->getTerminator());
3020 
3021   return TripCount;
3022 }
3023 
3024 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
3025   if (VectorTripCount)
3026     return VectorTripCount;
3027 
3028   Value *TC = getOrCreateTripCount(L);
3029   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3030 
3031   Type *Ty = TC->getType();
3032   // This is where we can make the step a runtime constant.
3033   Value *Step = createStepForVF(Builder, Ty, VF, UF);
3034 
3035   // If the tail is to be folded by masking, round the number of iterations N
3036   // up to a multiple of Step instead of rounding down. This is done by first
3037   // adding Step-1 and then rounding down. Note that it's ok if this addition
3038   // overflows: the vector induction variable will eventually wrap to zero given
3039   // that it starts at zero and its Step is a power of two; the loop will then
3040   // exit, with the last early-exit vector comparison also producing all-true.
3041   if (Cost->foldTailByMasking()) {
3042     assert(isPowerOf2_32(VF.getKnownMinValue() * UF) &&
3043            "VF*UF must be a power of 2 when folding tail by masking");
3044     Value *NumLanes = getRuntimeVF(Builder, Ty, VF * UF);
3045     TC = Builder.CreateAdd(
3046         TC, Builder.CreateSub(NumLanes, ConstantInt::get(Ty, 1)), "n.rnd.up");
3047   }
3048 
3049   // Now we need to generate the expression for the part of the loop that the
3050   // vectorized body will execute. This is equal to N - (N % Step) if scalar
3051   // iterations are not required for correctness, or N - Step, otherwise. Step
3052   // is equal to the vectorization factor (number of SIMD elements) times the
3053   // unroll factor (number of SIMD instructions).
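  // For example (illustrative): with VF * UF = 4 and N = 10 (no tail
  // folding), the remainder below is 2 and the vector loop covers 8
  // iterations, leaving 2 for the scalar loop. With tail folding, N was
  // first rounded up to 13 above, yielding a vector trip count of 12 that
  // covers all 10 original iterations.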
3054   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
3055 
3056   // There are cases where we *must* run at least one iteration in the remainder
3057   // loop.  See the cost model for when this can happen.  If the step evenly
3058   // divides the trip count, we set the remainder to be equal to the step. If
3059   // the step does not evenly divide the trip count, no adjustment is necessary
3060   // since there will already be scalar iterations. Note that the minimum
3061   // iterations check ensures that N >= Step.
3062   if (Cost->requiresScalarEpilogue(VF)) {
3063     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
3064     R = Builder.CreateSelect(IsZero, Step, R);
3065   }
3066 
3067   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
3068 
3069   return VectorTripCount;
3070 }
3071 
3072 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
3073                                                    const DataLayout &DL) {
  // Verify that V is a vector type with the same number of elements as
  // DstVTy.
3075   auto *DstFVTy = cast<FixedVectorType>(DstVTy);
3076   unsigned VF = DstFVTy->getNumElements();
3077   auto *SrcVecTy = cast<FixedVectorType>(V->getType());
  assert((VF == SrcVecTy->getNumElements()) &&
         "Vector dimensions do not match");
3079   Type *SrcElemTy = SrcVecTy->getElementType();
3080   Type *DstElemTy = DstFVTy->getElementType();
3081   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
3082          "Vector elements must have same size");
3083 
3084   // Do a direct cast if element types are castable.
3085   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
3086     return Builder.CreateBitOrPointerCast(V, DstFVTy);
3087   }
  // V cannot be directly cast to the desired vector type. This may happen
  // when V is a floating point vector but DstVTy is a vector of pointers, or
  // vice-versa. Handle this using a two-step bitcast through an intermediate
  // integer type, i.e. Ptr <-> Int <-> Float.
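  // For example (illustrative, assuming 64-bit pointers): casting
  // <4 x double> to a vector of pointers proceeds
  // <4 x double> -> <4 x i64> -> <4 x i64*>.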
3092   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
3093          "Only one type should be a pointer type");
3094   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
3095          "Only one type should be a floating point type");
3096   Type *IntTy =
3097       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
3098   auto *VecIntTy = FixedVectorType::get(IntTy, VF);
3099   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
3100   return Builder.CreateBitOrPointerCast(CastVal, DstFVTy);
3101 }
3102 
3103 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
3104                                                          BasicBlock *Bypass) {
3105   Value *Count = getOrCreateTripCount(L);
  // Reuse the existing vector loop preheader for the TC checks.
  // Note that a new preheader block is generated for the vector loop below.
3108   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
3109   IRBuilder<> Builder(TCCheckBlock->getTerminator());
3110 
  // Generate code to check if the loop's trip count is less than VF * UF, or
  // equal to it in case a scalar epilogue is required; this implies that the
  // vector trip count is zero. This check also covers the case where adding
  // one to the backedge-taken count overflowed, leading to an incorrect trip
  // count of zero. In this case we will also jump to the scalar loop.
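  // For example (illustrative): with VF = 4 and UF = 2, we branch to the
  // scalar loop if the trip count is less than 8, or less than or equal to
  // 8 when a scalar epilogue is required.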
3116   auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE
3117                                             : ICmpInst::ICMP_ULT;
3118 
3119   // If tail is to be folded, vector loop takes care of all iterations.
3120   Value *CheckMinIters = Builder.getFalse();
3121   if (!Cost->foldTailByMasking()) {
3122     Value *Step = createStepForVF(Builder, Count->getType(), VF, UF);
3123     CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
3124   }
3125   // Create new preheader for vector loop.
3126   LoopVectorPreHeader =
3127       SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
3128                  "vector.ph");
3129 
3130   assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
3131                                DT->getNode(Bypass)->getIDom()) &&
3132          "TC check is expected to dominate Bypass");
3133 
3134   // Update dominator for Bypass & LoopExit (if needed).
3135   DT->changeImmediateDominator(Bypass, TCCheckBlock);
3136   if (!Cost->requiresScalarEpilogue(VF))
3137     // If there is an epilogue which must run, there's no edge from the
    // middle block to exit blocks, and thus no need to update the immediate
3139     // dominator of the exit blocks.
3140     DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
3141 
3142   ReplaceInstWithInst(
3143       TCCheckBlock->getTerminator(),
3144       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
3145   LoopBypassBlocks.push_back(TCCheckBlock);
3146 }
3147 
BasicBlock *InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
3150   BasicBlock *const SCEVCheckBlock =
3151       RTChecks.emitSCEVChecks(L, Bypass, LoopVectorPreHeader, LoopExitBlock);
3152   if (!SCEVCheckBlock)
3153     return nullptr;
3154 
3155   assert(!(SCEVCheckBlock->getParent()->hasOptSize() ||
3156            (OptForSizeBasedOnProfile &&
3157             Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) &&
3158          "Cannot SCEV check stride or overflow when optimizing for size");
3159 
3160 
3161   // Update dominator only if this is first RT check.
3162   if (LoopBypassBlocks.empty()) {
3163     DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
3164     if (!Cost->requiresScalarEpilogue(VF))
3165       // If there is an epilogue which must run, there's no edge from the
      // middle block to exit blocks, and thus no need to update the immediate
3167       // dominator of the exit blocks.
3168       DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
3169   }
3170 
3171   LoopBypassBlocks.push_back(SCEVCheckBlock);
3172   AddedSafetyChecks = true;
3173   return SCEVCheckBlock;
3174 }
3175 
3176 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L,
3177                                                       BasicBlock *Bypass) {
3178   // VPlan-native path does not do any analysis for runtime checks currently.
3179   if (EnableVPlanNativePath)
3180     return nullptr;
3181 
3182   BasicBlock *const MemCheckBlock =
3183       RTChecks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader);
3184 
  // Check if we generated code that checks at runtime whether arrays
  // overlap. We put the checks into a separate block to make the more common
  // case of few elements faster.
3188   if (!MemCheckBlock)
3189     return nullptr;
3190 
3191   if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
3192     assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
3193            "Cannot emit memory checks when optimizing for size, unless forced "
3194            "to vectorize.");
3195     ORE->emit([&]() {
3196       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
3197                                         L->getStartLoc(), L->getHeader())
3198              << "Code-size may be reduced by not forcing "
3199                 "vectorization, or by source-code modifications "
3200                 "eliminating the need for runtime checks "
3201                 "(e.g., adding 'restrict').";
3202     });
3203   }
3204 
3205   LoopBypassBlocks.push_back(MemCheckBlock);
3206 
3207   AddedSafetyChecks = true;
3208 
3209   // We currently don't use LoopVersioning for the actual loop cloning but we
3210   // still use it to add the noalias metadata.
3211   LVer = std::make_unique<LoopVersioning>(
3212       *Legal->getLAI(),
3213       Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
3214       DT, PSE.getSE());
3215   LVer->prepareNoAliasMetadata();
3216   return MemCheckBlock;
3217 }
3218 
3219 Value *InnerLoopVectorizer::emitTransformedIndex(
3220     IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
3221     const InductionDescriptor &ID, BasicBlock *VectorHeader) const {
3222 
3223   SCEVExpander Exp(*SE, DL, "induction");
3224   auto Step = ID.getStep();
3225   auto StartValue = ID.getStartValue();
3226   assert(Index->getType()->getScalarType() == Step->getType() &&
3227          "Index scalar type does not match StepValue type");
3228 
  // Note: the IR at this point is broken. We cannot use SE to create any new
  // SCEV and then expand it, hoping that SCEV's simplification will give us
  // more optimal code. Unfortunately, attempting to do so on invalid IR may
  // lead to various SCEV crashes. So all we can do is use the builder and
  // rely on InstCombine for future simplifications. Here we handle some
  // trivial cases only.
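  // In effect (see the cases below), an integer induction yields
  // Start + Index * Step, a pointer induction yields a GEP of Index * Step
  // elements from Start, and an FP induction yields
  // Start fadd/fsub Index * Step.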
3235   auto CreateAdd = [&B](Value *X, Value *Y) {
3236     assert(X->getType() == Y->getType() && "Types don't match!");
3237     if (auto *CX = dyn_cast<ConstantInt>(X))
3238       if (CX->isZero())
3239         return Y;
3240     if (auto *CY = dyn_cast<ConstantInt>(Y))
3241       if (CY->isZero())
3242         return X;
3243     return B.CreateAdd(X, Y);
3244   };
3245 
3246   // We allow X to be a vector type, in which case Y will potentially be
3247   // splatted into a vector with the same element count.
3248   auto CreateMul = [&B](Value *X, Value *Y) {
3249     assert(X->getType()->getScalarType() == Y->getType() &&
3250            "Types don't match!");
3251     if (auto *CX = dyn_cast<ConstantInt>(X))
3252       if (CX->isOne())
3253         return Y;
3254     if (auto *CY = dyn_cast<ConstantInt>(Y))
3255       if (CY->isOne())
3256         return X;
3257     VectorType *XVTy = dyn_cast<VectorType>(X->getType());
3258     if (XVTy && !isa<VectorType>(Y->getType()))
3259       Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
3260     return B.CreateMul(X, Y);
3261   };
3262 
3263   // Get a suitable insert point for SCEV expansion. For blocks in the vector
3264   // loop, choose the end of the vector loop header (=VectorHeader), because
3265   // the DomTree is not kept up-to-date for additional blocks generated in the
3266   // vector loop. By using the header as insertion point, we guarantee that the
3267   // expanded instructions dominate all their uses.
3268   auto GetInsertPoint = [this, &B, VectorHeader]() {
3269     BasicBlock *InsertBB = B.GetInsertPoint()->getParent();
3270     if (InsertBB != LoopVectorBody &&
3271         LI->getLoopFor(VectorHeader) == LI->getLoopFor(InsertBB))
3272       return VectorHeader->getTerminator();
3273     return &*B.GetInsertPoint();
3274   };
3275 
3276   switch (ID.getKind()) {
3277   case InductionDescriptor::IK_IntInduction: {
3278     assert(!isa<VectorType>(Index->getType()) &&
3279            "Vector indices not supported for integer inductions yet");
3280     assert(Index->getType() == StartValue->getType() &&
3281            "Index type does not match StartValue type");
3282     if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
3283       return B.CreateSub(StartValue, Index);
3284     auto *Offset = CreateMul(
3285         Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()));
3286     return CreateAdd(StartValue, Offset);
3287   }
3288   case InductionDescriptor::IK_PtrInduction: {
3289     assert(isa<SCEVConstant>(Step) &&
3290            "Expected constant step for pointer induction");
3291     return B.CreateGEP(
3292         ID.getElementType(), StartValue,
3293         CreateMul(Index,
3294                   Exp.expandCodeFor(Step, Index->getType()->getScalarType(),
3295                                     GetInsertPoint())));
3296   }
3297   case InductionDescriptor::IK_FpInduction: {
3298     assert(!isa<VectorType>(Index->getType()) &&
3299            "Vector indices not supported for FP inductions yet");
3300     assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
3301     auto InductionBinOp = ID.getInductionBinOp();
3302     assert(InductionBinOp &&
3303            (InductionBinOp->getOpcode() == Instruction::FAdd ||
3304             InductionBinOp->getOpcode() == Instruction::FSub) &&
3305            "Original bin op should be defined for FP induction");
3306 
3307     Value *StepValue = cast<SCEVUnknown>(Step)->getValue();
3308     Value *MulExp = B.CreateFMul(StepValue, Index);
3309     return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
3310                          "induction");
3311   }
3312   case InductionDescriptor::IK_NoInduction:
3313     return nullptr;
3314   }
3315   llvm_unreachable("invalid enum");
3316 }
3317 
3318 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
3319   LoopScalarBody = OrigLoop->getHeader();
3320   LoopVectorPreHeader = OrigLoop->getLoopPreheader();
3321   assert(LoopVectorPreHeader && "Invalid loop structure");
3322   LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr
3323   assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) &&
3324          "multiple exit loop without required epilogue?");
3325 
3326   LoopMiddleBlock =
3327       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3328                  LI, nullptr, Twine(Prefix) + "middle.block");
3329   LoopScalarPreHeader =
3330       SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
3331                  nullptr, Twine(Prefix) + "scalar.ph");
3332 
3333   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3334 
3335   // Set up the middle block terminator.  Two cases:
3336   // 1) If we know that we must execute the scalar epilogue, emit an
3337   //    unconditional branch.
3338   // 2) Otherwise, we must have a single unique exit block (due to how we
  //    implement the multiple exit case).  In this case, set up a conditional
3340   //    branch from the middle block to the loop scalar preheader, and the
3341   //    exit block.  completeLoopSkeleton will update the condition to use an
3342   //    iteration check, if required to decide whether to execute the remainder.
3343   BranchInst *BrInst = Cost->requiresScalarEpilogue(VF) ?
3344     BranchInst::Create(LoopScalarPreHeader) :
3345     BranchInst::Create(LoopExitBlock, LoopScalarPreHeader,
3346                        Builder.getTrue());
3347   BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3348   ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
3349 
  // We intentionally don't let SplitBlock update LoopInfo since
  // LoopVectorBody should belong to another loop than LoopVectorPreHeader.
  // LoopVectorBody is explicitly added to the correct place a few lines
  // later.
3353   LoopVectorBody =
3354       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3355                  nullptr, nullptr, Twine(Prefix) + "vector.body");
3356 
3357   // Update dominator for loop exit.
3358   if (!Cost->requiresScalarEpilogue(VF))
3359     // If there is an epilogue which must run, there's no edge from the
    // middle block to exit blocks, and thus no need to update the immediate
3361     // dominator of the exit blocks.
3362     DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
3363 
3364   // Create and register the new vector loop.
3365   Loop *Lp = LI->AllocateLoop();
3366   Loop *ParentLoop = OrigLoop->getParentLoop();
3367 
3368   // Insert the new loop into the loop nest and register the new basic blocks
3369   // before calling any utilities such as SCEV that require valid LoopInfo.
3370   if (ParentLoop) {
3371     ParentLoop->addChildLoop(Lp);
3372   } else {
3373     LI->addTopLevelLoop(Lp);
3374   }
3375   Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
3376   return Lp;
3377 }
3378 
3379 void InnerLoopVectorizer::createInductionResumeValues(
3380     Loop *L, std::pair<BasicBlock *, Value *> AdditionalBypass) {
3381   assert(((AdditionalBypass.first && AdditionalBypass.second) ||
3382           (!AdditionalBypass.first && !AdditionalBypass.second)) &&
3383          "Inconsistent information about additional bypass.");
3384 
3385   Value *VectorTripCount = getOrCreateVectorTripCount(L);
3386   assert(VectorTripCount && L && "Expected valid arguments");
3387   // We are going to resume the execution of the scalar loop.
3388   // Go over all of the induction variables that we found and fix the
3389   // PHIs that are left in the scalar version of the loop.
3390   // The starting values of PHI nodes depend on the counter of the last
3391   // iteration in the vectorized loop.
3392   // If we come from a bypass edge then we need to start from the original
3393   // start value.
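  // For example (illustrative): an integer IV with start value 10 and step 2
  // resumes in the scalar loop at 10 + VectorTripCount * 2.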
3394   Instruction *OldInduction = Legal->getPrimaryInduction();
3395   for (auto &InductionEntry : Legal->getInductionVars()) {
3396     PHINode *OrigPhi = InductionEntry.first;
3397     InductionDescriptor II = InductionEntry.second;
3398 
    // Create phi nodes to merge from the backedge-taken check block.
3400     PHINode *BCResumeVal =
3401         PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
3402                         LoopScalarPreHeader->getTerminator());
3403     // Copy original phi DL over to the new one.
3404     BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
3405     Value *&EndValue = IVEndValues[OrigPhi];
3406     Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
3407     if (OrigPhi == OldInduction) {
3408       // We know what the end value is.
3409       EndValue = VectorTripCount;
3410     } else {
3411       IRBuilder<> B(L->getLoopPreheader()->getTerminator());
3412 
3413       // Fast-math-flags propagate from the original induction instruction.
3414       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3415         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3416 
3417       Type *StepType = II.getStep()->getType();
3418       Instruction::CastOps CastOp =
3419           CastInst::getCastOpcode(VectorTripCount, true, StepType, true);
3420       Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd");
3421       const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout();
3422       EndValue =
3423           emitTransformedIndex(B, CRD, PSE.getSE(), DL, II, LoopVectorBody);
3424       EndValue->setName("ind.end");
3425 
3426       // Compute the end value for the additional bypass (if applicable).
3427       if (AdditionalBypass.first) {
3428         B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt()));
3429         CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true,
3430                                          StepType, true);
3431         CRD =
3432             B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd");
3433         EndValueFromAdditionalBypass =
3434             emitTransformedIndex(B, CRD, PSE.getSE(), DL, II, LoopVectorBody);
3435         EndValueFromAdditionalBypass->setName("ind.end");
3436       }
3437     }
3438     // The new PHI merges the original incoming value, in case of a bypass,
3439     // or the value at the end of the vectorized loop.
3440     BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);
3441 
3442     // Fix the scalar body counter (PHI node).
3443     // The old induction's phi node in the scalar body needs the truncated
3444     // value.
3445     for (BasicBlock *BB : LoopBypassBlocks)
3446       BCResumeVal->addIncoming(II.getStartValue(), BB);
3447 
3448     if (AdditionalBypass.first)
3449       BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first,
3450                                             EndValueFromAdditionalBypass);
3451 
3452     OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
3453   }
3454 }
3455 
3456 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L,
3457                                                       MDNode *OrigLoopID) {
3458   assert(L && "Expected valid loop.");
3459 
3460   // The trip counts should be cached by now.
3461   Value *Count = getOrCreateTripCount(L);
3462   Value *VectorTripCount = getOrCreateVectorTripCount(L);
3463 
3464   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3465 
3466   // Add a check in the middle block to see if we have completed
3467   // all of the iterations in the first vector loop.  Three cases:
3468   // 1) If we require a scalar epilogue, there is no conditional branch as
3469   //    we unconditionally branch to the scalar preheader.  Do nothing.
3470   // 2) If (N - N%VF) == N, then we *don't* need to run the remainder.
3471   //    Thus if tail is to be folded, we know we don't need to run the
3472   //    remainder and we can use the previous value for the condition (true).
3473   // 3) Otherwise, construct a runtime check.
3474   if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) {
3475     Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
3476                                         Count, VectorTripCount, "cmp.n",
3477                                         LoopMiddleBlock->getTerminator());
3478 
3479     // Here we use the same DebugLoc as the scalar loop latch terminator instead
3480     // of the corresponding compare because they may have ended up with
3481     // different line numbers and we want to avoid awkward line stepping while
    // debugging, e.g. if the compare has a line number inside the loop.
3483     CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3484     cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN);
3485   }
3486 
3487   // Get ready to start creating new instructions into the vectorized body.
3488   assert(LoopVectorPreHeader == L->getLoopPreheader() &&
3489          "Inconsistent vector loop preheader");
3490   Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
3491 
3492 #ifdef EXPENSIVE_CHECKS
3493   assert(DT->verify(DominatorTree::VerificationLevel::Fast));
3494   LI->verify(*DT);
3495 #endif
3496 
3497   return LoopVectorPreHeader;
3498 }
3499 
3500 std::pair<BasicBlock *, Value *>
3501 InnerLoopVectorizer::createVectorizedLoopSkeleton() {
3502   /*
3503    In this function we generate a new loop. The new loop will contain
3504    the vectorized instructions while the old loop will continue to run the
3505    scalar remainder.
3506 
3507        [ ] <-- loop iteration number check.
3508     /   |
3509    /    v
3510   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
3511   |  /  |
3512   | /   v
3513   ||   [ ]     <-- vector pre header.
3514   |/    |
3515   |     v
3516   |    [  ] \
3517   |    [  ]_|   <-- vector loop.
3518   |     |
3519   |     v
3520   \   -[ ]   <--- middle-block.
3521    \/   |
3522    /\   v
3523    | ->[ ]     <--- new preheader.
3524    |    |
3525  (opt)  v      <-- edge from middle to exit iff epilogue is not required.
3526    |   [ ] \
3527    |   [ ]_|   <-- old scalar loop to handle remainder (scalar epilogue).
3528     \   |
3529      \  v
3530       >[ ]     <-- exit block(s).
3531    ...
3532    */
3533 
3534   // Get the metadata of the original loop before it gets modified.
3535   MDNode *OrigLoopID = OrigLoop->getLoopID();
3536 
3537   // Workaround!  Compute the trip count of the original loop and cache it
3538   // before we start modifying the CFG.  This code has a systemic problem
3539   // wherein it tries to run analysis over partially constructed IR; this is
3540   // wrong, and not simply for SCEV.  The trip count of the original loop
3541   // simply happens to be prone to hitting this in practice.  In theory, we
3542   // can hit the same issue for any SCEV, or ValueTracking query done during
3543   // mutation.  See PR49900.
3544   getOrCreateTripCount(OrigLoop);
3545 
3546   // Create an empty vector loop, and prepare basic blocks for the runtime
3547   // checks.
3548   Loop *Lp = createVectorLoopSkeleton("");
3549 
  // Now, compare the trip count against the minimum number of iterations the
  // vector loop needs. If it is too small, skip the vector loop and jump to
  // the scalar loop. This check also covers the case where the
3552   // backedge-taken count is uint##_max: adding one to it will overflow leading
3553   // to an incorrect trip count of zero. In this (rare) case we will also jump
3554   // to the scalar loop.
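  // Worked example (illustrative only): for an i8 trip count with a
  // backedge-taken count of 255, the trip count 255 + 1 wraps to 0 and the
  // check emitted below sends execution to the scalar loop instead of
  // entering a vector loop with a bogus trip count.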
3555   emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader);
3556 
3557   // Generate the code to check any assumptions that we've made for SCEV
3558   // expressions.
3559   emitSCEVChecks(Lp, LoopScalarPreHeader);
3560 
3561   // Generate the code that checks in runtime if arrays overlap. We put the
3562   // checks into a separate block to make the more common case of few elements
3563   // faster.
3564   emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
3565 
3566   createHeaderBranch(Lp);
3567 
3568   // Emit phis for the new starting index of the scalar loop.
3569   createInductionResumeValues(Lp);
3570 
3571   return {completeLoopSkeleton(Lp, OrigLoopID), nullptr};
3572 }
3573 
3574 // Fix up external users of the induction variable. At this point, we are
3575 // in LCSSA form, with all external PHIs that use the IV having one input value,
3576 // coming from the remainder loop. We need those PHIs to also have a correct
3577 // value for the IV when arriving directly from the middle block.
3578 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3579                                        const InductionDescriptor &II,
3580                                        Value *CountRoundDown, Value *EndValue,
3581                                        BasicBlock *MiddleBlock) {
3582   // There are two kinds of external IV usages - those that use the value
3583   // computed in the last iteration (the PHI) and those that use the penultimate
3584   // value (the value that feeds into the phi from the loop latch).
3585   // We allow both, but they, obviously, have different values.
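  //
  // Shorthand illustration (simplified, not IR from an actual test):
  //
  //   loop:
  //     %iv      = phi [ 0, %ph ], [ %iv.next, %loop ]
  //     %iv.next = add %iv, 1
  //     br i1 %cond, label %exit, label %loop
  //   exit:
  //     %use.last = phi [ %iv.next, %loop ]   ; needs EndValue
  //     %use.pen  = phi [ %iv,      %loop ]   ; needs EndValue - Step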
3586 
3587   assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");
3588 
3589   DenseMap<Value *, Value *> MissingVals;
3590 
3591   // An external user of the last iteration's value should see the value that
3592   // the remainder loop uses to initialize its own IV.
3593   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3594   for (User *U : PostInc->users()) {
3595     Instruction *UI = cast<Instruction>(U);
3596     if (!OrigLoop->contains(UI)) {
3597       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3598       MissingVals[UI] = EndValue;
3599     }
3600   }
3601 
  // An external user of the penultimate value needs to see EndValue - Step.
3603   // The simplest way to get this is to recompute it from the constituent SCEVs,
3604   // that is Start + (Step * (CRD - 1)).
3605   for (User *U : OrigPhi->users()) {
3606     auto *UI = cast<Instruction>(U);
3607     if (!OrigLoop->contains(UI)) {
3608       const DataLayout &DL =
3609           OrigLoop->getHeader()->getModule()->getDataLayout();
3610       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3611 
3612       IRBuilder<> B(MiddleBlock->getTerminator());
3613 
3614       // Fast-math-flags propagate from the original induction instruction.
3615       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3616         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3617 
3618       Value *CountMinusOne = B.CreateSub(
3619           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3620       Value *CMO =
3621           !II.getStep()->getType()->isIntegerTy()
3622               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3623                              II.getStep()->getType())
3624               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3625       CMO->setName("cast.cmo");
3626       Value *Escape =
3627           emitTransformedIndex(B, CMO, PSE.getSE(), DL, II, LoopVectorBody);
3628       Escape->setName("ind.escape");
3629       MissingVals[UI] = Escape;
3630     }
3631   }
3632 
3633   for (auto &I : MissingVals) {
3634     PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
3636     // that is %IV2 = phi [...], [ %IV1, %latch ]
3637     // In this case, if IV1 has an external use, we need to avoid adding both
3638     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3639     // don't already have an incoming value for the middle block.
3640     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3641       PHI->addIncoming(I.second, MiddleBlock);
3642   }
3643 }
3644 
3645 namespace {
3646 
3647 struct CSEDenseMapInfo {
3648   static bool canHandle(const Instruction *I) {
3649     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3650            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3651   }
3652 
3653   static inline Instruction *getEmptyKey() {
3654     return DenseMapInfo<Instruction *>::getEmptyKey();
3655   }
3656 
3657   static inline Instruction *getTombstoneKey() {
3658     return DenseMapInfo<Instruction *>::getTombstoneKey();
3659   }
3660 
3661   static unsigned getHashValue(const Instruction *I) {
3662     assert(canHandle(I) && "Unknown instruction!");
3663     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3664                                                            I->value_op_end()));
3665   }
3666 
3667   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3668     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3669         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3670       return LHS == RHS;
3671     return LHS->isIdenticalTo(RHS);
3672   }
3673 };
3674 
3675 } // end anonymous namespace
3676 
/// Perform CSE of induction variable instructions.
3678 static void cse(BasicBlock *BB) {
3679   // Perform simple cse.
3680   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3681   for (Instruction &In : llvm::make_early_inc_range(*BB)) {
3682     if (!CSEDenseMapInfo::canHandle(&In))
3683       continue;
3684 
3685     // Check if we can replace this instruction with any of the
3686     // visited instructions.
3687     if (Instruction *V = CSEMap.lookup(&In)) {
3688       In.replaceAllUsesWith(V);
3689       In.eraseFromParent();
3690       continue;
3691     }
3692 
3693     CSEMap[&In] = &In;
3694   }
3695 }
3696 
3697 InstructionCost
3698 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF,
3699                                               bool &NeedToScalarize) const {
3700   Function *F = CI->getCalledFunction();
3701   Type *ScalarRetTy = CI->getType();
3702   SmallVector<Type *, 4> Tys, ScalarTys;
3703   for (auto &ArgOp : CI->args())
3704     ScalarTys.push_back(ArgOp->getType());
3705 
3706   // Estimate cost of scalarized vector call. The source operands are assumed
3707   // to be vectors, so we need to extract individual elements from there,
3708   // execute VF scalar calls, and then gather the result into the vector return
3709   // value.
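  //
  // Rough illustration (numbers made up): with VF = 4, a scalar call cost of
  // 10 and a scalarization overhead of 6, the estimate computed below is
  // 4 * 10 + 6 = 46, which is then compared against the cost of a real vector
  // call if the target provides one.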
3710   InstructionCost ScalarCallCost =
3711       TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput);
3712   if (VF.isScalar())
3713     return ScalarCallCost;
3714 
3715   // Compute corresponding vector type for return value and arguments.
3716   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3717   for (Type *ScalarTy : ScalarTys)
3718     Tys.push_back(ToVectorTy(ScalarTy, VF));
3719 
3720   // Compute costs of unpacking argument values for the scalar calls and
3721   // packing the return values to a vector.
3722   InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
3723 
3724   InstructionCost Cost =
3725       ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
3726 
3727   // If we can't emit a vector call for this function, then the currently found
3728   // cost is the cost we need to return.
3729   NeedToScalarize = true;
3730   VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
3731   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3732 
3733   if (!TLI || CI->isNoBuiltin() || !VecFunc)
3734     return Cost;
3735 
3736   // If the corresponding vector cost is cheaper, return its cost.
3737   InstructionCost VectorCallCost =
3738       TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput);
3739   if (VectorCallCost < Cost) {
3740     NeedToScalarize = false;
3741     Cost = VectorCallCost;
3742   }
3743   return Cost;
3744 }
3745 
3746 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) {
3747   if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy()))
3748     return Elt;
3749   return VectorType::get(Elt, VF);
3750 }
3751 
3752 InstructionCost
3753 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
3754                                                    ElementCount VF) const {
3755   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3756   assert(ID && "Expected intrinsic call!");
3757   Type *RetTy = MaybeVectorizeType(CI->getType(), VF);
3758   FastMathFlags FMF;
3759   if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
3760     FMF = FPMO->getFastMathFlags();
3761 
3762   SmallVector<const Value *> Arguments(CI->args());
3763   FunctionType *FTy = CI->getCalledFunction()->getFunctionType();
3764   SmallVector<Type *> ParamTys;
3765   std::transform(FTy->param_begin(), FTy->param_end(),
3766                  std::back_inserter(ParamTys),
3767                  [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); });
3768 
3769   IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
3770                                     dyn_cast<IntrinsicInst>(CI));
3771   return TTI.getIntrinsicInstrCost(CostAttrs,
3772                                    TargetTransformInfo::TCK_RecipThroughput);
3773 }
3774 
3775 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3776   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3777   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3778   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3779 }
3780 
3781 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3782   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3783   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3784   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3785 }
3786 
3787 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) {
3788   // For every instruction `I` in MinBWs, truncate the operands, create a
3789   // truncated version of `I` and reextend its result. InstCombine runs
3790   // later and will remove any ext/trunc pairs.
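  //
  // Shorthand illustration (simplified): if MinBWs records that an i32 add
  // only needs 8 bits, then
  //
  //   %a = add <4 x i32> %x, %y
  //
  // becomes
  //
  //   %xt = trunc <4 x i32> %x to <4 x i8>
  //   %yt = trunc <4 x i32> %y to <4 x i8>
  //   %at = add <4 x i8> %xt, %yt
  //   %a  = zext <4 x i8> %at to <4 x i32>
  //
  // and InstCombine later removes the redundant trunc/ext pairs.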
3791   SmallPtrSet<Value *, 4> Erased;
3792   for (const auto &KV : Cost->getMinimalBitwidths()) {
3793     // If the value wasn't vectorized, we must maintain the original scalar
3794     // type. The absence of the value from State indicates that it
3795     // wasn't vectorized.
3796     // FIXME: Should not rely on getVPValue at this point.
3797     VPValue *Def = State.Plan->getVPValue(KV.first, true);
3798     if (!State.hasAnyVectorValue(Def))
3799       continue;
3800     for (unsigned Part = 0; Part < UF; ++Part) {
3801       Value *I = State.get(Def, Part);
3802       if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
3803         continue;
3804       Type *OriginalTy = I->getType();
3805       Type *ScalarTruncatedTy =
3806           IntegerType::get(OriginalTy->getContext(), KV.second);
3807       auto *TruncatedTy = VectorType::get(
3808           ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount());
3809       if (TruncatedTy == OriginalTy)
3810         continue;
3811 
3812       IRBuilder<> B(cast<Instruction>(I));
3813       auto ShrinkOperand = [&](Value *V) -> Value * {
3814         if (auto *ZI = dyn_cast<ZExtInst>(V))
3815           if (ZI->getSrcTy() == TruncatedTy)
3816             return ZI->getOperand(0);
3817         return B.CreateZExtOrTrunc(V, TruncatedTy);
3818       };
3819 
3820       // The actual instruction modification depends on the instruction type,
3821       // unfortunately.
3822       Value *NewI = nullptr;
3823       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3824         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3825                              ShrinkOperand(BO->getOperand(1)));
3826 
3827         // Any wrapping introduced by shrinking this operation shouldn't be
3828         // considered undefined behavior. So, we can't unconditionally copy
3829         // arithmetic wrapping flags to NewI.
3830         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3831       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3832         NewI =
3833             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3834                          ShrinkOperand(CI->getOperand(1)));
3835       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3836         NewI = B.CreateSelect(SI->getCondition(),
3837                               ShrinkOperand(SI->getTrueValue()),
3838                               ShrinkOperand(SI->getFalseValue()));
3839       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3840         switch (CI->getOpcode()) {
3841         default:
3842           llvm_unreachable("Unhandled cast!");
3843         case Instruction::Trunc:
3844           NewI = ShrinkOperand(CI->getOperand(0));
3845           break;
3846         case Instruction::SExt:
3847           NewI = B.CreateSExtOrTrunc(
3848               CI->getOperand(0),
3849               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3850           break;
3851         case Instruction::ZExt:
3852           NewI = B.CreateZExtOrTrunc(
3853               CI->getOperand(0),
3854               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3855           break;
3856         }
3857       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
3858         auto Elements0 =
3859             cast<VectorType>(SI->getOperand(0)->getType())->getElementCount();
3860         auto *O0 = B.CreateZExtOrTrunc(
3861             SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
3862         auto Elements1 =
3863             cast<VectorType>(SI->getOperand(1)->getType())->getElementCount();
3864         auto *O1 = B.CreateZExtOrTrunc(
3865             SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
3866 
3867         NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
3868       } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
3869         // Don't do anything with the operands, just extend the result.
3870         continue;
3871       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3872         auto Elements =
3873             cast<VectorType>(IE->getOperand(0)->getType())->getElementCount();
3874         auto *O0 = B.CreateZExtOrTrunc(
3875             IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3876         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3877         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3878       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3879         auto Elements =
3880             cast<VectorType>(EE->getOperand(0)->getType())->getElementCount();
3881         auto *O0 = B.CreateZExtOrTrunc(
3882             EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3883         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3884       } else {
3885         // If we don't know what to do, be conservative and don't do anything.
3886         continue;
3887       }
3888 
3889       // Lastly, extend the result.
3890       NewI->takeName(cast<Instruction>(I));
3891       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3892       I->replaceAllUsesWith(Res);
3893       cast<Instruction>(I)->eraseFromParent();
3894       Erased.insert(I);
3895       State.reset(Def, Res, Part);
3896     }
3897   }
3898 
3899   // We'll have created a bunch of ZExts that are now parentless. Clean up.
3900   for (const auto &KV : Cost->getMinimalBitwidths()) {
3901     // If the value wasn't vectorized, we must maintain the original scalar
3902     // type. The absence of the value from State indicates that it
3903     // wasn't vectorized.
3904     // FIXME: Should not rely on getVPValue at this point.
3905     VPValue *Def = State.Plan->getVPValue(KV.first, true);
3906     if (!State.hasAnyVectorValue(Def))
3907       continue;
3908     for (unsigned Part = 0; Part < UF; ++Part) {
3909       Value *I = State.get(Def, Part);
3910       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3911       if (Inst && Inst->use_empty()) {
3912         Value *NewI = Inst->getOperand(0);
3913         Inst->eraseFromParent();
3914         State.reset(Def, NewI, Part);
3915       }
3916     }
3917   }
3918 }
3919 
3920 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
3921   // Insert truncates and extends for any truncated instructions as hints to
3922   // InstCombine.
3923   if (VF.isVector())
3924     truncateToMinimalBitwidths(State);
3925 
3926   // Fix widened non-induction PHIs by setting up the PHI operands.
3927   if (OrigPHIsToFix.size()) {
3928     assert(EnableVPlanNativePath &&
3929            "Unexpected non-induction PHIs for fixup in non VPlan-native path");
3930     fixNonInductionPHIs(State);
3931   }
3932 
3933   // At this point every instruction in the original loop is widened to a
3934   // vector form. Now we need to fix the recurrences in the loop. These PHI
3935   // nodes are currently empty because we did not want to introduce cycles.
3936   // This is the second stage of vectorizing recurrences.
3937   fixCrossIterationPHIs(State);
3938 
3939   // Forget the original basic block.
3940   PSE.getSE()->forgetLoop(OrigLoop);
3941 
3942   // If we inserted an edge from the middle block to the unique exit block,
3943   // update uses outside the loop (phis) to account for the newly inserted
3944   // edge.
3945   if (!Cost->requiresScalarEpilogue(VF)) {
3946     // Fix-up external users of the induction variables.
3947     for (auto &Entry : Legal->getInductionVars())
3948       fixupIVUsers(Entry.first, Entry.second,
3949                    getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
3950                    IVEndValues[Entry.first], LoopMiddleBlock);
3951 
3952     fixLCSSAPHIs(State);
3953   }
3954 
3955   for (Instruction *PI : PredicatedInstructions)
3956     sinkScalarOperands(&*PI);
3957 
3958   // Remove redundant induction instructions.
3959   cse(LoopVectorBody);
3960 
3961   // Set/update profile weights for the vector and remainder loops as original
3962   // loop iterations are now distributed among them. Note that original loop
3963   // represented by LoopScalarBody becomes remainder loop after vectorization.
3964   //
  // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
  // end up with a slightly roughened result but that should be OK since
3967   // profile is not inherently precise anyway. Note also possible bypass of
3968   // vector code caused by legality checks is ignored, assigning all the weight
3969   // to the vector loop, optimistically.
3970   //
  // For scalable vectorization we can't know at compile time how many
  // iterations of the loop are handled in one vector iteration, so instead
  // assume a pessimistic vscale of '1'.
3974   setProfileInfoAfterUnrolling(
3975       LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody),
3976       LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF);
3977 }
3978 
3979 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
3980   // In order to support recurrences we need to be able to vectorize Phi nodes.
3981   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
3982   // stage #2: We now need to fix the recurrences by adding incoming edges to
3983   // the currently empty PHI nodes. At this point every instruction in the
3984   // original loop is widened to a vector form so we can use them to construct
3985   // the incoming edges.
3986   VPBasicBlock *Header = State.Plan->getEntry()->getEntryBasicBlock();
3987   for (VPRecipeBase &R : Header->phis()) {
3988     if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R))
3989       fixReduction(ReductionPhi, State);
3990     else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))
3991       fixFirstOrderRecurrence(FOR, State);
3992   }
3993 }
3994 
3995 void InnerLoopVectorizer::fixFirstOrderRecurrence(
3996     VPFirstOrderRecurrencePHIRecipe *PhiR, VPTransformState &State) {
3997   // This is the second phase of vectorizing first-order recurrences. An
3998   // overview of the transformation is described below. Suppose we have the
3999   // following loop.
4000   //
4001   //   for (int i = 0; i < n; ++i)
4002   //     b[i] = a[i] - a[i - 1];
4003   //
4004   // There is a first-order recurrence on "a". For this loop, the shorthand
4005   // scalar IR looks like:
4006   //
4007   //   scalar.ph:
4008   //     s_init = a[-1]
4009   //     br scalar.body
4010   //
4011   //   scalar.body:
4012   //     i = phi [0, scalar.ph], [i+1, scalar.body]
4013   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
4014   //     s2 = a[i]
4015   //     b[i] = s2 - s1
4016   //     br cond, scalar.body, ...
4017   //
  // In this example, s1 is a recurrence because its value depends on the
4019   // previous iteration. In the first phase of vectorization, we created a
4020   // vector phi v1 for s1. We now complete the vectorization and produce the
4021   // shorthand vector IR shown below (for VF = 4, UF = 1).
4022   //
4023   //   vector.ph:
4024   //     v_init = vector(..., ..., ..., a[-1])
4025   //     br vector.body
4026   //
4027   //   vector.body
4028   //     i = phi [0, vector.ph], [i+4, vector.body]
4029   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
4030   //     v2 = a[i, i+1, i+2, i+3];
4031   //     v3 = vector(v1(3), v2(0, 1, 2))
4032   //     b[i, i+1, i+2, i+3] = v2 - v3
4033   //     br cond, vector.body, middle.block
4034   //
4035   //   middle.block:
4036   //     x = v2(3)
4037   //     br scalar.ph
4038   //
4039   //   scalar.ph:
4040   //     s_init = phi [x, middle.block], [a[-1], otherwise]
4041   //     br scalar.body
4042   //
4043   // After execution completes the vector loop, we extract the next value of
4044   // the recurrence (x) to use as the initial value in the scalar loop.
4045 
4046   // Extract the last vector element in the middle block. This will be the
4047   // initial value for the recurrence when jumping to the scalar loop.
4048   VPValue *PreviousDef = PhiR->getBackedgeValue();
4049   Value *Incoming = State.get(PreviousDef, UF - 1);
4050   auto *ExtractForScalar = Incoming;
4051   auto *IdxTy = Builder.getInt32Ty();
4052   if (VF.isVector()) {
4053     auto *One = ConstantInt::get(IdxTy, 1);
4054     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4055     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4056     auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
4057     ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx,
4058                                                     "vector.recur.extract");
4059   }
4060   // Extract the second last element in the middle block if the
4061   // Phi is used outside the loop. We need to extract the phi itself
4062   // and not the last element (the phi update in the current iteration). This
4063   // will be the value when jumping to the exit block from the LoopMiddleBlock,
4064   // when the scalar loop is not run at all.
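  // E.g. (illustrative), for VF = 4 this extracts lane RuntimeVF - 2 = 2, the
  // second-to-last element of the last unrolled part.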
4065   Value *ExtractForPhiUsedOutsideLoop = nullptr;
4066   if (VF.isVector()) {
4067     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4068     auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2));
4069     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
4070         Incoming, Idx, "vector.recur.extract.for.phi");
4071   } else if (UF > 1)
4072     // When loop is unrolled without vectorizing, initialize
4073     // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value
4074     // of `Incoming`. This is analogous to the vectorized case above: extracting
4075     // the second last element when VF > 1.
4076     ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);
4077 
4078   // Fix the initial value of the original recurrence in the scalar loop.
4079   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
4080   PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue());
4081   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
4082   auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue();
4083   for (auto *BB : predecessors(LoopScalarPreHeader)) {
4084     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
4085     Start->addIncoming(Incoming, BB);
4086   }
4087 
4088   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
4089   Phi->setName("scalar.recur");
4090 
4091   // Finally, fix users of the recurrence outside the loop. The users will need
4092   // either the last value of the scalar recurrence or the last value of the
4093   // vector recurrence we extracted in the middle block. Since the loop is in
4094   // LCSSA form, we just need to find all the phi nodes for the original scalar
4095   // recurrence in the exit block, and then add an edge for the middle block.
4096   // Note that LCSSA does not imply single entry when the original scalar loop
4097   // had multiple exiting edges (as we always run the last iteration in the
  // scalar epilogue); in that case, there is no edge from middle to exit and
  // thus no phis that need to be updated.
4100   if (!Cost->requiresScalarEpilogue(VF))
4101     for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4102       if (llvm::is_contained(LCSSAPhi.incoming_values(), Phi))
4103         LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
4104 }
4105 
4106 void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR,
4107                                        VPTransformState &State) {
4108   PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
  // Get its reduction variable descriptor.
4110   assert(Legal->isReductionVariable(OrigPhi) &&
4111          "Unable to find the reduction variable");
4112   const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
4113 
4114   RecurKind RK = RdxDesc.getRecurrenceKind();
4115   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
4116   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
4117   setDebugLocFromInst(ReductionStartValue);
4118 
4119   VPValue *LoopExitInstDef = PhiR->getBackedgeValue();
4120   // This is the vector-clone of the value that leaves the loop.
4121   Type *VecTy = State.get(LoopExitInstDef, 0)->getType();
4122 
4123   // Wrap flags are in general invalid after vectorization, clear them.
4124   clearReductionWrapFlags(RdxDesc, State);
4125 
4126   // Before each round, move the insertion point right between
4127   // the PHIs and the values we are going to write.
4128   // This allows us to write both PHINodes and the extractelement
4129   // instructions.
4130   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4131 
4132   setDebugLocFromInst(LoopExitInst);
4133 
4134   Type *PhiTy = OrigPhi->getType();
4135   // If tail is folded by masking, the vector value to leave the loop should be
4136   // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
4137   // instead of the former. For an inloop reduction the reduction will already
4138   // be predicated, and does not need to be handled here.
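  //
  // Shorthand illustration (simplified, single part):
  //
  //   vec.phi = phi [ %init, %ph ], [ ..., %body ]
  //   %rdx    = add vec.phi, %val
  //   %sel    = select %mask, %rdx, vec.phi   ; value that leaves the loop
  //
  // The loop below redirects the recorded loop-exit value from %rdx to %sel.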
4139   if (Cost->foldTailByMasking() && !PhiR->isInLoop()) {
4140     for (unsigned Part = 0; Part < UF; ++Part) {
4141       Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
4142       Value *Sel = nullptr;
4143       for (User *U : VecLoopExitInst->users()) {
4144         if (isa<SelectInst>(U)) {
4145           assert(!Sel && "Reduction exit feeding two selects");
4146           Sel = U;
4147         } else
4148           assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
4149       }
4150       assert(Sel && "Reduction exit feeds no select");
4151       State.reset(LoopExitInstDef, Sel, Part);
4152 
4153       // If the target can create a predicated operator for the reduction at no
4154       // extra cost in the loop (for example a predicated vadd), it can be
4155       // cheaper for the select to remain in the loop than be sunk out of it,
4156       // and so use the select value for the phi instead of the old
4157       // LoopExitValue.
4158       if (PreferPredicatedReductionSelect ||
4159           TTI->preferPredicatedReductionSelect(
4160               RdxDesc.getOpcode(), PhiTy,
4161               TargetTransformInfo::ReductionFlags())) {
        auto *VecRdxPhi = cast<PHINode>(State.get(PhiR, Part));
4164         VecRdxPhi->setIncomingValueForBlock(
4165             LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel);
4166       }
4167     }
4168   }
4169 
4170   // If the vector reduction can be performed in a smaller type, we truncate
4171   // then extend the loop exit value to enable InstCombine to evaluate the
4172   // entire expression in the smaller type.
4173   if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) {
4174     assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!");
4175     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
4176     Builder.SetInsertPoint(
4177         LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
4178     VectorParts RdxParts(UF);
4179     for (unsigned Part = 0; Part < UF; ++Part) {
4180       RdxParts[Part] = State.get(LoopExitInstDef, Part);
4181       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4182       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
4183                                         : Builder.CreateZExt(Trunc, VecTy);
4184       for (User *U : llvm::make_early_inc_range(RdxParts[Part]->users()))
4185         if (U != Trunc) {
4186           U->replaceUsesOfWith(RdxParts[Part], Extnd);
4187           RdxParts[Part] = Extnd;
4188         }
4189     }
4190     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4191     for (unsigned Part = 0; Part < UF; ++Part) {
4192       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4193       State.reset(LoopExitInstDef, RdxParts[Part], Part);
4194     }
4195   }
4196 
4197   // Reduce all of the unrolled parts into a single vector.
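  // Illustrative sketch: for UF = 2 with an integer add reduction, the loop
  // below computes bin.rdx = part1 + part0 as a single vector add; the final
  // scalar value is produced afterwards (for non-inloop vector reductions) by
  // createTargetReduction().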
4198   Value *ReducedPartRdx = State.get(LoopExitInstDef, 0);
4199   unsigned Op = RecurrenceDescriptor::getOpcode(RK);
4200 
4201   // The middle block terminator has already been assigned a DebugLoc here (the
4202   // OrigLoop's single latch terminator). We want the whole middle block to
4203   // appear to execute on this line because: (a) it is all compiler generated,
4204   // (b) these instructions are always executed after evaluating the latch
4205   // conditional branch, and (c) other passes may add new predecessors which
4206   // terminate on this line. This is the easiest way to ensure we don't
4207   // accidentally cause an extra step back into the loop while debugging.
4208   setDebugLocFromInst(LoopMiddleBlock->getTerminator());
4209   if (PhiR->isOrdered())
4210     ReducedPartRdx = State.get(LoopExitInstDef, UF - 1);
4211   else {
4212     // Floating-point operations should have some FMF to enable the reduction.
4213     IRBuilderBase::FastMathFlagGuard FMFG(Builder);
4214     Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
4215     for (unsigned Part = 1; Part < UF; ++Part) {
4216       Value *RdxPart = State.get(LoopExitInstDef, Part);
4217       if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
4218         ReducedPartRdx = Builder.CreateBinOp(
4219             (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
4220       } else if (RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK))
4221         ReducedPartRdx = createSelectCmpOp(Builder, ReductionStartValue, RK,
4222                                            ReducedPartRdx, RdxPart);
4223       else
4224         ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
4225     }
4226   }
4227 
4228   // Create the reduction after the loop. Note that inloop reductions create the
4229   // target reduction in the loop using a Reduction recipe.
4230   if (VF.isVector() && !PhiR->isInLoop()) {
4231     ReducedPartRdx =
4232         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, OrigPhi);
4233     // If the reduction can be performed in a smaller type, we need to extend
4234     // the reduction to the wider type before we branch to the original loop.
4235     if (PhiTy != RdxDesc.getRecurrenceType())
4236       ReducedPartRdx = RdxDesc.isSigned()
4237                            ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
4238                            : Builder.CreateZExt(ReducedPartRdx, PhiTy);
4239   }
4240 
4241   PHINode *ResumePhi =
4242       dyn_cast<PHINode>(PhiR->getStartValue()->getUnderlyingValue());
4243 
4244   // Create a phi node that merges control-flow from the backedge-taken check
4245   // block and the middle block.
4246   PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx",
4247                                         LoopScalarPreHeader->getTerminator());
4248 
4249   // If we are fixing reductions in the epilogue loop then we should already
4250   // have created a bc.merge.rdx Phi after the main vector body. Ensure that
4251   // we carry over the incoming values correctly.
4252   for (auto *Incoming : predecessors(LoopScalarPreHeader)) {
4253     if (Incoming == LoopMiddleBlock)
4254       BCBlockPhi->addIncoming(ReducedPartRdx, Incoming);
4255     else if (ResumePhi && llvm::is_contained(ResumePhi->blocks(), Incoming))
4256       BCBlockPhi->addIncoming(ResumePhi->getIncomingValueForBlock(Incoming),
4257                               Incoming);
4258     else
4259       BCBlockPhi->addIncoming(ReductionStartValue, Incoming);
4260   }
4261 
4262   // Set the resume value for this reduction
4263   ReductionResumeValues.insert({&RdxDesc, BCBlockPhi});
4264 
4265   // Now, we need to fix the users of the reduction variable
4266   // inside and outside of the scalar remainder loop.
4267 
4268   // We know that the loop is in LCSSA form. We need to update the PHI nodes
4269   // in the exit blocks.  See comment on analogous loop in
  // fixFirstOrderRecurrence for a more complete explanation of the logic.
4271   if (!Cost->requiresScalarEpilogue(VF))
4272     for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4273       if (llvm::is_contained(LCSSAPhi.incoming_values(), LoopExitInst))
4274         LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
4275 
4276   // Fix the scalar loop reduction variable with the incoming reduction sum
4277   // from the vector body and from the backedge value.
4278   int IncomingEdgeBlockIdx =
4279       OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4280   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4281   // Pick the other block.
4282   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4283   OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4284   OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4285 }
4286 
4287 void InnerLoopVectorizer::clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
4288                                                   VPTransformState &State) {
4289   RecurKind RK = RdxDesc.getRecurrenceKind();
4290   if (RK != RecurKind::Add && RK != RecurKind::Mul)
4291     return;
4292 
4293   Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
4294   assert(LoopExitInstr && "null loop exit instruction");
4295   SmallVector<Instruction *, 8> Worklist;
4296   SmallPtrSet<Instruction *, 8> Visited;
4297   Worklist.push_back(LoopExitInstr);
4298   Visited.insert(LoopExitInstr);
4299 
4300   while (!Worklist.empty()) {
4301     Instruction *Cur = Worklist.pop_back_val();
4302     if (isa<OverflowingBinaryOperator>(Cur))
4303       for (unsigned Part = 0; Part < UF; ++Part) {
4304         // FIXME: Should not rely on getVPValue at this point.
4305         Value *V = State.get(State.Plan->getVPValue(Cur, true), Part);
4306         cast<Instruction>(V)->dropPoisonGeneratingFlags();
4307       }
4308 
4309     for (User *U : Cur->users()) {
4310       Instruction *UI = cast<Instruction>(U);
4311       if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
4312           Visited.insert(UI).second)
4313         Worklist.push_back(UI);
4314     }
4315   }
4316 }
4317 
4318 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) {
4319   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4320     if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1)
4321       // Some phis were already hand updated by the reduction and recurrence
4322       // code above, leave them alone.
4323       continue;
4324 
4325     auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
4326     // Non-instruction incoming values will have only one value.
4327 
4328     VPLane Lane = VPLane::getFirstLane();
4329     if (isa<Instruction>(IncomingValue) &&
4330         !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue),
4331                                            VF))
4332       Lane = VPLane::getLastLaneForVF(VF);
4333 
4334     // Can be a loop invariant incoming value or the last scalar value to be
4335     // extracted from the vectorized loop.
4336     // FIXME: Should not rely on getVPValue at this point.
4337     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4338     Value *lastIncomingValue =
4339         OrigLoop->isLoopInvariant(IncomingValue)
4340             ? IncomingValue
4341             : State.get(State.Plan->getVPValue(IncomingValue, true),
4342                         VPIteration(UF - 1, Lane));
4343     LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
4344   }
4345 }
4346 
4347 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4348   // The basic block and loop containing the predicated instruction.
4349   auto *PredBB = PredInst->getParent();
4350   auto *VectorLoop = LI->getLoopFor(PredBB);
4351 
4352   // Initialize a worklist with the operands of the predicated instruction.
4353   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4354 
4355   // Holds instructions that we need to analyze again. An instruction may be
4356   // reanalyzed if we don't yet know if we can sink it or not.
4357   SmallVector<Instruction *, 8> InstsToReanalyze;
4358 
4359   // Returns true if a given use occurs in the predicated block. Phi nodes use
4360   // their operands in their corresponding predecessor blocks.
4361   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4362     auto *I = cast<Instruction>(U.getUser());
4363     BasicBlock *BB = I->getParent();
4364     if (auto *Phi = dyn_cast<PHINode>(I))
4365       BB = Phi->getIncomingBlock(
4366           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4367     return BB == PredBB;
4368   };
4369 
4370   // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
  // operands are then added to the worklist. The algorithm ends once a full
  // pass over the worklist sinks no further instructions.
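  //
  // Minimal sketch of the fixpoint (hypothetical chain %a -> %b -> PredInst):
  // the first pass may sink only %b, since %a's use is not yet in PredBB; the
  // next pass then sinks %a, and a final pass that sinks nothing terminates
  // the loop.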
4374   bool Changed;
4375   do {
4376     // Add the instructions that need to be reanalyzed to the worklist, and
4377     // reset the changed indicator.
4378     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4379     InstsToReanalyze.clear();
4380     Changed = false;
4381 
4382     while (!Worklist.empty()) {
4383       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4384 
4385       // We can't sink an instruction if it is a phi node, is not in the loop,
4386       // or may have side effects.
4387       if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) ||
4388           I->mayHaveSideEffects())
4389         continue;
4390 
4391       // If the instruction is already in PredBB, check if we can sink its
4392       // operands. In that case, VPlan's sinkScalarOperands() succeeded in
4393       // sinking the scalar instruction I, hence it appears in PredBB; but it
4394       // may have failed to sink I's operands (recursively), which we try
4395       // (again) here.
4396       if (I->getParent() == PredBB) {
4397         Worklist.insert(I->op_begin(), I->op_end());
4398         continue;
4399       }
4400 
4401       // It's legal to sink the instruction if all its uses occur in the
4402       // predicated block. Otherwise, there's nothing to do yet, and we may
4403       // need to reanalyze the instruction.
4404       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4405         InstsToReanalyze.push_back(I);
4406         continue;
4407       }
4408 
4409       // Move the instruction to the beginning of the predicated block, and add
      // its operands to the worklist.
4411       I->moveBefore(&*PredBB->getFirstInsertionPt());
4412       Worklist.insert(I->op_begin(), I->op_end());
4413 
4414       // The sinking may have enabled other instructions to be sunk, so we will
4415       // need to iterate.
4416       Changed = true;
4417     }
4418   } while (Changed);
4419 }
4420 
4421 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
4422   for (PHINode *OrigPhi : OrigPHIsToFix) {
4423     VPWidenPHIRecipe *VPPhi =
4424         cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi));
4425     PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
4426     // Make sure the builder has a valid insert point.
4427     Builder.SetInsertPoint(NewPhi);
4428     for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) {
4429       VPValue *Inc = VPPhi->getIncomingValue(i);
4430       VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i);
4431       NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]);
4432     }
4433   }
4434 }
4435 
4436 bool InnerLoopVectorizer::useOrderedReductions(
4437     const RecurrenceDescriptor &RdxDesc) {
4438   return Cost->useOrderedReductions(RdxDesc);
4439 }
4440 
4441 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
4442                                               VPWidenPHIRecipe *PhiR,
4443                                               VPTransformState &State) {
4444   PHINode *P = cast<PHINode>(PN);
4445   if (EnableVPlanNativePath) {
4446     // Currently we enter here in the VPlan-native path for non-induction
4447     // PHIs where all control flow is uniform. We simply widen these PHIs.
4448     // Create a vector phi with no operands - the vector phi operands will be
4449     // set at the end of vector code generation.
4450     Type *VecTy = (State.VF.isScalar())
4451                       ? PN->getType()
4452                       : VectorType::get(PN->getType(), State.VF);
4453     Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4454     State.set(PhiR, VecPhi, 0);
4455     OrigPHIsToFix.push_back(P);
4456 
4457     return;
4458   }
4459 
4460   assert(PN->getParent() == OrigLoop->getHeader() &&
4461          "Non-header phis should have been handled elsewhere");
4462 
4463   // In order to support recurrences we need to be able to vectorize Phi nodes.
4464   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4465   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4466   // this value when we vectorize all of the instructions that use the PHI.
4467 
4468   assert(!Legal->isReductionVariable(P) &&
4469          "reductions should be handled elsewhere");
4470 
4471   setDebugLocFromInst(P);
4472 
4473   // This PHINode must be an induction variable.
4474   // Make sure that we know about it.
4475   assert(Legal->getInductionVars().count(P) && "Not an induction variable");
4476 
4477   InductionDescriptor II = Legal->getInductionVars().lookup(P);
4478   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4479 
4480   auto *IVR = PhiR->getParent()->getPlan()->getCanonicalIV();
4481   PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, 0));
4482 
4483   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4484   // which can be found from the original scalar operations.
4485   switch (II.getKind()) {
4486   case InductionDescriptor::IK_NoInduction:
4487     llvm_unreachable("Unknown induction");
4488   case InductionDescriptor::IK_IntInduction:
4489   case InductionDescriptor::IK_FpInduction:
4490     llvm_unreachable("Integer/fp induction is handled elsewhere.");
4491   case InductionDescriptor::IK_PtrInduction: {
4492     // Handle the pointer induction variable case.
4493     assert(P->getType()->isPointerTy() && "Unexpected type.");
4494 
4495     if (Cost->isScalarAfterVectorization(P, State.VF)) {
4496       // This is the normalized GEP that starts counting at zero.
4497       Value *PtrInd =
4498           Builder.CreateSExtOrTrunc(CanonicalIV, II.getStep()->getType());
4499       // Determine the number of scalars we need to generate for each unroll
4500       // iteration. If the instruction is uniform, we only need to generate the
4501       // first lane. Otherwise, we generate all VF values.
4502       bool IsUniform = vputils::onlyFirstLaneUsed(PhiR);
4503       assert((IsUniform || !State.VF.isScalable()) &&
4504              "Cannot scalarize a scalable VF");
4505       unsigned Lanes = IsUniform ? 1 : State.VF.getFixedValue();
4506 
4507       for (unsigned Part = 0; Part < UF; ++Part) {
4508         Value *PartStart =
4509             createStepForVF(Builder, PtrInd->getType(), VF, Part);
4510 
4511         for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4512           Value *Idx = Builder.CreateAdd(
4513               PartStart, ConstantInt::get(PtrInd->getType(), Lane));
4514           Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4515           Value *SclrGep = emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(),
4516                                                 DL, II, State.CFG.PrevBB);
4517           SclrGep->setName("next.gep");
4518           State.set(PhiR, SclrGep, VPIteration(Part, Lane));
4519         }
4520       }
4521       return;
4522     }
4523     assert(isa<SCEVConstant>(II.getStep()) &&
4524            "Induction step not a SCEV constant!");
4525     Type *PhiType = II.getStep()->getType();
4526 
4527     // Build a pointer phi
4528     Value *ScalarStartValue = PhiR->getStartValue()->getLiveInIRValue();
4529     Type *ScStValueType = ScalarStartValue->getType();
4530     PHINode *NewPointerPhi =
4531         PHINode::Create(ScStValueType, 2, "pointer.phi", CanonicalIV);
4532     NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader);
4533 
4534     // A pointer induction, performed by using a gep
4535     BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
4536     Instruction *InductionLoc = LoopLatch->getTerminator();
4537     const SCEV *ScalarStep = II.getStep();
4538     SCEVExpander Exp(*PSE.getSE(), DL, "induction");
4539     Value *ScalarStepValue =
4540         Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc);
4541     Value *RuntimeVF = getRuntimeVF(Builder, PhiType, VF);
4542     Value *NumUnrolledElems =
4543         Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF));
4544     Value *InductionGEP = GetElementPtrInst::Create(
4545         II.getElementType(), NewPointerPhi,
4546         Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind",
4547         InductionLoc);
4548     NewPointerPhi->addIncoming(InductionGEP, LoopLatch);
4549 
4550     // Create UF many actual address geps that use the pointer
4551     // phi as base and a vectorized version of the step value
4552     // (<step*0, ..., step*N>) as offset.
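    // Illustrative sketch (VF = 4, UF = 2): part 0 uses offsets <0, 1, 2, 3>
    // and part 1 uses offsets <4, 5, 6, 7>, each scaled by the step and added
    // to the pointer phi via a vector GEP.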
4553     for (unsigned Part = 0; Part < State.UF; ++Part) {
4554       Type *VecPhiType = VectorType::get(PhiType, State.VF);
4555       Value *StartOffsetScalar =
4556           Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part));
4557       Value *StartOffset =
4558           Builder.CreateVectorSplat(State.VF, StartOffsetScalar);
4559       // Create a vector of consecutive numbers from zero to VF.
4560       StartOffset =
4561           Builder.CreateAdd(StartOffset, Builder.CreateStepVector(VecPhiType));
4562 
4563       Value *GEP = Builder.CreateGEP(
4564           II.getElementType(), NewPointerPhi,
4565           Builder.CreateMul(
4566               StartOffset, Builder.CreateVectorSplat(State.VF, ScalarStepValue),
4567               "vector.gep"));
4568       State.set(PhiR, GEP, Part);
4569     }
4570   }
4571   }
4572 }
4573 
4574 /// A helper function for checking whether an integer division-related
4575 /// instruction may divide by zero (in which case it must be predicated if
4576 /// executed conditionally in the scalar code).
4577 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
4578 /// Non-zero divisors that are non compile-time constants will not be
4579 /// converted into multiplication, so we will still end up scalarizing
4580 /// the division, but can do so w/o predication.
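/// Illustrative example: in
///   if (b[i] != 0) a[i] = x / b[i];
/// the division is conditional in the scalar loop and must remain predicated,
/// whereas dividing by a non-zero compile-time constant needs no predication.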
4581 static bool mayDivideByZero(Instruction &I) {
4582   assert((I.getOpcode() == Instruction::UDiv ||
4583           I.getOpcode() == Instruction::SDiv ||
4584           I.getOpcode() == Instruction::URem ||
4585           I.getOpcode() == Instruction::SRem) &&
4586          "Unexpected instruction");
4587   Value *Divisor = I.getOperand(1);
4588   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4589   return !CInt || CInt->isZero();
4590 }
4591 
4592 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def,
4593                                                VPUser &ArgOperands,
4594                                                VPTransformState &State) {
4595   assert(!isa<DbgInfoIntrinsic>(I) &&
4596          "DbgInfoIntrinsic should have been dropped during VPlan construction");
4597   setDebugLocFromInst(&I);
4598 
4599   Module *M = I.getParent()->getParent()->getParent();
4600   auto *CI = cast<CallInst>(&I);
4601 
4602   SmallVector<Type *, 4> Tys;
4603   for (Value *ArgOperand : CI->args())
4604     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue()));
4605 
4606   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4607 
  // The flag shows whether we use an intrinsic or a regular call for the
  // vectorized version of the instruction.
  // Is it beneficial to perform an intrinsic call compared to a lib call?
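  // E.g. (illustrative), for a call recognized as a sqrt with VF = 4 we would
  // compare the cost of the llvm.sqrt.v4f32 intrinsic against that of a
  // vectorized library function known to the VFDatabase, and emit the cheaper
  // of the two below.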
4611   bool NeedToScalarize = false;
4612   InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
4613   InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0;
4614   bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
4615   assert((UseVectorIntrinsic || !NeedToScalarize) &&
4616          "Instruction should be scalarized elsewhere.");
4617   assert((IntrinsicCost.isValid() || CallCost.isValid()) &&
4618          "Either the intrinsic cost or vector call cost must be valid");
4619 
4620   for (unsigned Part = 0; Part < UF; ++Part) {
4621     SmallVector<Type *, 2> TysForDecl = {CI->getType()};
4622     SmallVector<Value *, 4> Args;
4623     for (auto &I : enumerate(ArgOperands.operands())) {
4624       // Some intrinsics have a scalar argument - don't replace it with a
4625       // vector.
4626       Value *Arg;
4627       if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
4628         Arg = State.get(I.value(), Part);
4629       else {
4630         Arg = State.get(I.value(), VPIteration(0, 0));
4631         if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index()))
4632           TysForDecl.push_back(Arg->getType());
4633       }
4634       Args.push_back(Arg);
4635     }
4636 
4637     Function *VectorF;
4638     if (UseVectorIntrinsic) {
4639       // Use vector version of the intrinsic.
4640       if (VF.isVector())
4641         TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
4642       VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4643       assert(VectorF && "Can't retrieve vector intrinsic.");
4644     } else {
4645       // Use vector version of the function call.
4646       const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
4647 #ifndef NDEBUG
4648       assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
4649              "Can't create vector function.");
4650 #endif
      VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
    }
    SmallVector<OperandBundleDef, 1> OpBundles;
    CI->getOperandBundlesAsDefs(OpBundles);
    CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);

    if (isa<FPMathOperator>(V))
      V->copyFastMathFlags(CI);

    State.set(Def, V, Part);
    addMetadata(V, &I);
4662   }
4663 }
4664 
4665 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
4666   // We should not collect Scalars more than once per VF. Right now, this
4667   // function is called from collectUniformsAndScalars(), which already does
4668   // this check. Collecting Scalars for VF=1 does not make any sense.
4669   assert(VF.isVector() && Scalars.find(VF) == Scalars.end() &&
4670          "This function should not be visited twice for the same VF");
4671 
4672   SmallSetVector<Instruction *, 8> Worklist;
4673 
4674   // These sets are used to seed the analysis with pointers used by memory
4675   // accesses that will remain scalar.
4676   SmallSetVector<Instruction *, 8> ScalarPtrs;
4677   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
4678   auto *Latch = TheLoop->getLoopLatch();
4679 
4680   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
4681   // The pointer operands of loads and stores will be scalar as long as the
4682   // memory access is not a gather or scatter operation. The value operand of a
4683   // store will remain scalar if the store is scalarized.
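  // Illustrative example: in a[i] = b[i], the GEPs feeding the load and the
  // store are scalar uses when the accesses are widened into consecutive
  // loads/stores, but not when they are turned into gathers/scatters.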
4684   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
4685     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
4686     assert(WideningDecision != CM_Unknown &&
4687            "Widening decision should be ready at this moment");
4688     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
4689       if (Ptr == Store->getValueOperand())
4690         return WideningDecision == CM_Scalarize;
4691     assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
4692            "Ptr is neither a value or pointer operand");
4693     return WideningDecision != CM_GatherScatter;
4694   };
4695 
4696   // A helper that returns true if the given value is a bitcast or
4697   // getelementptr instruction contained in the loop.
4698   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
4699     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
4700             isa<GetElementPtrInst>(V)) &&
4701            !TheLoop->isLoopInvariant(V);
4702   };
4703 
4704   // A helper that evaluates a memory access's use of a pointer. If the use will
4705   // be a scalar use and the pointer is only used by memory accesses, we place
4706   // the pointer in ScalarPtrs. Otherwise, the pointer is placed in
4707   // PossibleNonScalarPtrs.
4708   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
4709     // We only care about bitcast and getelementptr instructions contained in
4710     // the loop.
4711     if (!isLoopVaryingBitCastOrGEP(Ptr))
4712       return;
4713 
4714     // If the pointer has already been identified as scalar (e.g., if it was
4715     // also identified as uniform), there's nothing to do.
4716     auto *I = cast<Instruction>(Ptr);
4717     if (Worklist.count(I))
4718       return;
4719 
4720     // If the use of the pointer will be a scalar use, and all users of the
4721     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
4722     // place the pointer in PossibleNonScalarPtrs.
4723     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
4724           return isa<LoadInst>(U) || isa<StoreInst>(U);
4725         }))
4726       ScalarPtrs.insert(I);
4727     else
4728       PossibleNonScalarPtrs.insert(I);
4729   };
4730 
  // We seed the scalars analysis with two classes of instructions: (1)
  // instructions marked uniform-after-vectorization and (2) bitcast and
  // getelementptr instructions used by memory accesses requiring a scalar
  // use.
4735   //
4736   // (1) Add to the worklist all instructions that have been identified as
4737   // uniform-after-vectorization.
4738   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
4739 
4740   // (2) Add to the worklist all bitcast and getelementptr instructions used by
4741   // memory accesses requiring a scalar use. The pointer operands of loads and
  // stores will be scalar as long as the memory access is not a gather or
4743   // scatter operation. The value operand of a store will remain scalar if the
4744   // store is scalarized.
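  // For example, given a consecutive (CM_Widen) access like
  //   %gep = getelementptr inbounds i32, i32* %a, i64 %iv
  //   %val = load i32, i32* %gep
  // the widened load consumes only a single scalar address, so %gep is added
  // to ScalarPtrs, provided all of its users are loads or stores.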
4745   for (auto *BB : TheLoop->blocks())
4746     for (auto &I : *BB) {
4747       if (auto *Load = dyn_cast<LoadInst>(&I)) {
4748         evaluatePtrUse(Load, Load->getPointerOperand());
4749       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
4750         evaluatePtrUse(Store, Store->getPointerOperand());
4751         evaluatePtrUse(Store, Store->getValueOperand());
4752       }
4753     }
4754   for (auto *I : ScalarPtrs)
4755     if (!PossibleNonScalarPtrs.count(I)) {
4756       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
4757       Worklist.insert(I);
4758     }
4759 
4760   // Insert the forced scalars.
4761   // FIXME: Currently widenPHIInstruction() often creates a dead vector
4762   // induction variable when the PHI user is scalarized.
4763   auto ForcedScalar = ForcedScalars.find(VF);
4764   if (ForcedScalar != ForcedScalars.end())
4765     for (auto *I : ForcedScalar->second)
4766       Worklist.insert(I);
4767 
4768   // Expand the worklist by looking through any bitcasts and getelementptr
4769   // instructions we've already identified as scalar. This is similar to the
4770   // expansion step in collectLoopUniforms(); however, here we're only
4771   // expanding to include additional bitcasts and getelementptr instructions.
4772   unsigned Idx = 0;
4773   while (Idx != Worklist.size()) {
4774     Instruction *Dst = Worklist[Idx++];
4775     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
4776       continue;
4777     auto *Src = cast<Instruction>(Dst->getOperand(0));
4778     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
4779           auto *J = cast<Instruction>(U);
4780           return !TheLoop->contains(J) || Worklist.count(J) ||
4781                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
4782                   isScalarUse(J, Src));
4783         })) {
4784       Worklist.insert(Src);
4785       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
4786     }
4787   }
4788 
4789   // An induction variable will remain scalar if all users of the induction
4790   // variable and induction variable update remain scalar.
4791   for (auto &Induction : Legal->getInductionVars()) {
4792     auto *Ind = Induction.first;
4793     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4794 
4795     // If tail-folding is applied, the primary induction variable will be used
4796     // to feed a vector compare.
4797     if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
4798       continue;
4799 
4800     // Returns true if \p Indvar is a pointer induction that is used directly by
4801     // load/store instruction \p I.
4802     auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar,
4803                                               Instruction *I) {
4804       return Induction.second.getKind() ==
4805                  InductionDescriptor::IK_PtrInduction &&
4806              (isa<LoadInst>(I) || isa<StoreInst>(I)) &&
4807              Indvar == getLoadStorePointerOperand(I) && isScalarUse(I, Indvar);
4808     };
4809 
4810     // Determine if all users of the induction variable are scalar after
4811     // vectorization.
4812     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4813       auto *I = cast<Instruction>(U);
4814       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
4815              IsDirectLoadStoreFromPtrIndvar(Ind, I);
4816     });
4817     if (!ScalarInd)
4818       continue;
4819 
4820     // Determine if all users of the induction variable update instruction are
4821     // scalar after vectorization.
4822     auto ScalarIndUpdate =
4823         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4824           auto *I = cast<Instruction>(U);
4825           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
4826                  IsDirectLoadStoreFromPtrIndvar(IndUpdate, I);
4827         });
4828     if (!ScalarIndUpdate)
4829       continue;
4830 
4831     // The induction variable and its update instruction will remain scalar.
4832     Worklist.insert(Ind);
4833     Worklist.insert(IndUpdate);
4834     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
4835     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
4836                       << "\n");
4837   }
4838 
4839   Scalars[VF].insert(Worklist.begin(), Worklist.end());
4840 }
4841 
4842 bool LoopVectorizationCostModel::isScalarWithPredication(
4843     Instruction *I, ElementCount VF) const {
4844   if (!blockNeedsPredicationForAnyReason(I->getParent()))
4845     return false;
4846   switch(I->getOpcode()) {
4847   default:
4848     break;
4849   case Instruction::Load:
4850   case Instruction::Store: {
4851     if (!Legal->isMaskRequired(I))
4852       return false;
4853     auto *Ptr = getLoadStorePointerOperand(I);
4854     auto *Ty = getLoadStoreType(I);
4855     Type *VTy = Ty;
4856     if (VF.isVector())
4857       VTy = VectorType::get(Ty, VF);
4858     const Align Alignment = getLoadStoreAlignment(I);
4859     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
4860                                 TTI.isLegalMaskedGather(VTy, Alignment))
4861                             : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
4862                                 TTI.isLegalMaskedScatter(VTy, Alignment));
4863   }
4864   case Instruction::UDiv:
4865   case Instruction::SDiv:
4866   case Instruction::SRem:
4867   case Instruction::URem:
4868     return mayDivideByZero(*I);
4869   }
4870   return false;
4871 }
4872 
4873 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
4874     Instruction *I, ElementCount VF) {
4875   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
4876   assert(getWideningDecision(I, VF) == CM_Unknown &&
4877          "Decision should not be set yet.");
4878   auto *Group = getInterleavedAccessGroup(I);
4879   assert(Group && "Must have a group.");
4880 
  // If the instruction's allocated size doesn't equal its type size, it
4882   // requires padding and will be scalarized.
4883   auto &DL = I->getModule()->getDataLayout();
4884   auto *ScalarTy = getLoadStoreType(I);
4885   if (hasIrregularType(ScalarTy, DL))
4886     return false;
4887 
4888   // Check if masking is required.
4889   // A Group may need masking for one of two reasons: it resides in a block that
4890   // needs predication, or it was decided to use masking to deal with gaps
4891   // (either a gap at the end of a load-access that may result in a speculative
4892   // load, or any gaps in a store-access).
4893   bool PredicatedAccessRequiresMasking =
4894       blockNeedsPredicationForAnyReason(I->getParent()) &&
4895       Legal->isMaskRequired(I);
4896   bool LoadAccessWithGapsRequiresEpilogMasking =
4897       isa<LoadInst>(I) && Group->requiresScalarEpilogue() &&
4898       !isScalarEpilogueAllowed();
4899   bool StoreAccessWithGapsRequiresMasking =
4900       isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor());
4901   if (!PredicatedAccessRequiresMasking &&
4902       !LoadAccessWithGapsRequiresEpilogMasking &&
4903       !StoreAccessWithGapsRequiresMasking)
4904     return true;
4905 
4906   // If masked interleaving is required, we expect that the user/target had
4907   // enabled it, because otherwise it either wouldn't have been created or
4908   // it should have been invalidated by the CostModel.
4909   assert(useMaskedInterleavedAccesses(TTI) &&
4910          "Masked interleave-groups for predicated accesses are not enabled.");
4911 
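  // Masked widening of reversed interleave groups is not currently supported.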
4912   if (Group->isReverse())
4913     return false;
4914 
4915   auto *Ty = getLoadStoreType(I);
4916   const Align Alignment = getLoadStoreAlignment(I);
4917   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
4918                           : TTI.isLegalMaskedStore(Ty, Alignment);
4919 }
4920 
4921 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
4922     Instruction *I, ElementCount VF) {
4923   // Get and ensure we have a valid memory instruction.
4924   assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction");
4925 
4926   auto *Ptr = getLoadStorePointerOperand(I);
4927   auto *ScalarTy = getLoadStoreType(I);
4928 
4929   // In order to be widened, the pointer should be consecutive, first of all.
4930   if (!Legal->isConsecutivePtr(ScalarTy, Ptr))
4931     return false;
4932 
4933   // If the instruction is a store located in a predicated block, it will be
4934   // scalarized.
4935   if (isScalarWithPredication(I, VF))
4936     return false;
4937 
  // If the instruction's allocated size doesn't equal its type size, it
4939   // requires padding and will be scalarized.
4940   auto &DL = I->getModule()->getDataLayout();
4941   if (hasIrregularType(ScalarTy, DL))
4942     return false;
4943 
4944   return true;
4945 }
4946 
4947 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
4948   // We should not collect Uniforms more than once per VF. Right now,
4949   // this function is called from collectUniformsAndScalars(), which
4950   // already does this check. Collecting Uniforms for VF=1 does not make any
4951   // sense.
4952 
4953   assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
4954          "This function should not be visited twice for the same VF");
4955 
  // Create the entry for this VF up front so that, even if no uniform values
  // are found, we will not analyze this VF again: Uniforms.count(VF) will
  // return 1.
4958   Uniforms[VF].clear();
4959 
4960   // We now know that the loop is vectorizable!
4961   // Collect instructions inside the loop that will remain uniform after
4962   // vectorization.
4963 
4964   // Global values, params and instructions outside of current loop are out of
4965   // scope.
4966   auto isOutOfScope = [&](Value *V) -> bool {
4967     Instruction *I = dyn_cast<Instruction>(V);
4968     return (!I || !TheLoop->contains(I));
4969   };
4970 
4971   // Worklist containing uniform instructions demanding lane 0.
4972   SetVector<Instruction *> Worklist;
4973   BasicBlock *Latch = TheLoop->getLoopLatch();
4974 
4975   // Add uniform instructions demanding lane 0 to the worklist. Instructions
4976   // that are scalar with predication must not be considered uniform after
4977   // vectorization, because that would create an erroneous replicating region
4978   // where only a single instance out of VF should be formed.
  // TODO: optimize such rare cases if found important, see PR40816.
4980   auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
4981     if (isOutOfScope(I)) {
4982       LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
4983                         << *I << "\n");
4984       return;
4985     }
4986     if (isScalarWithPredication(I, VF)) {
4987       LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
4988                         << *I << "\n");
4989       return;
4990     }
4991     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
4992     Worklist.insert(I);
4993   };
4994 
4995   // Start with the conditional branch. If the branch condition is an
4996   // instruction contained in the loop that is only used by the branch, it is
4997   // uniform.
4998   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
4999   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
5000     addToWorklistIfAllowed(Cmp);
5001 
5002   auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
5003     InstWidening WideningDecision = getWideningDecision(I, VF);
5004     assert(WideningDecision != CM_Unknown &&
5005            "Widening decision should be ready at this moment");
5006 
5007     // A uniform memory op is itself uniform.  We exclude uniform stores
5008     // here as they demand the last lane, not the first one.
5009     if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) {
5010       assert(WideningDecision == CM_Scalarize);
5011       return true;
5012     }
5013 
5014     return (WideningDecision == CM_Widen ||
5015             WideningDecision == CM_Widen_Reverse ||
5016             WideningDecision == CM_Interleave);
5017   };
5018 
5020   // Returns true if Ptr is the pointer operand of a memory access instruction
5021   // I, and I is known to not require scalarization.
5022   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
5023     return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
5024   };
5025 
5026   // Holds a list of values which are known to have at least one uniform use.
5027   // Note that there may be other uses which aren't uniform.  A "uniform use"
5028   // here is something which only demands lane 0 of the unrolled iterations;
5029   // it does not imply that all lanes produce the same value (e.g. this is not
5030   // the usual meaning of uniform)
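  // For example, the pointer operand of a widened consecutive load demands
  // only its lane-0 value, even if other users of the same pointer require
  // all lanes.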
5031   SetVector<Value *> HasUniformUse;
5032 
5033   // Scan the loop for instructions which are either a) known to have only
5034   // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
5035   for (auto *BB : TheLoop->blocks())
5036     for (auto &I : *BB) {
5037       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) {
5038         switch (II->getIntrinsicID()) {
5039         case Intrinsic::sideeffect:
5040         case Intrinsic::experimental_noalias_scope_decl:
5041         case Intrinsic::assume:
5042         case Intrinsic::lifetime_start:
5043         case Intrinsic::lifetime_end:
5044           if (TheLoop->hasLoopInvariantOperands(&I))
5045             addToWorklistIfAllowed(&I);
5046           break;
5047         default:
5048           break;
5049         }
5050       }
5051 
5052       // ExtractValue instructions must be uniform, because the operands are
5053       // known to be loop-invariant.
5054       if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) {
5055         assert(isOutOfScope(EVI->getAggregateOperand()) &&
5056                "Expected aggregate value to be loop invariant");
5057         addToWorklistIfAllowed(EVI);
5058         continue;
5059       }
5060 
5061       // If there's no pointer operand, there's nothing to do.
5062       auto *Ptr = getLoadStorePointerOperand(&I);
5063       if (!Ptr)
5064         continue;
5065 
5066       // A uniform memory op is itself uniform.  We exclude uniform stores
5067       // here as they demand the last lane, not the first one.
5068       if (isa<LoadInst>(I) && Legal->isUniformMemOp(I))
5069         addToWorklistIfAllowed(&I);
5070 
5071       if (isUniformDecision(&I, VF)) {
5072         assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check");
5073         HasUniformUse.insert(Ptr);
5074       }
5075     }
5076 
5077   // Add to the worklist any operands which have *only* uniform (e.g. lane 0
5078   // demanding) users.  Since loops are assumed to be in LCSSA form, this
5079   // disallows uses outside the loop as well.
5080   for (auto *V : HasUniformUse) {
5081     if (isOutOfScope(V))
5082       continue;
5083     auto *I = cast<Instruction>(V);
5084     auto UsersAreMemAccesses =
5085       llvm::all_of(I->users(), [&](User *U) -> bool {
5086         return isVectorizedMemAccessUse(cast<Instruction>(U), V);
5087       });
5088     if (UsersAreMemAccesses)
5089       addToWorklistIfAllowed(I);
5090   }
5091 
  // Expand Worklist in topological order: whenever a new instruction
  // is added, its users should already be inside Worklist. This ensures
  // that a uniform instruction will only be used by uniform instructions.
5095   unsigned idx = 0;
5096   while (idx != Worklist.size()) {
5097     Instruction *I = Worklist[idx++];
5098 
5099     for (auto OV : I->operand_values()) {
5100       // isOutOfScope operands cannot be uniform instructions.
5101       if (isOutOfScope(OV))
5102         continue;
      // First-order recurrence phis should typically be considered
      // non-uniform.
5105       auto *OP = dyn_cast<PHINode>(OV);
5106       if (OP && Legal->isFirstOrderRecurrence(OP))
5107         continue;
5108       // If all the users of the operand are uniform, then add the
5109       // operand into the uniform worklist.
5110       auto *OI = cast<Instruction>(OV);
5111       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
5112             auto *J = cast<Instruction>(U);
5113             return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
5114           }))
5115         addToWorklistIfAllowed(OI);
5116     }
5117   }
5118 
5119   // For an instruction to be added into Worklist above, all its users inside
5120   // the loop should also be in Worklist. However, this condition cannot be
5121   // true for phi nodes that form a cyclic dependence. We must process phi
5122   // nodes separately. An induction variable will remain uniform if all users
5123   // of the induction variable and induction variable update remain uniform.
5124   // The code below handles both pointer and non-pointer induction variables.
5125   for (auto &Induction : Legal->getInductionVars()) {
5126     auto *Ind = Induction.first;
5127     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5128 
5129     // Determine if all users of the induction variable are uniform after
5130     // vectorization.
5131     auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5132       auto *I = cast<Instruction>(U);
5133       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
5134              isVectorizedMemAccessUse(I, Ind);
5135     });
5136     if (!UniformInd)
5137       continue;
5138 
5139     // Determine if all users of the induction variable update instruction are
5140     // uniform after vectorization.
5141     auto UniformIndUpdate =
5142         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5143           auto *I = cast<Instruction>(U);
5144           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
5145                  isVectorizedMemAccessUse(I, IndUpdate);
5146         });
5147     if (!UniformIndUpdate)
5148       continue;
5149 
5150     // The induction variable and its update instruction will remain uniform.
5151     addToWorklistIfAllowed(Ind);
5152     addToWorklistIfAllowed(IndUpdate);
5153   }
5154 
5155   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
5156 }
5157 
5158 bool LoopVectorizationCostModel::runtimeChecksRequired() {
5159   LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
5160 
5161   if (Legal->getRuntimePointerChecking()->Need) {
5162     reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
5163         "runtime pointer checks needed. Enable vectorization of this "
5164         "loop with '#pragma clang loop vectorize(enable)' when "
5165         "compiling with -Os/-Oz",
5166         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5167     return true;
5168   }
5169 
5170   if (!PSE.getUnionPredicate().getPredicates().empty()) {
5171     reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
5172         "runtime SCEV checks needed. Enable vectorization of this "
5173         "loop with '#pragma clang loop vectorize(enable)' when "
5174         "compiling with -Os/-Oz",
5175         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5176     return true;
5177   }
5178 
5179   // FIXME: Avoid specializing for stride==1 instead of bailing out.
5180   if (!Legal->getLAI()->getSymbolicStrides().empty()) {
    reportVectorizationFailure("Runtime stride check is required with -Os/-Oz",
        "runtime stride == 1 checks needed. Enable vectorization of this "
        "loop with '#pragma clang loop vectorize(enable)' when "
        "compiling with -Os/-Oz",
5184         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5185     return true;
5186   }
5187 
5188   return false;
5189 }
5190 
5191 ElementCount
5192 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
5193   if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors)
5194     return ElementCount::getScalable(0);
5195 
5196   if (Hints->isScalableVectorizationDisabled()) {
5197     reportVectorizationInfo("Scalable vectorization is explicitly disabled",
5198                             "ScalableVectorizationDisabled", ORE, TheLoop);
5199     return ElementCount::getScalable(0);
5200   }
5201 
5202   LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n");
5203 
5204   auto MaxScalableVF = ElementCount::getScalable(
5205       std::numeric_limits<ElementCount::ScalarTy>::max());
5206 
5207   // Test that the loop-vectorizer can legalize all operations for this MaxVF.
5208   // FIXME: While for scalable vectors this is currently sufficient, this should
5209   // be replaced by a more detailed mechanism that filters out specific VFs,
5210   // instead of invalidating vectorization for a whole set of VFs based on the
5211   // MaxVF.
5212 
5213   // Disable scalable vectorization if the loop contains unsupported reductions.
5214   if (!canVectorizeReductions(MaxScalableVF)) {
5215     reportVectorizationInfo(
5216         "Scalable vectorization not supported for the reduction "
5217         "operations found in this loop.",
5218         "ScalableVFUnfeasible", ORE, TheLoop);
5219     return ElementCount::getScalable(0);
5220   }
5221 
5222   // Disable scalable vectorization if the loop contains any instructions
5223   // with element types not supported for scalable vectors.
5224   if (any_of(ElementTypesInLoop, [&](Type *Ty) {
5225         return !Ty->isVoidTy() &&
5226                !this->TTI.isElementTypeLegalForScalableVector(Ty);
5227       })) {
5228     reportVectorizationInfo("Scalable vectorization is not supported "
5229                             "for all element types found in this loop.",
5230                             "ScalableVFUnfeasible", ORE, TheLoop);
5231     return ElementCount::getScalable(0);
5232   }
5233 
5234   if (Legal->isSafeForAnyVectorWidth())
5235     return MaxScalableVF;
5236 
5237   // Limit MaxScalableVF by the maximum safe dependence distance.
5238   Optional<unsigned> MaxVScale = TTI.getMaxVScale();
5239   if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange))
5240     MaxVScale =
5241         TheFunction->getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax();
5242   MaxScalableVF = ElementCount::getScalable(
5243       MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);
5244   if (!MaxScalableVF)
5245     reportVectorizationInfo(
5246         "Max legal vector width too small, scalable vectorization "
5247         "unfeasible.",
5248         "ScalableVFUnfeasible", ORE, TheLoop);
5249 
5250   return MaxScalableVF;
5251 }
5252 
5253 FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF(
5254     unsigned ConstTripCount, ElementCount UserVF, bool FoldTailByMasking) {
5255   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
5256   unsigned SmallestType, WidestType;
5257   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
5258 
5259   // Get the maximum safe dependence distance in bits computed by LAA.
5260   // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
  // the memory access that is most restrictive (involved in the smallest
5262   // dependence distance).
5263   unsigned MaxSafeElements =
5264       PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType);
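  // For example, a max safe vector width of 256 bits with a widest type of
  // 32 bits gives MaxSafeElements = PowerOf2Floor(256 / 32) = 8.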
5265 
5266   auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements);
5267   auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements);
5268 
5269   LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF
5270                     << ".\n");
5271   LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF
5272                     << ".\n");
5273 
5274   // First analyze the UserVF, fall back if the UserVF should be ignored.
5275   if (UserVF) {
5276     auto MaxSafeUserVF =
5277         UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;
5278 
5279     if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) {
5280       // If `VF=vscale x N` is safe, then so is `VF=N`
5281       if (UserVF.isScalable())
5282         return FixedScalableVFPair(
5283             ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF);
5284       else
5285         return UserVF;
5286     }
5287 
5288     assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));
5289 
5290     // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it
5291     // is better to ignore the hint and let the compiler choose a suitable VF.
5292     if (!UserVF.isScalable()) {
5293       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5294                         << " is unsafe, clamping to max safe VF="
5295                         << MaxSafeFixedVF << ".\n");
5296       ORE->emit([&]() {
5297         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5298                                           TheLoop->getStartLoc(),
5299                                           TheLoop->getHeader())
5300                << "User-specified vectorization factor "
5301                << ore::NV("UserVectorizationFactor", UserVF)
5302                << " is unsafe, clamping to maximum safe vectorization factor "
5303                << ore::NV("VectorizationFactor", MaxSafeFixedVF);
5304       });
5305       return MaxSafeFixedVF;
5306     }
5307 
5308     if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) {
5309       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5310                         << " is ignored because scalable vectors are not "
5311                            "available.\n");
5312       ORE->emit([&]() {
5313         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5314                                           TheLoop->getStartLoc(),
5315                                           TheLoop->getHeader())
5316                << "User-specified vectorization factor "
5317                << ore::NV("UserVectorizationFactor", UserVF)
5318                << " is ignored because the target does not support scalable "
5319                   "vectors. The compiler will pick a more suitable value.";
5320       });
5321     } else {
5322       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5323                         << " is unsafe. Ignoring scalable UserVF.\n");
5324       ORE->emit([&]() {
5325         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5326                                           TheLoop->getStartLoc(),
5327                                           TheLoop->getHeader())
5328                << "User-specified vectorization factor "
5329                << ore::NV("UserVectorizationFactor", UserVF)
5330                << " is unsafe. Ignoring the hint to let the compiler pick a "
5331                   "more suitable value.";
5332       });
5333     }
5334   }
5335 
5336   LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
5337                     << " / " << WidestType << " bits.\n");
5338 
5339   FixedScalableVFPair Result(ElementCount::getFixed(1),
5340                              ElementCount::getScalable(0));
5341   if (auto MaxVF =
5342           getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
5343                                   MaxSafeFixedVF, FoldTailByMasking))
5344     Result.FixedVF = MaxVF;
5345 
5346   if (auto MaxVF =
5347           getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
5348                                   MaxSafeScalableVF, FoldTailByMasking))
5349     if (MaxVF.isScalable()) {
5350       Result.ScalableVF = MaxVF;
5351       LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
5352                         << "\n");
5353     }
5354 
5355   return Result;
5356 }
5357 
5358 FixedScalableVFPair
5359 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
5360   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to do this, since the check is still likely to
    // be dynamically uniform if the target can skip.
5363     reportVectorizationFailure(
5364         "Not inserting runtime ptr check for divergent target",
5365         "runtime pointer checks needed. Not enabled for divergent target",
5366         "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
5367     return FixedScalableVFPair::getNone();
5368   }
5369 
5370   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5371   LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
5372   if (TC == 1) {
5373     reportVectorizationFailure("Single iteration (non) loop",
5374         "loop trip count is one, irrelevant for vectorization",
5375         "SingleIterationLoop", ORE, TheLoop);
5376     return FixedScalableVFPair::getNone();
5377   }
5378 
5379   switch (ScalarEpilogueStatus) {
5380   case CM_ScalarEpilogueAllowed:
5381     return computeFeasibleMaxVF(TC, UserVF, false);
5382   case CM_ScalarEpilogueNotAllowedUsePredicate:
5383     LLVM_FALLTHROUGH;
5384   case CM_ScalarEpilogueNotNeededUsePredicate:
5385     LLVM_DEBUG(
5386         dbgs() << "LV: vector predicate hint/switch found.\n"
5387                << "LV: Not allowing scalar epilogue, creating predicated "
5388                << "vector loop.\n");
5389     break;
5390   case CM_ScalarEpilogueNotAllowedLowTripLoop:
5391     // fallthrough as a special case of OptForSize
5392   case CM_ScalarEpilogueNotAllowedOptSize:
5393     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
5394       LLVM_DEBUG(
5395           dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5396     else
5397       LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5398                         << "count.\n");
5399 
5400     // Bail if runtime checks are required, which are not good when optimising
5401     // for size.
5402     if (runtimeChecksRequired())
5403       return FixedScalableVFPair::getNone();
5404 
5405     break;
5406   }
5407 
5408   // The only loops we can vectorize without a scalar epilogue, are loops with
5409   // a bottom-test and a single exiting block. We'd have to handle the fact
5410   // that not every instruction executes on the last iteration.  This will
5411   // require a lane mask which varies through the vector loop body.  (TODO)
5412   if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5413     // If there was a tail-folding hint/switch, but we can't fold the tail by
5414     // masking, fallback to a vectorization with a scalar epilogue.
5415     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5416       LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5417                            "scalar epilogue instead.\n");
5418       ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5419       return computeFeasibleMaxVF(TC, UserVF, false);
5420     }
5421     return FixedScalableVFPair::getNone();
5422   }
5423 
5424   // Now try the tail folding
5425 
5426   // Invalidate interleave groups that require an epilogue if we can't mask
5427   // the interleave-group.
5428   if (!useMaskedInterleavedAccesses(TTI)) {
5429     assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5430            "No decisions should have been taken at this point");
    // Note: There is no need to invalidate any cost modeling decisions here,
    // as none were taken so far.
5433     InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5434   }
5435 
5436   FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF, true);
5437   // Avoid tail folding if the trip count is known to be a multiple of any VF
5438   // we chose.
5439   // FIXME: The condition below pessimises the case for fixed-width vectors,
5440   // when scalable VFs are also candidates for vectorization.
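  // For example, a known trip count of 16 with MaxFixedVF = 8 and no user
  // interleave count leaves a remainder of zero, so the tail can be skipped
  // entirely and MaxFactors returned as-is.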
5441   if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) {
5442     ElementCount MaxFixedVF = MaxFactors.FixedVF;
5443     assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) &&
5444            "MaxFixedVF must be a power of 2");
5445     unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC
5446                                    : MaxFixedVF.getFixedValue();
5447     ScalarEvolution *SE = PSE.getSE();
5448     const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
5449     const SCEV *ExitCount = SE->getAddExpr(
5450         BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
5451     const SCEV *Rem = SE->getURemExpr(
5452         SE->applyLoopGuards(ExitCount, TheLoop),
5453         SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
5454     if (Rem->isZero()) {
5455       // Accept MaxFixedVF if we do not have a tail.
5456       LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
5457       return MaxFactors;
5458     }
5459   }
5460 
5461   // For scalable vectors don't use tail folding for low trip counts or
5462   // optimizing for code size. We only permit this if the user has explicitly
5463   // requested it.
5464   if (ScalarEpilogueStatus != CM_ScalarEpilogueNotNeededUsePredicate &&
5465       ScalarEpilogueStatus != CM_ScalarEpilogueNotAllowedUsePredicate &&
5466       MaxFactors.ScalableVF.isVector())
5467     MaxFactors.ScalableVF = ElementCount::getScalable(0);
5468 
5469   // If we don't know the precise trip count, or if the trip count that we
5470   // found modulo the vectorization factor is not zero, try to fold the tail
5471   // by masking.
5472   // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
5473   if (Legal->prepareToFoldTailByMasking()) {
5474     FoldTailByMasking = true;
5475     return MaxFactors;
5476   }
5477 
5478   // If there was a tail-folding hint/switch, but we can't fold the tail by
5479   // masking, fallback to a vectorization with a scalar epilogue.
5480   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5481     LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5482                          "scalar epilogue instead.\n");
5483     ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5484     return MaxFactors;
5485   }
5486 
5487   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
5488     LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
5489     return FixedScalableVFPair::getNone();
5490   }
5491 
5492   if (TC == 0) {
5493     reportVectorizationFailure(
5494         "Unable to calculate the loop count due to complex control flow",
5495         "unable to calculate the loop count due to complex control flow",
5496         "UnknownLoopCountComplexCFG", ORE, TheLoop);
5497     return FixedScalableVFPair::getNone();
5498   }
5499 
5500   reportVectorizationFailure(
5501       "Cannot optimize for size and vectorize at the same time.",
5502       "cannot optimize for size and vectorize at the same time. "
5503       "Enable vectorization of this loop with '#pragma clang loop "
5504       "vectorize(enable)' when compiling with -Os/-Oz",
5505       "NoTailLoopWithOptForSize", ORE, TheLoop);
5506   return FixedScalableVFPair::getNone();
5507 }
5508 
5509 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
5510     unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType,
5511     const ElementCount &MaxSafeVF, bool FoldTailByMasking) {
5512   bool ComputeScalableMaxVF = MaxSafeVF.isScalable();
5513   TypeSize WidestRegister = TTI.getRegisterBitWidth(
5514       ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
5515                            : TargetTransformInfo::RGK_FixedWidthVector);
5516 
5517   // Convenience function to return the minimum of two ElementCounts.
5518   auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) {
5519     assert((LHS.isScalable() == RHS.isScalable()) &&
5520            "Scalable flags must match");
5521     return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS;
5522   };
5523 
5524   // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
  // Note that both WidestRegister and WidestType may not be powers of 2.
5526   auto MaxVectorElementCount = ElementCount::get(
5527       PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType),
5528       ComputeScalableMaxVF);
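  // For example, a 128-bit widest register with a widest element type of
  // 32 bits yields a maximum element count of 4 (vscale x 4 if scalable).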
5529   MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
5530   LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
5531                     << (MaxVectorElementCount * WidestType) << " bits.\n");
5532 
5533   if (!MaxVectorElementCount) {
5534     LLVM_DEBUG(dbgs() << "LV: The target has no "
5535                       << (ComputeScalableMaxVF ? "scalable" : "fixed")
5536                       << " vector registers.\n");
5537     return ElementCount::getFixed(1);
5538   }
5539 
5540   const auto TripCountEC = ElementCount::getFixed(ConstTripCount);
5541   if (ConstTripCount &&
5542       ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) &&
5543       (!FoldTailByMasking || isPowerOf2_32(ConstTripCount))) {
5544     // If loop trip count (TC) is known at compile time there is no point in
5545     // choosing VF greater than TC (as done in the loop below). Select maximum
5546     // power of two which doesn't exceed TC.
5547     // If MaxVectorElementCount is scalable, we only fall back on a fixed VF
5548     // when the TC is less than or equal to the known number of lanes.
5549     auto ClampedConstTripCount = PowerOf2Floor(ConstTripCount);
5550     LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to maximum power of two not "
5551                          "exceeding the constant trip count: "
5552                       << ClampedConstTripCount << "\n");
5553     return ElementCount::getFixed(ClampedConstTripCount);
5554   }
5555 
5556   ElementCount MaxVF = MaxVectorElementCount;
5557   if (TTI.shouldMaximizeVectorBandwidth() ||
5558       (MaximizeBandwidth && isScalarEpilogueAllowed())) {
5559     auto MaxVectorElementCountMaxBW = ElementCount::get(
5560         PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType),
5561         ComputeScalableMaxVF);
5562     MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
5563 
5564     // Collect all viable vectorization factors larger than the default MaxVF
5565     // (i.e. MaxVectorElementCount).
5566     SmallVector<ElementCount, 8> VFs;
5567     for (ElementCount VS = MaxVectorElementCount * 2;
5568          ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2)
5569       VFs.push_back(VS);
5570 
5571     // For each VF calculate its register usage.
5572     auto RUs = calculateRegisterUsage(VFs);
5573 
5574     // Select the largest VF which doesn't require more registers than existing
5575     // ones.
5576     for (int i = RUs.size() - 1; i >= 0; --i) {
5577       bool Selected = true;
5578       for (auto &pair : RUs[i].MaxLocalUsers) {
5579         unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5580         if (pair.second > TargetNumRegisters)
5581           Selected = false;
5582       }
5583       if (Selected) {
5584         MaxVF = VFs[i];
5585         break;
5586       }
5587     }
5588     if (ElementCount MinVF =
5589             TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) {
5590       if (ElementCount::isKnownLT(MaxVF, MinVF)) {
5591         LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
5592                           << ") with target's minimum: " << MinVF << '\n');
5593         MaxVF = MinVF;
5594       }
5595     }
5596   }
5597   return MaxVF;
5598 }
5599 
5600 Optional<unsigned> LoopVectorizationCostModel::getVScaleForTuning() const {
5601   if (TheFunction->hasFnAttribute(Attribute::VScaleRange)) {
5602     auto Attr = TheFunction->getFnAttribute(Attribute::VScaleRange);
5603     auto Min = Attr.getVScaleRangeMin();
5604     auto Max = Attr.getVScaleRangeMax();
5605     if (Max && Min == Max)
5606       return Max;
5607   }
5608 
5609   return TTI.getVScaleForTuning();
5610 }
5611 
5612 bool LoopVectorizationCostModel::isMoreProfitable(
5613     const VectorizationFactor &A, const VectorizationFactor &B) const {
5614   InstructionCost CostA = A.Cost;
5615   InstructionCost CostB = B.Cost;
5616 
5617   unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop);
5618 
5619   if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking &&
5620       MaxTripCount) {
5621     // If we are folding the tail and the trip count is a known (possibly small)
5622     // constant, the trip count will be rounded up to an integer number of
5623     // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF),
5624     // which we compare directly. When not folding the tail, the total cost will
5625     // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is
5626     // approximated with the per-lane cost below instead of using the tripcount
5627     // as here.
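    // For example, with MaxTripCount = 10, a VF=4 factor of cost 12 totals
    // 12 * ceil(10/4) = 36, while a VF=8 factor of cost 20 totals
    // 20 * ceil(10/8) = 40, so the VF=4 factor is considered more profitable.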
5628     auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue());
5629     auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue());
5630     return RTCostA < RTCostB;
5631   }
5632 
5633   // Improve estimate for the vector width if it is scalable.
5634   unsigned EstimatedWidthA = A.Width.getKnownMinValue();
5635   unsigned EstimatedWidthB = B.Width.getKnownMinValue();
5636   if (Optional<unsigned> VScale = getVScaleForTuning()) {
5637     if (A.Width.isScalable())
5638       EstimatedWidthA *= VScale.getValue();
5639     if (B.Width.isScalable())
5640       EstimatedWidthB *= VScale.getValue();
5641   }
5642 
5643   // Assume vscale may be larger than 1 (or the value being tuned for),
5644   // so that scalable vectorization is slightly favorable over fixed-width
5645   // vectorization.
5646   if (A.Width.isScalable() && !B.Width.isScalable())
5647     return (CostA * B.Width.getFixedValue()) <= (CostB * EstimatedWidthA);
5648 
5649   // To avoid the need for FP division:
5650   //      (CostA / A.Width) < (CostB / B.Width)
5651   // <=>  (CostA * B.Width) < (CostB * A.Width)
5652   return (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA);
5653 }
5654 
5655 VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor(
5656     const ElementCountSet &VFCandidates) {
5657   InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first;
5658   LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
5659   assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
5660   assert(VFCandidates.count(ElementCount::getFixed(1)) &&
5661          "Expected Scalar VF to be a candidate");
5662 
5663   const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost);
5664   VectorizationFactor ChosenFactor = ScalarCost;
5665 
5666   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
5667   if (ForceVectorization && VFCandidates.size() > 1) {
5668     // Ignore scalar width, because the user explicitly wants vectorization.
5669     // Initialize cost to max so that VF = 2 is, at least, chosen during cost
5670     // evaluation.
5671     ChosenFactor.Cost = InstructionCost::getMax();
5672   }
5673 
5674   SmallVector<InstructionVFPair> InvalidCosts;
5675   for (const auto &i : VFCandidates) {
5676     // The cost for scalar VF=1 is already calculated, so ignore it.
5677     if (i.isScalar())
5678       continue;
5679 
5680     VectorizationCostTy C = expectedCost(i, &InvalidCosts);
5681     VectorizationFactor Candidate(i, C.first);
5682 
5683 #ifndef NDEBUG
5684     unsigned AssumedMinimumVscale = 1;
5685     if (Optional<unsigned> VScale = getVScaleForTuning())
5686       AssumedMinimumVscale = VScale.getValue();
5687     unsigned Width =
5688         Candidate.Width.isScalable()
5689             ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale
5690             : Candidate.Width.getFixedValue();
5691     LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
5692                       << " costs: " << (Candidate.Cost / Width));
5693     if (i.isScalable())
5694       LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of "
5695                         << AssumedMinimumVscale << ")");
5696     LLVM_DEBUG(dbgs() << ".\n");
5697 #endif
5698 
5699     if (!C.second && !ForceVectorization) {
5700       LLVM_DEBUG(
5701           dbgs() << "LV: Not considering vector loop of width " << i
5702                  << " because it will not generate any vector instructions.\n");
5703       continue;
5704     }
5705 
5706     // If profitable add it to ProfitableVF list.
5707     if (isMoreProfitable(Candidate, ScalarCost))
5708       ProfitableVFs.push_back(Candidate);
5709 
5710     if (isMoreProfitable(Candidate, ChosenFactor))
5711       ChosenFactor = Candidate;
5712   }
5713 
5714   // Emit a report of VFs with invalid costs in the loop.
5715   if (!InvalidCosts.empty()) {
5716     // Group the remarks per instruction, keeping the instruction order from
5717     // InvalidCosts.
5718     std::map<Instruction *, unsigned> Numbering;
5719     unsigned I = 0;
5720     for (auto &Pair : InvalidCosts)
5721       if (!Numbering.count(Pair.first))
5722         Numbering[Pair.first] = I++;
5723 
5724     // Sort the list, first on instruction(number) then on VF.
5725     llvm::sort(InvalidCosts,
5726                [&Numbering](InstructionVFPair &A, InstructionVFPair &B) {
5727                  if (Numbering[A.first] != Numbering[B.first])
5728                    return Numbering[A.first] < Numbering[B.first];
5729                  ElementCountComparator ECC;
5730                  return ECC(A.second, B.second);
5731                });
5732 
5733     // For a list of ordered instruction-vf pairs:
5734     //   [(load, vf1), (load, vf2), (store, vf1)]
5735     // Group the instructions together to emit separate remarks for:
5736     //   load  (vf1, vf2)
5737     //   store (vf1)
5738     auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts);
5739     auto Subset = ArrayRef<InstructionVFPair>();
5740     do {
5741       if (Subset.empty())
5742         Subset = Tail.take_front(1);
5743 
5744       Instruction *I = Subset.front().first;
5745 
5746       // If the next instruction is different, or if there are no other pairs,
5747       // emit a remark for the collated subset. e.g.
      //   [(load, vf1), (load, vf2)]
      // to emit:
      //  remark: invalid costs for 'load' at VF=(vf1, vf2)
5751       if (Subset == Tail || Tail[Subset.size()].first != I) {
5752         std::string OutString;
5753         raw_string_ostream OS(OutString);
5754         assert(!Subset.empty() && "Unexpected empty range");
5755         OS << "Instruction with invalid costs prevented vectorization at VF=(";
5756         for (auto &Pair : Subset)
5757           OS << (Pair.second == Subset.front().second ? "" : ", ")
5758              << Pair.second;
5759         OS << "):";
5760         if (auto *CI = dyn_cast<CallInst>(I))
5761           OS << " call to " << CI->getCalledFunction()->getName();
5762         else
5763           OS << " " << I->getOpcodeName();
5764         OS.flush();
5765         reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I);
5766         Tail = Tail.drop_front(Subset.size());
5767         Subset = {};
5768       } else
5769         // Grow the subset by one element
5770         Subset = Tail.take_front(Subset.size() + 1);
5771     } while (!Tail.empty());
5772   }
5773 
5774   if (!EnableCondStoresVectorization && NumPredStores) {
5775     reportVectorizationFailure("There are conditional stores.",
5776         "store that is conditionally executed prevents vectorization",
5777         "ConditionalStore", ORE, TheLoop);
5778     ChosenFactor = ScalarCost;
5779   }
5780 
5781   LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() &&
5782                  ChosenFactor.Cost >= ScalarCost.Cost) dbgs()
5783              << "LV: Vectorization seems to be not beneficial, "
5784              << "but was forced by a user.\n");
5785   LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n");
5786   return ChosenFactor;
5787 }
5788 
5789 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization(
5790     const Loop &L, ElementCount VF) const {
5791   // Cross iteration phis such as reductions need special handling and are
5792   // currently unsupported.
5793   if (any_of(L.getHeader()->phis(),
5794              [&](PHINode &Phi) { return Legal->isFirstOrderRecurrence(&Phi); }))
5795     return false;
5796 
5797   // Phis with uses outside of the loop require special handling and are
5798   // currently unsupported.
5799   for (auto &Entry : Legal->getInductionVars()) {
5800     // Look for uses of the value of the induction at the last iteration.
5801     Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch());
5802     for (User *U : PostInc->users())
5803       if (!L.contains(cast<Instruction>(U)))
5804         return false;
5805     // Look for uses of penultimate value of the induction.
5806     for (User *U : Entry.first->users())
5807       if (!L.contains(cast<Instruction>(U)))
5808         return false;
5809   }
5810 
5811   // Induction variables that are widened require special handling that is
5812   // currently not supported.
5813   if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
5814         return !(this->isScalarAfterVectorization(Entry.first, VF) ||
5815                  this->isProfitableToScalarize(Entry.first, VF));
5816       }))
5817     return false;
5818 
  // Epilogue vectorization code has not been audited to ensure it handles
  // non-latch exits properly. It may be fine, but it needs to be audited and
  // tested.
5822   if (L.getExitingBlock() != L.getLoopLatch())
5823     return false;
5824 
5825   return true;
5826 }
5827 
5828 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
5829     const ElementCount VF) const {
5830   // FIXME: We need a much better cost-model to take different parameters such
5831   // as register pressure, code size increase and cost of extra branches into
5832   // account. For now we apply a very crude heuristic and only consider loops
5833   // with vectorization factors larger than a certain value.
5834   // We also consider epilogue vectorization unprofitable for targets that don't
  // consider interleaving beneficial (e.g. MVE).
5836   if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
5837     return false;
5838   // FIXME: We should consider changing the threshold for scalable
5839   // vectors to take VScaleForTuning into account.
5840   if (VF.getKnownMinValue() >= EpilogueVectorizationMinVF)
5841     return true;
5842   return false;
5843 }
5844 
5845 VectorizationFactor
5846 LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
5847     const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
5848   VectorizationFactor Result = VectorizationFactor::Disabled();
5849   if (!EnableEpilogueVectorization) {
5850     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
5851     return Result;
5852   }
5853 
5854   if (!isScalarEpilogueAllowed()) {
5855     LLVM_DEBUG(
5856         dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
5857                   "allowed.\n";);
5858     return Result;
5859   }
5860 
5861   // Not really a cost consideration, but check for unsupported cases here to
5862   // simplify the logic.
5863   if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
5864     LLVM_DEBUG(
5865         dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
5866                   "not a supported candidate.\n";);
5867     return Result;
5868   }
5869 
5870   if (EpilogueVectorizationForceVF > 1) {
5871     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
5872     ElementCount ForcedEC = ElementCount::getFixed(EpilogueVectorizationForceVF);
5873     if (LVP.hasPlanWithVF(ForcedEC))
5874       return {ForcedEC, 0};
5875     else {
5876       LLVM_DEBUG(
5877           dbgs()
5878               << "LEV: Epilogue vectorization forced factor is not viable.\n";);
5879       return Result;
5880     }
5881   }
5882 
5883   if (TheLoop->getHeader()->getParent()->hasOptSize() ||
5884       TheLoop->getHeader()->getParent()->hasMinSize()) {
5885     LLVM_DEBUG(
5886         dbgs()
5887             << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
5888     return Result;
5889   }
5890 
5891   if (!isEpilogueVectorizationProfitable(MainLoopVF)) {
5892     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for "
5893                          "this loop\n");
5894     return Result;
5895   }
5896 
5897   // If MainLoopVF = vscale x 2, and vscale is expected to be 4, then we know
5898   // the main loop handles 8 lanes per iteration. We could still benefit from
5899   // vectorizing the epilogue loop with VF=4.
5900   ElementCount EstimatedRuntimeVF = MainLoopVF;
5901   if (MainLoopVF.isScalable()) {
5902     EstimatedRuntimeVF = ElementCount::getFixed(MainLoopVF.getKnownMinValue());
5903     if (Optional<unsigned> VScale = getVScaleForTuning())
5904       EstimatedRuntimeVF *= VScale.getValue();
5905   }
5906 
5907   for (auto &NextVF : ProfitableVFs)
5908     if (((!NextVF.Width.isScalable() && MainLoopVF.isScalable() &&
5909           ElementCount::isKnownLT(NextVF.Width, EstimatedRuntimeVF)) ||
5910          ElementCount::isKnownLT(NextVF.Width, MainLoopVF)) &&
5911         (Result.Width.isScalar() || isMoreProfitable(NextVF, Result)) &&
5912         LVP.hasPlanWithVF(NextVF.Width))
5913       Result = NextVF;
5914 
5915   if (Result != VectorizationFactor::Disabled())
5916     LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
5917                       << Result.Width << "\n";);
5918   return Result;
5919 }
5920 
5921 std::pair<unsigned, unsigned>
5922 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
5923   unsigned MinWidth = -1U;
5924   unsigned MaxWidth = 8;
5925   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5926   // For in-loop reductions, no element types are added to ElementTypesInLoop
5927   // if there are no loads/stores in the loop. In this case, check through the
5928   // reduction variables to determine the maximum width.
5929   if (ElementTypesInLoop.empty() && !Legal->getReductionVars().empty()) {
5930     // Reset MaxWidth so that we can find the smallest type used by recurrences
5931     // in the loop.
5932     MaxWidth = -1U;
5933     for (auto &PhiDescriptorPair : Legal->getReductionVars()) {
5934       const RecurrenceDescriptor &RdxDesc = PhiDescriptorPair.second;
5935       // When finding the min width used by the recurrence we need to account
5936       // for casts on the input operands of the recurrence.
5937       MaxWidth = std::min<unsigned>(
5938           MaxWidth, std::min<unsigned>(
5939                         RdxDesc.getMinWidthCastToRecurrenceTypeInBits(),
5940                         RdxDesc.getRecurrenceType()->getScalarSizeInBits()));
5941     }
5942   } else {
5943     for (Type *T : ElementTypesInLoop) {
5944       MinWidth = std::min<unsigned>(
5945           MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
5946       MaxWidth = std::max<unsigned>(
5947           MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
5948     }
5949   }
5950   return {MinWidth, MaxWidth};
5951 }
5952 
5953 void LoopVectorizationCostModel::collectElementTypesForWidening() {
5954   ElementTypesInLoop.clear();
5955   // For each block.
5956   for (BasicBlock *BB : TheLoop->blocks()) {
5957     // For each instruction in the loop.
5958     for (Instruction &I : BB->instructionsWithoutDebug()) {
5959       Type *T = I.getType();
5960 
5961       // Skip ignored values.
5962       if (ValuesToIgnore.count(&I))
5963         continue;
5964 
5965       // Only examine Loads, Stores and PHINodes.
5966       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
5967         continue;
5968 
5969       // Examine PHI nodes that are reduction variables. Update the type to
5970       // account for the recurrence type.
5971       if (auto *PN = dyn_cast<PHINode>(&I)) {
5972         if (!Legal->isReductionVariable(PN))
5973           continue;
5974         const RecurrenceDescriptor &RdxDesc =
5975             Legal->getReductionVars().find(PN)->second;
5976         if (PreferInLoopReductions || useOrderedReductions(RdxDesc) ||
5977             TTI.preferInLoopReduction(RdxDesc.getOpcode(),
5978                                       RdxDesc.getRecurrenceType(),
5979                                       TargetTransformInfo::ReductionFlags()))
5980           continue;
5981         T = RdxDesc.getRecurrenceType();
5982       }
5983 
5984       // Examine the stored values.
5985       if (auto *ST = dyn_cast<StoreInst>(&I))
5986         T = ST->getValueOperand()->getType();
5987 
5988       assert(T->isSized() &&
5989              "Expected the load/store/recurrence type to be sized");
5990 
5991       ElementTypesInLoop.insert(T);
5992     }
5993   }
5994 }
5995 
5996 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
5997                                                            unsigned LoopCost) {
5998   // -- The interleave heuristics --
5999   // We interleave the loop in order to expose ILP and reduce the loop overhead.
6000   // There are many micro-architectural considerations that we can't predict
6001   // at this level. For example, frontend pressure (on decode or fetch) due to
6002   // code size, or the number and capabilities of the execution ports.
6003   //
6004   // We use the following heuristics to select the interleave count:
6005   // 1. If the code has reductions, then we interleave to break the cross
6006   // iteration dependency.
6007   // 2. If the loop is really small, then we interleave to reduce the loop
6008   // overhead.
6009   // 3. We don't interleave if we think that we will spill registers to memory
6010   // due to the increased register pressure.
6011 
6012   if (!isScalarEpilogueAllowed())
6013     return 1;
6014 
  // The maximum safe dependence distance was already used to limit the
  // vectorization factor; do not interleave such loops any further.
6016   if (Legal->getMaxSafeDepDistBytes() != -1U)
6017     return 1;
6018 
6019   auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
6020   const bool HasReductions = !Legal->getReductionVars().empty();
  // Do not interleave loops with a relatively small known or estimated trip
  // count. However, we will interleave when InterleaveSmallLoopScalarReduction
  // is enabled and the code has scalar reductions (HasReductions && VF = 1),
  // because under those conditions interleaving can expose ILP and break
  // cross-iteration dependences for reductions.
6026   if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
6027       !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
6028     return 1;
6029 
6030   RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these counts below, so make sure each is at least one; i.e.,
  // assume that every such instruction uses at least one register.
  for (auto &pair : R.MaxLocalUsers) {
6034     pair.second = std::max(pair.second, 1U);
6035   }
6036 
6037   // We calculate the interleave count using the following formula.
6038   // Subtract the number of loop invariants from the number of available
6039   // registers. These registers are used by all of the interleaved instances.
6040   // Next, divide the remaining registers by the number of registers that is
6041   // required by the loop, in order to estimate how many parallel instances
6042   // fit without causing spills. All of this is rounded down if necessary to be
6043   // a power of two. We want power of two interleave count to simplify any
6044   // addressing operations or alignment considerations.
6045   // We also want power of two interleave counts to ensure that the induction
6046   // variable of the vector loop wraps to zero, when tail is folded by masking;
6047   // this currently happens when OptForSize, in which case IC is set to 1 above.
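  //
  // As an illustration with made-up numbers: given 32 registers in a class, 2
  // of them held by loop invariants, and at most 6 values of that class live
  // at once, the estimate below is PowerOf2Floor((32 - 2) / 6) =
  // PowerOf2Floor(5) = 4 interleaved instances.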
6048   unsigned IC = UINT_MAX;
6049 
  for (auto &pair : R.MaxLocalUsers) {
    unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
    LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
                      << " registers of "
                      << TTI.getRegisterClassName(pair.first)
                      << " register class\n");
6055     if (VF.isScalar()) {
6056       if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
6057         TargetNumRegisters = ForceTargetNumScalarRegs;
6058     } else {
6059       if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
6060         TargetNumRegisters = ForceTargetNumVectorRegs;
6061     }
6062     unsigned MaxLocalUsers = pair.second;
6063     unsigned LoopInvariantRegs = 0;
6064     if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
6065       LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
6066 
    unsigned TmpIC =
        PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers);
6068     // Don't count the induction variable as interleaved.
6069     if (EnableIndVarRegisterHeur) {
6070       TmpIC =
6071           PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
6072                         std::max(1U, (MaxLocalUsers - 1)));
6073     }
6074 
6075     IC = std::min(IC, TmpIC);
6076   }
6077 
6078   // Clamp the interleave ranges to reasonable counts.
6079   unsigned MaxInterleaveCount =
6080       TTI.getMaxInterleaveFactor(VF.getKnownMinValue());
6081 
6082   // Check if the user has overridden the max.
6083   if (VF.isScalar()) {
6084     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
6085       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
6086   } else {
6087     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
6088       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
6089   }
6090 
6091   // If trip count is known or estimated compile time constant, limit the
6092   // interleave count to be less than the trip count divided by VF, provided it
6093   // is at least 1.
6094   //
  // For scalable vectors we can't know if interleaving is beneficial. It may
  // not be beneficial for small loops if none of the lanes in the second
  // vector iteration is enabled. However, for larger loops, there is likely
  // to be a similar benefit as for fixed-width vectors. For now, we choose to
  // leave the InterleaveCount as if vscale is '1', although if some
  // information about the vector is known (e.g. min vector size), we can make
  // a better decision.
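  //
  // As a hypothetical example: with an estimated trip count of 100 and a main
  // VF of 8, the interleave count is capped at 100 / 8 = 12, so the
  // interleaved vector body cannot overshoot the iterations available.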
6101   if (BestKnownTC) {
6102     MaxInterleaveCount =
6103         std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
6104     // Make sure MaxInterleaveCount is greater than 0.
6105     MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
6106   }
6107 
6108   assert(MaxInterleaveCount > 0 &&
6109          "Maximum interleave count must be greater than 0");
6110 
  // Clamp the calculated IC to be between 1 and the maximum interleave count
  // that the target and the trip count allow.
6113   if (IC > MaxInterleaveCount)
6114     IC = MaxInterleaveCount;
6115   else
6116     // Make sure IC is greater than 0.
6117     IC = std::max(1u, IC);
6118 
6119   assert(IC > 0 && "Interleave count must be greater than 0.");
6120 
6121   // If we did not calculate the cost for VF (because the user selected the VF)
6122   // then we calculate the cost of VF here.
6123   if (LoopCost == 0) {
6124     InstructionCost C = expectedCost(VF).first;
6125     assert(C.isValid() && "Expected to have chosen a VF with valid cost");
6126     LoopCost = *C.getValue();
6127   }
6128 
6129   assert(LoopCost && "Non-zero loop cost expected");
6130 
6131   // Interleave if we vectorized this loop and there is a reduction that could
6132   // benefit from interleaving.
6133   if (VF.isVector() && HasReductions) {
6134     LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
6135     return IC;
6136   }
6137 
6138   // Note that if we've already vectorized the loop we will have done the
6139   // runtime check and so interleaving won't require further checks.
6140   bool InterleavingRequiresRuntimePointerCheck =
6141       (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
6142 
6143   // We want to interleave small loops in order to reduce the loop overhead and
6144   // potentially expose ILP opportunities.
6145   LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
6146                     << "LV: IC is " << IC << '\n'
6147                     << "LV: VF is " << VF << '\n');
6148   const bool AggressivelyInterleaveReductions =
6149       TTI.enableAggressiveInterleaving(HasReductions);
6150   if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
    // We assume that the per-iteration loop overhead costs 1. Using the cost
    // model's estimate of the loop body cost, we interleave until the loop
    // overhead amounts to roughly 5% of the total cost of the loop.
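    // For instance, assuming the default SmallLoopCost of 20 and an estimated
    // loop body cost of 6, the computation below yields
    // min(IC, PowerOf2Floor(20 / 6)) = min(IC, 2), keeping the assumed
    // overhead of 1 close to that target.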
6154     unsigned SmallIC =
6155         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
6156 
6157     // Interleave until store/load ports (estimated by max interleave count) are
6158     // saturated.
6159     unsigned NumStores = Legal->getNumStores();
6160     unsigned NumLoads = Legal->getNumLoads();
6161     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
6162     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
6163 
6164     // There is little point in interleaving for reductions containing selects
6165     // and compares when VF=1 since it may just create more overhead than it's
6166     // worth for loops with small trip counts. This is because we still have to
6167     // do the final reduction after the loop.
6168     bool HasSelectCmpReductions =
6169         HasReductions &&
6170         any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
6171           const RecurrenceDescriptor &RdxDesc = Reduction.second;
6172           return RecurrenceDescriptor::isSelectCmpRecurrenceKind(
6173               RdxDesc.getRecurrenceKind());
6174         });
6175     if (HasSelectCmpReductions) {
6176       LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n");
6177       return 1;
6178     }
6179 
6180     // If we have a scalar reduction (vector reductions are already dealt with
6181     // by this point), we can increase the critical path length if the loop
6182     // we're interleaving is inside another loop. For tree-wise reductions
6183     // set the limit to 2, and for ordered reductions it's best to disable
6184     // interleaving entirely.
6185     if (HasReductions && TheLoop->getLoopDepth() > 1) {
6186       bool HasOrderedReductions =
6187           any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
6188             const RecurrenceDescriptor &RdxDesc = Reduction.second;
6189             return RdxDesc.isOrdered();
6190           });
6191       if (HasOrderedReductions) {
6192         LLVM_DEBUG(
6193             dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
6194         return 1;
6195       }
6196 
6197       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
6198       SmallIC = std::min(SmallIC, F);
6199       StoresIC = std::min(StoresIC, F);
6200       LoadsIC = std::min(LoadsIC, F);
6201     }
6202 
6203     if (EnableLoadStoreRuntimeInterleave &&
6204         std::max(StoresIC, LoadsIC) > SmallIC) {
6205       LLVM_DEBUG(
6206           dbgs() << "LV: Interleaving to saturate store or load ports.\n");
6207       return std::max(StoresIC, LoadsIC);
6208     }
6209 
6210     // If there are scalar reductions and TTI has enabled aggressive
6211     // interleaving for reductions, we will interleave to expose ILP.
6212     if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
6213         AggressivelyInterleaveReductions) {
6214       LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
      // Interleave no less than SmallIC but not as aggressively as the normal
      // IC, to accommodate the rare situation where resources are too limited.
6217       return std::max(IC / 2, SmallIC);
6218     } else {
6219       LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
6220       return SmallIC;
6221     }
6222   }
6223 
6224   // Interleave if this is a large loop (small loops are already dealt with by
6225   // this point) that could benefit from interleaving.
6226   if (AggressivelyInterleaveReductions) {
6227     LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6228     return IC;
6229   }
6230 
6231   LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
6232   return 1;
6233 }
6234 
6235 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
6236 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is a
  // very rough estimation. We scan the loop in topological order and assign a
  // number to each instruction. We use RPO to ensure that defs are met before
  // their users. We assume that each instruction that has in-loop users starts
  // an interval. We record every time that an in-loop value is used, so we
  // have a list of the first and last occurrences of each instruction. Next,
  // we transpose this data structure into a multi-map that holds the list of
  // intervals that *end* at a specific location. This multi-map allows us to
  // perform a linear search. We scan the instructions linearly and record each
  // time that a new interval starts, by placing it in a set. If we find this
  // value in the multi-map then we remove it from the set. The max register
  // usage is the maximum size of the set. We also search for instructions that
  // are defined outside the loop, but are used inside the loop. We need this
  // number separately from the max-interval usage number because, when we
  // unroll, loop-invariant values do not take more registers.
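  //
  // As a small (hypothetical) illustration, for the straight-line sequence
  //   %a = load ...
  //   %b = add %a, 1
  //   %c = mul %b, %b
  //   store %c, ...
  // %a is last used by %b, %b by %c, and %c by the store, so at any point at
  // most one interval is open alongside the value being defined, and the
  // estimated usage is about two registers.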
6254   LoopBlocksDFS DFS(TheLoop);
6255   DFS.perform(LI);
6256 
6257   RegisterUsage RU;
6258 
6259   // Each 'key' in the map opens a new interval. The values
6260   // of the map are the index of the 'last seen' usage of the
6261   // instruction that is the key.
6262   using IntervalMap = DenseMap<Instruction *, unsigned>;
6263 
6264   // Maps instruction to its index.
6265   SmallVector<Instruction *, 64> IdxToInstr;
6266   // Marks the end of each interval.
6267   IntervalMap EndPoint;
  // Saves the set of instructions that have uses inside the loop.
6269   SmallPtrSet<Instruction *, 8> Ends;
6270   // Saves the list of values that are used in the loop but are
6271   // defined outside the loop, such as arguments and constants.
6272   SmallPtrSet<Value *, 8> LoopInvariants;
6273 
6274   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
6275     for (Instruction &I : BB->instructionsWithoutDebug()) {
6276       IdxToInstr.push_back(&I);
6277 
6278       // Save the end location of each USE.
6279       for (Value *U : I.operands()) {
6280         auto *Instr = dyn_cast<Instruction>(U);
6281 
6282         // Ignore non-instruction values such as arguments, constants, etc.
6283         if (!Instr)
6284           continue;
6285 
6286         // If this instruction is outside the loop then record it and continue.
6287         if (!TheLoop->contains(Instr)) {
6288           LoopInvariants.insert(Instr);
6289           continue;
6290         }
6291 
6292         // Overwrite previous end points.
6293         EndPoint[Instr] = IdxToInstr.size();
6294         Ends.insert(Instr);
6295       }
6296     }
6297   }
6298 
6299   // Saves the list of intervals that end with the index in 'key'.
6300   using InstrList = SmallVector<Instruction *, 2>;
6301   DenseMap<unsigned, InstrList> TransposeEnds;
6302 
6303   // Transpose the EndPoints to a list of values that end at each index.
6304   for (auto &Interval : EndPoint)
6305     TransposeEnds[Interval.second].push_back(Interval.first);
6306 
6307   SmallPtrSet<Instruction *, 8> OpenIntervals;
6308   SmallVector<RegisterUsage, 8> RUs(VFs.size());
6309   SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());
6310 
6311   LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
6312 
6313   // A lambda that gets the register usage for the given type and VF.
6314   const auto &TTICapture = TTI;
6315   auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned {
6316     if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty))
6317       return 0;
6318     InstructionCost::CostType RegUsage =
6319         *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue();
6320     assert(RegUsage >= 0 && RegUsage <= std::numeric_limits<unsigned>::max() &&
6321            "Nonsensical values for register usage.");
6322     return RegUsage;
6323   };
6324 
  for (unsigned i = 0, s = IdxToInstr.size(); i < s; ++i) {
6326     Instruction *I = IdxToInstr[i];
6327 
6328     // Remove all of the instructions that end at this location.
6329     InstrList &List = TransposeEnds[i];
6330     for (Instruction *ToRemove : List)
6331       OpenIntervals.erase(ToRemove);
6332 
6333     // Ignore instructions that are never used within the loop.
6334     if (!Ends.count(I))
6335       continue;
6336 
6337     // Skip ignored values.
6338     if (ValuesToIgnore.count(I))
6339       continue;
6340 
6341     // For each VF find the maximum usage of registers.
6342     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
6343       // Count the number of live intervals.
6344       SmallMapVector<unsigned, unsigned, 4> RegUsage;
6345 
      if (VFs[j].isScalar()) {
        for (auto *Inst : OpenIntervals) {
          unsigned ClassID =
              TTI.getRegisterClassForType(false, Inst->getType());
          // Missing map entries are value-initialized to zero, so a plain
          // increment also covers the first occurrence of a class.
          RegUsage[ClassID] += 1;
        }
      } else {
        collectUniformsAndScalars(VFs[j]);
        for (auto *Inst : OpenIntervals) {
          // Skip ignored values for VF > 1.
          if (VecValuesToIgnore.count(Inst))
            continue;
          if (isScalarAfterVectorization(Inst, VFs[j])) {
            unsigned ClassID =
                TTI.getRegisterClassForType(false, Inst->getType());
            RegUsage[ClassID] += 1;
          } else {
            unsigned ClassID =
                TTI.getRegisterClassForType(true, Inst->getType());
            RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
          }
        }
      }
6375 
      for (auto &pair : RegUsage) {
        unsigned &Entry = MaxUsages[j][pair.first];
        Entry = std::max(Entry, pair.second);
      }
6382     }
6383 
6384     LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
6385                       << OpenIntervals.size() << '\n');
6386 
6387     // Add the current instruction to the list of open intervals.
6388     OpenIntervals.insert(I);
6389   }
6390 
6391   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
6392     SmallMapVector<unsigned, unsigned, 4> Invariant;
6393 
    for (auto *Inst : LoopInvariants) {
      unsigned Usage =
          VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
      unsigned ClassID =
          TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
      Invariant[ClassID] += Usage;
6403     }
6404 
6405     LLVM_DEBUG({
6406       dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
6407       dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
6408              << " item\n";
6409       for (const auto &pair : MaxUsages[i]) {
6410         dbgs() << "LV(REG): RegisterClass: "
6411                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6412                << " registers\n";
6413       }
6414       dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
6415              << " item\n";
6416       for (const auto &pair : Invariant) {
6417         dbgs() << "LV(REG): RegisterClass: "
6418                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6419                << " registers\n";
6420       }
6421     });
6422 
6423     RU.LoopInvariantRegs = Invariant;
6424     RU.MaxLocalUsers = MaxUsages[i];
6425     RUs[i] = RU;
6426   }
6427 
6428   return RUs;
6429 }
6430 
6431 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
6432   // If we aren't vectorizing the loop, or if we've already collected the
6433   // instructions to scalarize, there's nothing to do. Collection may already
6434   // have occurred if we have a user-selected VF and are now computing the
6435   // expected cost for interleaving.
6436   if (VF.isScalar() || VF.isZero() ||
6437       InstsToScalarize.find(VF) != InstsToScalarize.end())
6438     return;
6439 
  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
6441   // not profitable to scalarize any instructions, the presence of VF in the
6442   // map will indicate that we've analyzed it already.
6443   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
6444 
  // Find all the instructions that are scalar with predication in the loop,
  // and determine whether it would be better not to if-convert the blocks
  // they are in. If so, we also record the instructions to scalarize.
6448   for (BasicBlock *BB : TheLoop->blocks()) {
6449     if (!blockNeedsPredicationForAnyReason(BB))
6450       continue;
6451     for (Instruction &I : *BB)
6452       if (isScalarWithPredication(&I, VF)) {
6453         ScalarCostsTy ScalarCosts;
6454         // Do not apply discount if scalable, because that would lead to
6455         // invalid scalarization costs.
6456         if (!VF.isScalable() &&
6457             computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
6458           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
6459         // Remember that BB will remain after vectorization.
6460         PredicatedBBsAfterVectorization.insert(BB);
6461       }
6462   }
6463 }
6464 
6465 int LoopVectorizationCostModel::computePredInstDiscount(
6466     Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
6467   assert(!isUniformAfterVectorization(PredInst, VF) &&
6468          "Instruction marked uniform-after-vectorization will be predicated");
6469 
6470   // Initialize the discount to zero, meaning that the scalar version and the
6471   // vector version cost the same.
6472   InstructionCost Discount = 0;
6473 
6474   // Holds instructions to analyze. The instructions we visit are mapped in
6475   // ScalarCosts. Those instructions are the ones that would be scalarized if
6476   // we find that the scalar version costs less.
6477   SmallVector<Instruction *, 8> Worklist;
6478 
6479   // Returns true if the given instruction can be scalarized.
6480   auto canBeScalarized = [&](Instruction *I) -> bool {
6481     // We only attempt to scalarize instructions forming a single-use chain
6482     // from the original predicated block that would otherwise be vectorized.
6483     // Although not strictly necessary, we give up on instructions we know will
6484     // already be scalar to avoid traversing chains that are unlikely to be
6485     // beneficial.
6486     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
6487         isScalarAfterVectorization(I, VF))
6488       return false;
6489 
6490     // If the instruction is scalar with predication, it will be analyzed
6491     // separately. We ignore it within the context of PredInst.
6492     if (isScalarWithPredication(I, VF))
6493       return false;
6494 
6495     // If any of the instruction's operands are uniform after vectorization,
6496     // the instruction cannot be scalarized. This prevents, for example, a
6497     // masked load from being scalarized.
6498     //
6499     // We assume we will only emit a value for lane zero of an instruction
6500     // marked uniform after vectorization, rather than VF identical values.
6501     // Thus, if we scalarize an instruction that uses a uniform, we would
6502     // create uses of values corresponding to the lanes we aren't emitting code
6503     // for. This behavior can be changed by allowing getScalarValue to clone
6504     // the lane zero values for uniforms rather than asserting.
6505     for (Use &U : I->operands())
6506       if (auto *J = dyn_cast<Instruction>(U.get()))
6507         if (isUniformAfterVectorization(J, VF))
6508           return false;
6509 
6510     // Otherwise, we can scalarize the instruction.
6511     return true;
6512   };
6513 
6514   // Compute the expected cost discount from scalarizing the entire expression
6515   // feeding the predicated instruction. We currently only consider expressions
6516   // that are single-use instruction chains.
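  //
  // As an illustration with made-up costs: if the vectorized form of the
  // chain costs 12 and its probability-scaled scalar form costs 8, the
  // accumulated discount is +4 and scalarizing the chain is a win.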
6517   Worklist.push_back(PredInst);
6518   while (!Worklist.empty()) {
6519     Instruction *I = Worklist.pop_back_val();
6520 
6521     // If we've already analyzed the instruction, there's nothing to do.
6522     if (ScalarCosts.find(I) != ScalarCosts.end())
6523       continue;
6524 
6525     // Compute the cost of the vector instruction. Note that this cost already
6526     // includes the scalarization overhead of the predicated instruction.
6527     InstructionCost VectorCost = getInstructionCost(I, VF).first;
6528 
6529     // Compute the cost of the scalarized instruction. This cost is the cost of
6530     // the instruction as if it wasn't if-converted and instead remained in the
6531     // predicated block. We will scale this cost by block probability after
6532     // computing the scalarization overhead.
6533     InstructionCost ScalarCost =
6534         VF.getFixedValue() *
6535         getInstructionCost(I, ElementCount::getFixed(1)).first;
6536 
6537     // Compute the scalarization overhead of needed insertelement instructions
6538     // and phi nodes.
6539     if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) {
6540       ScalarCost += TTI.getScalarizationOverhead(
6541           cast<VectorType>(ToVectorTy(I->getType(), VF)),
6542           APInt::getAllOnes(VF.getFixedValue()), true, false);
6543       ScalarCost +=
6544           VF.getFixedValue() *
6545           TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput);
6546     }
6547 
6548     // Compute the scalarization overhead of needed extractelement
6549     // instructions. For each of the instruction's operands, if the operand can
6550     // be scalarized, add it to the worklist; otherwise, account for the
6551     // overhead.
6552     for (Use &U : I->operands())
6553       if (auto *J = dyn_cast<Instruction>(U.get())) {
6554         assert(VectorType::isValidElementType(J->getType()) &&
6555                "Instruction has non-scalar type");
6556         if (canBeScalarized(J))
6557           Worklist.push_back(J);
6558         else if (needsExtract(J, VF)) {
6559           ScalarCost += TTI.getScalarizationOverhead(
6560               cast<VectorType>(ToVectorTy(J->getType(), VF)),
6561               APInt::getAllOnes(VF.getFixedValue()), false, true);
6562         }
6563       }
6564 
6565     // Scale the total scalar cost by block probability.
6566     ScalarCost /= getReciprocalPredBlockProb();
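    // E.g., with the default reciprocal block probability of 2, a predicated
    // block is assumed to execute every other iteration, so a raw scalar cost
    // of 8 is scaled down to 4 by this division.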
6567 
6568     // Compute the discount. A non-negative discount means the vector version
6569     // of the instruction costs more, and scalarizing would be beneficial.
6570     Discount += VectorCost - ScalarCost;
6571     ScalarCosts[I] = ScalarCost;
6572   }
6573 
6574   return *Discount.getValue();
6575 }
6576 
6577 LoopVectorizationCostModel::VectorizationCostTy
6578 LoopVectorizationCostModel::expectedCost(
6579     ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) {
6580   VectorizationCostTy Cost;
6581 
6582   // For each block.
6583   for (BasicBlock *BB : TheLoop->blocks()) {
6584     VectorizationCostTy BlockCost;
6585 
6586     // For each instruction in the old loop.
6587     for (Instruction &I : BB->instructionsWithoutDebug()) {
6588       // Skip ignored values.
6589       if (ValuesToIgnore.count(&I) ||
6590           (VF.isVector() && VecValuesToIgnore.count(&I)))
6591         continue;
6592 
6593       VectorizationCostTy C = getInstructionCost(&I, VF);
6594 
6595       // Check if we should override the cost.
6596       if (C.first.isValid() &&
6597           ForceTargetInstructionCost.getNumOccurrences() > 0)
6598         C.first = InstructionCost(ForceTargetInstructionCost);
6599 
6600       // Keep a list of instructions with invalid costs.
6601       if (Invalid && !C.first.isValid())
6602         Invalid->emplace_back(&I, VF);
6603 
6604       BlockCost.first += C.first;
6605       BlockCost.second |= C.second;
6606       LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
6607                         << " for VF " << VF << " For instruction: " << I
6608                         << '\n');
6609     }
6610 
6611     // If we are vectorizing a predicated block, it will have been
6612     // if-converted. This means that the block's instructions (aside from
6613     // stores and instructions that may divide by zero) will now be
6614     // unconditionally executed. For the scalar case, we may not always execute
6615     // the predicated block, if it is an if-else block. Thus, scale the block's
6616     // cost by the probability of executing it. blockNeedsPredication from
6617     // Legal is used so as to not include all blocks in tail folded loops.
6618     if (VF.isScalar() && Legal->blockNeedsPredication(BB))
6619       BlockCost.first /= getReciprocalPredBlockProb();
6620 
6621     Cost.first += BlockCost.first;
6622     Cost.second |= BlockCost.second;
6623   }
6624 
6625   return Cost;
6626 }
6627 
6628 /// Gets Address Access SCEV after verifying that the access pattern
6629 /// is loop invariant except the induction variable dependence.
6630 ///
6631 /// This SCEV can be sent to the Target in order to estimate the address
6632 /// calculation cost.
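///
/// As an illustrative (hypothetical) example: for an access A[i][c] with
/// induction variable %i and loop-invariant %c,
///   %gep = getelementptr [64 x i32], [64 x i32]* %A, i64 %i, i64 %c
/// qualifies, since every index is loop invariant except the single
/// induction %i, and so the SCEV of %gep is returned.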
static const SCEV *getAddressAccessSCEV(Value *Ptr,
                                        LoopVectorizationLegality *Legal,
                                        PredicatedScalarEvolution &PSE,
                                        const Loop *TheLoop) {
6639   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
6640   if (!Gep)
6641     return nullptr;
6642 
  // We are looking for a GEP with all loop-invariant indices except for one,
  // which should be an induction variable.
6645   auto SE = PSE.getSE();
6646   unsigned NumOperands = Gep->getNumOperands();
6647   for (unsigned i = 1; i < NumOperands; ++i) {
6648     Value *Opd = Gep->getOperand(i);
6649     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
6650         !Legal->isInductionVariable(Opd))
6651       return nullptr;
6652   }
6653 
  // Now we know we have a GEP of the form (ptr, %inv, %ind, %inv). Return
  // the pointer SCEV.
6655   return PSE.getSCEV(Ptr);
6656 }
6657 
6658 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
6659   return Legal->hasStride(I->getOperand(0)) ||
6660          Legal->hasStride(I->getOperand(1));
6661 }
6662 
6663 InstructionCost
6664 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
6665                                                         ElementCount VF) {
6666   assert(VF.isVector() &&
6667          "Scalarization cost of instruction implies vectorization.");
6668   if (VF.isScalable())
6669     return InstructionCost::getInvalid();
6670 
6671   Type *ValTy = getLoadStoreType(I);
6672   auto SE = PSE.getSE();
6673 
6674   unsigned AS = getLoadStoreAddressSpace(I);
6675   Value *Ptr = getLoadStorePointerOperand(I);
6676   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
6677   // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost`
6678   //       that it is being called from this specific place.
6679 
  // Figure out whether the access is strided, and get the stride value if it
  // is known at compile time.
6682   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
6683 
6684   // Get the cost of the scalar memory instruction and address computation.
6685   InstructionCost Cost =
6686       VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
6687 
6688   // Don't pass *I here, since it is scalar but will actually be part of a
6689   // vectorized loop where the user of it is a vectorized instruction.
6690   const Align Alignment = getLoadStoreAlignment(I);
6691   Cost += VF.getKnownMinValue() *
6692           TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
6693                               AS, TTI::TCK_RecipThroughput);
6694 
6695   // Get the overhead of the extractelement and insertelement instructions
6696   // we might create due to scalarization.
6697   Cost += getScalarizationOverhead(I, VF);
6698 
6699   // If we have a predicated load/store, it will need extra i1 extracts and
6700   // conditional branches, but may not be executed for each vector lane. Scale
6701   // the cost by the probability of executing the predicated block.
6702   if (isPredicatedInst(I, VF)) {
6703     Cost /= getReciprocalPredBlockProb();
6704 
    // Add the cost of an i1 extract and a branch.
6706     auto *Vec_i1Ty =
6707         VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF);
6708     Cost += TTI.getScalarizationOverhead(
6709         Vec_i1Ty, APInt::getAllOnes(VF.getKnownMinValue()),
6710         /*Insert=*/false, /*Extract=*/true);
6711     Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput);
6712   }
6713 
6714   return Cost;
6715 }
6716 
6717 InstructionCost
6718 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
6719                                                     ElementCount VF) {
6720   Type *ValTy = getLoadStoreType(I);
6721   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6722   Value *Ptr = getLoadStorePointerOperand(I);
6723   unsigned AS = getLoadStoreAddressSpace(I);
6724   int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr);
6725   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6726 
6727   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6728          "Stride should be 1 or -1 for consecutive memory access");
6729   const Align Alignment = getLoadStoreAlignment(I);
6730   InstructionCost Cost = 0;
6731   if (Legal->isMaskRequired(I))
6732     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6733                                       CostKind);
6734   else
6735     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6736                                 CostKind, I);
6737 
6738   bool Reverse = ConsecutiveStride < 0;
6739   if (Reverse)
6740     Cost +=
6741         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
6742   return Cost;
6743 }
6744 
6745 InstructionCost
6746 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
6747                                                 ElementCount VF) {
6748   assert(Legal->isUniformMemOp(*I));
6749 
6750   Type *ValTy = getLoadStoreType(I);
6751   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6752   const Align Alignment = getLoadStoreAlignment(I);
6753   unsigned AS = getLoadStoreAddressSpace(I);
6754   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6755   if (isa<LoadInst>(I)) {
6756     return TTI.getAddressComputationCost(ValTy) +
6757            TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
6758                                CostKind) +
6759            TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
6760   }
6761   StoreInst *SI = cast<StoreInst>(I);
6762 
6763   bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
6764   return TTI.getAddressComputationCost(ValTy) +
6765          TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
6766                              CostKind) +
6767          (isLoopInvariantStoreValue
6768               ? 0
6769               : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
6770                                        VF.getKnownMinValue() - 1));
6771 }
6772 
6773 InstructionCost
6774 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
6775                                                  ElementCount VF) {
6776   Type *ValTy = getLoadStoreType(I);
6777   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6778   const Align Alignment = getLoadStoreAlignment(I);
6779   const Value *Ptr = getLoadStorePointerOperand(I);
6780 
6781   return TTI.getAddressComputationCost(VectorTy) +
6782          TTI.getGatherScatterOpCost(
6783              I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
6784              TargetTransformInfo::TCK_RecipThroughput, I);
6785 }
6786 
6787 InstructionCost
6788 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
6789                                                    ElementCount VF) {
6790   // TODO: Once we have support for interleaving with scalable vectors
6791   // we can calculate the cost properly here.
6792   if (VF.isScalable())
6793     return InstructionCost::getInvalid();
6794 
6795   Type *ValTy = getLoadStoreType(I);
6796   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6797   unsigned AS = getLoadStoreAddressSpace(I);
6798 
6799   auto Group = getInterleavedAccessGroup(I);
  assert(Group && "Failed to get an interleaved access group.");
6801 
6802   unsigned InterleaveFactor = Group->getFactor();
6803   auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
6804 
6805   // Holds the indices of existing members in the interleaved group.
6806   SmallVector<unsigned, 4> Indices;
6807   for (unsigned IF = 0; IF < InterleaveFactor; IF++)
6808     if (Group->getMember(IF))
6809       Indices.push_back(IF);
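  // E.g., in a hypothetical factor-4 group where only members 0 and 2 exist
  // (a gapped access), Indices is {0, 2}, letting the cost query below price
  // the unused lanes of the wide memory operation accordingly.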
6810 
6811   // Calculate the cost of the whole interleaved group.
6812   bool UseMaskForGaps =
6813       (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) ||
6814       (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()));
6815   InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
6816       I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
6817       AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);
6818 
6819   if (Group->isReverse()) {
6820     // TODO: Add support for reversed masked interleaved access.
6821     assert(!Legal->isMaskRequired(I) &&
6822            "Reverse masked interleaved access not supported.");
6823     Cost +=
6824         Group->getNumMembers() *
6825         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
6826   }
6827   return Cost;
6828 }
6829 
6830 Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost(
6831     Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
6832   using namespace llvm::PatternMatch;
  // Early exit for no in-loop reductions.
6834   if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
6835     return None;
6836   auto *VectorTy = cast<VectorType>(Ty);
6837 
  // We are looking for one of the following patterns, and for the minimal
  // acceptable cost:
6839   //  reduce(mul(ext(A), ext(B))) or
6840   //  reduce(mul(A, B)) or
6841   //  reduce(ext(A)) or
6842   //  reduce(A).
6843   // The basic idea is that we walk down the tree to do that, finding the root
6844   // reduction instruction in InLoopReductionImmediateChains. From there we find
6845   // the pattern of mul/ext and test the cost of the entire pattern vs the cost
  // of the components. If the reduction cost is lower, then we return it for
  // the reduction instruction and 0 for the other instructions in the pattern.
  // If it is not, we return None, specifying that the original cost method
  // should be used.
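  //
  // As a sketch, a scalar chain such as
  //   %ea = sext i8 %a to i32
  //   %eb = sext i8 %b to i32
  //   %m  = mul i32 %ea, %eb
  //   %sum.next = add i32 %sum, %m
  // is, once vectorized as an in-loop reduction, conceptually
  // reduce(mul(ext(A), ext(B))), which some targets can implement as a single
  // extended multiply-add reduction.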
6850   Instruction *RetI = I;
6851   if (match(RetI, m_ZExtOrSExt(m_Value()))) {
6852     if (!RetI->hasOneUser())
6853       return None;
6854     RetI = RetI->user_back();
6855   }
6856   if (match(RetI, m_Mul(m_Value(), m_Value())) &&
6857       RetI->user_back()->getOpcode() == Instruction::Add) {
6858     if (!RetI->hasOneUser())
6859       return None;
6860     RetI = RetI->user_back();
6861   }
6862 
  // Test if the found instruction is a reduction, and if not return None so
  // that the parent falls back to the original cost modelling.
6865   if (!InLoopReductionImmediateChains.count(RetI))
6866     return None;
6867 
6868   // Find the reduction this chain is a part of and calculate the basic cost of
6869   // the reduction on its own.
6870   Instruction *LastChain = InLoopReductionImmediateChains[RetI];
6871   Instruction *ReductionPhi = LastChain;
6872   while (!isa<PHINode>(ReductionPhi))
6873     ReductionPhi = InLoopReductionImmediateChains[ReductionPhi];
6874 
6875   const RecurrenceDescriptor &RdxDesc =
6876       Legal->getReductionVars().find(cast<PHINode>(ReductionPhi))->second;
6877 
6878   InstructionCost BaseCost = TTI.getArithmeticReductionCost(
6879       RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind);
6880 
6881   // For a call to the llvm.fmuladd intrinsic we need to add the cost of a
6882   // normal fmul instruction to the cost of the fadd reduction.
6883   if (RdxDesc.getRecurrenceKind() == RecurKind::FMulAdd)
6884     BaseCost +=
6885         TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind);
6886 
6887   // If we're using ordered reductions then we can just return the base cost
6888   // here, since getArithmeticReductionCost calculates the full ordered
6889   // reduction cost when FP reassociation is not allowed.
6890   if (useOrderedReductions(RdxDesc))
6891     return BaseCost;
6892 
6893   // Get the operand that was not the reduction chain and match it to one of the
6894   // patterns, returning the better cost if it is found.
6895   Instruction *RedOp = RetI->getOperand(1) == LastChain
6896                            ? dyn_cast<Instruction>(RetI->getOperand(0))
6897                            : dyn_cast<Instruction>(RetI->getOperand(1));
6898 
6899   VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
6900 
6901   Instruction *Op0, *Op1;
6902   if (RedOp &&
6903       match(RedOp,
6904             m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) &&
6905       match(Op0, m_ZExtOrSExt(m_Value())) &&
6906       Op0->getOpcode() == Op1->getOpcode() &&
6907       Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
6908       !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) &&
6909       (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) {
6910 
    // Matched reduce(ext(mul(ext(A), ext(B))))
6912     // Note that the extend opcodes need to all match, or if A==B they will have
6913     // been converted to zext(mul(sext(A), sext(A))) as it is known positive,
6914     // which is equally fine.
6915     bool IsUnsigned = isa<ZExtInst>(Op0);
6916     auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
6917     auto *MulType = VectorType::get(Op0->getType(), VectorTy);
6918 
6919     InstructionCost ExtCost =
6920         TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType,
6921                              TTI::CastContextHint::None, CostKind, Op0);
6922     InstructionCost MulCost =
6923         TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind);
6924     InstructionCost Ext2Cost =
6925         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType,
6926                              TTI::CastContextHint::None, CostKind, RedOp);
6927 
6928     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6929         /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6930         CostKind);
6931 
6932     if (RedCost.isValid() &&
6933         RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost)
6934       return I == RetI ? RedCost : 0;
6935   } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) &&
6936              !TheLoop->isLoopInvariant(RedOp)) {
6937     // Matched reduce(ext(A))
6938     bool IsUnsigned = isa<ZExtInst>(RedOp);
6939     auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
6940     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6941         /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6942         CostKind);
6943 
6944     InstructionCost ExtCost =
6945         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
6946                              TTI::CastContextHint::None, CostKind, RedOp);
6947     if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
6948       return I == RetI ? RedCost : 0;
6949   } else if (RedOp &&
6950              match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) {
6951     if (match(Op0, m_ZExtOrSExt(m_Value())) &&
6952         Op0->getOpcode() == Op1->getOpcode() &&
6953         !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
6954       bool IsUnsigned = isa<ZExtInst>(Op0);
6955       Type *Op0Ty = Op0->getOperand(0)->getType();
6956       Type *Op1Ty = Op1->getOperand(0)->getType();
6957       Type *LargestOpTy =
6958           Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty
6959                                                                     : Op0Ty;
6960       auto *ExtType = VectorType::get(LargestOpTy, VectorTy);
6961 
      // Matched reduce(mul(ext(A), ext(B))), where the two ext may be of
      // different sizes. We take the largest type as the ext to reduce, and
      // add the remaining cost as, for example, reduce(mul(ext(ext(A)),
      // ext(B))).
6965       InstructionCost ExtCost0 = TTI.getCastInstrCost(
6966           Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy),
6967           TTI::CastContextHint::None, CostKind, Op0);
6968       InstructionCost ExtCost1 = TTI.getCastInstrCost(
6969           Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy),
6970           TTI::CastContextHint::None, CostKind, Op1);
6971       InstructionCost MulCost =
6972           TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
6973 
6974       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6975           /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6976           CostKind);
6977       InstructionCost ExtraExtCost = 0;
6978       if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) {
6979         Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1;
6980         ExtraExtCost = TTI.getCastInstrCost(
6981             ExtraExtOp->getOpcode(), ExtType,
6982             VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy),
6983             TTI::CastContextHint::None, CostKind, ExtraExtOp);
6984       }
6985 
6986       if (RedCost.isValid() &&
6987           (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost))
6988         return I == RetI ? RedCost : 0;
6989     } else if (!match(I, m_ZExtOrSExt(m_Value()))) {
6990       // Matched reduce(mul())
6991       InstructionCost MulCost =
6992           TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
6993 
6994       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6995           /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy,
6996           CostKind);
6997 
6998       if (RedCost.isValid() && RedCost < MulCost + BaseCost)
6999         return I == RetI ? RedCost : 0;
7000     }
7001   }
7002 
7003   return I == RetI ? Optional<InstructionCost>(BaseCost) : None;
7004 }
7005 
7006 InstructionCost
7007 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
7008                                                      ElementCount VF) {
7009   // Calculate scalar cost only. Vectorization cost should be ready at this
7010   // moment.
7011   if (VF.isScalar()) {
7012     Type *ValTy = getLoadStoreType(I);
7013     const Align Alignment = getLoadStoreAlignment(I);
7014     unsigned AS = getLoadStoreAddressSpace(I);
7015 
7016     return TTI.getAddressComputationCost(ValTy) +
7017            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
7018                                TTI::TCK_RecipThroughput, I);
7019   }
7020   return getWideningCost(I, VF);
7021 }
7022 
7023 LoopVectorizationCostModel::VectorizationCostTy
7024 LoopVectorizationCostModel::getInstructionCost(Instruction *I,
7025                                                ElementCount VF) {
7026   // If we know that this instruction will remain uniform, check the cost of
7027   // the scalar version.
7028   if (isUniformAfterVectorization(I, VF))
7029     VF = ElementCount::getFixed(1);
7030 
7031   if (VF.isVector() && isProfitableToScalarize(I, VF))
7032     return VectorizationCostTy(InstsToScalarize[VF][I], false);
7033 
7034   // Forced scalars do not have any scalarization overhead.
7035   auto ForcedScalar = ForcedScalars.find(VF);
7036   if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
7037     auto InstSet = ForcedScalar->second;
7038     if (InstSet.count(I))
7039       return VectorizationCostTy(
7040           (getInstructionCost(I, ElementCount::getFixed(1)).first *
7041            VF.getKnownMinValue()),
7042           false);
7043   }
7044 
7045   Type *VectorTy;
7046   InstructionCost C = getInstructionCost(I, VF, VectorTy);
7047 
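  // A type counts as "not scalarized" when legalization splits it into fewer
  // parts than there are lanes. E.g., on a hypothetical target with 128-bit
  // vectors, <8 x i64> legalizes to 4 parts, so for VF = 8 it is still
  // considered genuinely vectorized (4 < 8).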
7048   bool TypeNotScalarized = false;
7049   if (VF.isVector() && VectorTy->isVectorTy()) {
7050     unsigned NumParts = TTI.getNumberOfParts(VectorTy);
7051     if (NumParts)
7052       TypeNotScalarized = NumParts < VF.getKnownMinValue();
7053     else
7054       C = InstructionCost::getInvalid();
7055   }
7056   return VectorizationCostTy(C, TypeNotScalarized);
7057 }
7058 
7059 InstructionCost
7060 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
7061                                                      ElementCount VF) const {
7062 
7063   // There is no mechanism yet to create a scalable scalarization loop,
7064   // so this is currently Invalid.
7065   if (VF.isScalable())
7066     return InstructionCost::getInvalid();
7067 
7068   if (VF.isScalar())
7069     return 0;
7070 
7071   InstructionCost Cost = 0;
7072   Type *RetTy = ToVectorTy(I->getType(), VF);
7073   if (!RetTy->isVoidTy() &&
7074       (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
7075     Cost += TTI.getScalarizationOverhead(
7076         cast<VectorType>(RetTy), APInt::getAllOnes(VF.getKnownMinValue()), true,
7077         false);
7078 
7079   // Some targets keep addresses scalar.
7080   if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
7081     return Cost;
7082 
7083   // Some targets support efficient element stores.
7084   if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
7085     return Cost;
7086 
7087   // Collect operands to consider.
7088   CallInst *CI = dyn_cast<CallInst>(I);
7089   Instruction::op_range Ops = CI ? CI->args() : I->operands();
7090 
7091   // Skip operands that do not require extraction/scalarization and do not incur
7092   // any overhead.
7093   SmallVector<Type *> Tys;
7094   for (auto *V : filterExtractingOperands(Ops, VF))
7095     Tys.push_back(MaybeVectorizeType(V->getType(), VF));
7096   return Cost + TTI.getOperandsScalarizationOverhead(
7097                     filterExtractingOperands(Ops, VF), Tys);
7098 }
7099 
7100 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
7101   if (VF.isScalar())
7102     return;
7103   NumPredStores = 0;
7104   for (BasicBlock *BB : TheLoop->blocks()) {
7105     // For each instruction in the old loop.
7106     for (Instruction &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
7108       if (!Ptr)
7109         continue;
7110 
7111       // TODO: We should generate better code and update the cost model for
7112       // predicated uniform stores. Today they are treated as any other
7113       // predicated store (see added test cases in
7114       // invariant-store-vectorization.ll).
7115       if (isa<StoreInst>(&I) && isScalarWithPredication(&I, VF))
7116         NumPredStores++;
7117 
7118       if (Legal->isUniformMemOp(I)) {
7119         // TODO: Avoid replicating loads and stores instead of
7120         // relying on instcombine to remove them.
7121         // Load: Scalar load + broadcast
7122         // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
7123         InstructionCost Cost;
7124         if (isa<StoreInst>(&I) && VF.isScalable() &&
7125             isLegalGatherOrScatter(&I, VF)) {
7126           Cost = getGatherScatterCost(&I, VF);
7127           setWideningDecision(&I, VF, CM_GatherScatter, Cost);
7128         } else {
7129           assert((isa<LoadInst>(&I) || !VF.isScalable()) &&
7130                  "Cannot yet scalarize uniform stores");
7131           Cost = getUniformMemOpCost(&I, VF);
7132           setWideningDecision(&I, VF, CM_Scalarize, Cost);
7133         }
7134         continue;
7135       }
7136 
7137       // We assume that widening is the best solution when possible.
7138       if (memoryInstructionCanBeWidened(&I, VF)) {
7139         InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
7140         int ConsecutiveStride = Legal->isConsecutivePtr(
7141             getLoadStoreType(&I), getLoadStorePointerOperand(&I));
7142         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
7143                "Expected consecutive stride.");
7144         InstWidening Decision =
7145             ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
7146         setWideningDecision(&I, VF, Decision, Cost);
7147         continue;
7148       }
7149 
7150       // Choose between Interleaving, Gather/Scatter or Scalarization.
7151       InstructionCost InterleaveCost = InstructionCost::getInvalid();
7152       unsigned NumAccesses = 1;
7153       if (isAccessInterleaved(&I)) {
7154         auto Group = getInterleavedAccessGroup(&I);
        assert(Group && "Failed to get an interleaved access group.");
7156 
7157         // Make one decision for the whole group.
7158         if (getWideningDecision(&I, VF) != CM_Unknown)
7159           continue;
7160 
7161         NumAccesses = Group->getNumMembers();
7162         if (interleavedAccessCanBeWidened(&I, VF))
7163           InterleaveCost = getInterleaveGroupCost(&I, VF);
7164       }
7165 
7166       InstructionCost GatherScatterCost =
7167           isLegalGatherOrScatter(&I, VF)
7168               ? getGatherScatterCost(&I, VF) * NumAccesses
7169               : InstructionCost::getInvalid();
7170 
7171       InstructionCost ScalarizationCost =
7172           getMemInstScalarizationCost(&I, VF) * NumAccesses;
7173 
7174       // Choose better solution for the current VF,
7175       // write down this decision and use it during vectorization.
7176       InstructionCost Cost;
7177       InstWidening Decision;
7178       if (InterleaveCost <= GatherScatterCost &&
7179           InterleaveCost < ScalarizationCost) {
7180         Decision = CM_Interleave;
7181         Cost = InterleaveCost;
7182       } else if (GatherScatterCost < ScalarizationCost) {
7183         Decision = CM_GatherScatter;
7184         Cost = GatherScatterCost;
7185       } else {
7186         Decision = CM_Scalarize;
7187         Cost = ScalarizationCost;
7188       }
      // If the instruction belongs to an interleave group, the whole group
7190       // receives the same decision. The whole group receives the cost, but
7191       // the cost will actually be assigned to one instruction.
7192       if (auto Group = getInterleavedAccessGroup(&I))
7193         setWideningDecision(Group, VF, Decision, Cost);
7194       else
7195         setWideningDecision(&I, VF, Decision, Cost);
7196     }
7197   }
7198 
7199   // Make sure that any load of address and any other address computation
7200   // remains scalar unless there is gather/scatter support. This avoids
7201   // inevitable extracts into address registers, and also has the benefit of
7202   // activating LSR more, since that pass can't optimize vectorized
7203   // addresses.
7204   if (TTI.prefersVectorizedAddressing())
7205     return;
7206 
7207   // Start with all scalar pointer uses.
7208   SmallPtrSet<Instruction *, 8> AddrDefs;
7209   for (BasicBlock *BB : TheLoop->blocks())
7210     for (Instruction &I : *BB) {
7211       Instruction *PtrDef =
7212         dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
7213       if (PtrDef && TheLoop->contains(PtrDef) &&
7214           getWideningDecision(&I, VF) != CM_GatherScatter)
7215         AddrDefs.insert(PtrDef);
7216     }
7217 
7218   // Add all instructions used to generate the addresses.
7219   SmallVector<Instruction *, 4> Worklist;
7220   append_range(Worklist, AddrDefs);
7221   while (!Worklist.empty()) {
7222     Instruction *I = Worklist.pop_back_val();
7223     for (auto &Op : I->operands())
7224       if (auto *InstOp = dyn_cast<Instruction>(Op))
7225         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
7226             AddrDefs.insert(InstOp).second)
7227           Worklist.push_back(InstOp);
7228   }
7229 
7230   for (auto *I : AddrDefs) {
7231     if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by
      // the cost functions, but since this involves determining whether the
      // loaded register is involved in an address computation, it is
      // instead changed here once we know this is the case.
7236       InstWidening Decision = getWideningDecision(I, VF);
7237       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
7238         // Scalarize a widened load of address.
7239         setWideningDecision(
7240             I, VF, CM_Scalarize,
7241             (VF.getKnownMinValue() *
7242              getMemoryInstructionCost(I, ElementCount::getFixed(1))));
7243       else if (auto Group = getInterleavedAccessGroup(I)) {
7244         // Scalarize an interleave group of address loads.
7245         for (unsigned I = 0; I < Group->getFactor(); ++I) {
7246           if (Instruction *Member = Group->getMember(I))
7247             setWideningDecision(
7248                 Member, VF, CM_Scalarize,
7249                 (VF.getKnownMinValue() *
7250                  getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
7251         }
7252       }
7253     } else
      // Make sure I gets scalarized and receives a cost estimate without
      // scalarization overhead.
7256       ForcedScalars[VF].insert(I);
7257   }
7258 }
7259 
7260 InstructionCost
7261 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
7262                                                Type *&VectorTy) {
7263   Type *RetTy = I->getType();
7264   if (canTruncateToMinimalBitwidth(I, VF))
7265     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
7266   auto SE = PSE.getSE();
7267   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
7268 
7269   auto hasSingleCopyAfterVectorization = [this](Instruction *I,
7270                                                 ElementCount VF) -> bool {
7271     if (VF.isScalar())
7272       return true;
7273 
7274     auto Scalarized = InstsToScalarize.find(VF);
7275     assert(Scalarized != InstsToScalarize.end() &&
7276            "VF not yet analyzed for scalarization profitability");
7277     return !Scalarized->second.count(I) &&
7278            llvm::all_of(I->users(), [&](User *U) {
7279              auto *UI = cast<Instruction>(U);
7280              return !Scalarized->second.count(UI);
7281            });
7282   };
7283   (void) hasSingleCopyAfterVectorization;
7284 
7285   if (isScalarAfterVectorization(I, VF)) {
7286     // With the exception of GEPs and PHIs, after scalarization there should
7287     // only be one copy of the instruction generated in the loop. This is
7288     // because the VF is either 1, or any instructions that need scalarizing
    // have already been dealt with by the time we get here. As a result,
7290     // it means we don't have to multiply the instruction cost by VF.
7291     assert(I->getOpcode() == Instruction::GetElementPtr ||
7292            I->getOpcode() == Instruction::PHI ||
7293            (I->getOpcode() == Instruction::BitCast &&
7294             I->getType()->isPointerTy()) ||
7295            hasSingleCopyAfterVectorization(I, VF));
7296     VectorTy = RetTy;
7297   } else
7298     VectorTy = ToVectorTy(RetTy, VF);
7299 
7300   // TODO: We need to estimate the cost of intrinsic calls.
7301   switch (I->getOpcode()) {
7302   case Instruction::GetElementPtr:
7303     // We mark this instruction as zero-cost because the cost of GEPs in
7304     // vectorized code depends on whether the corresponding memory instruction
7305     // is scalarized or not. Therefore, we handle GEPs with the memory
7306     // instruction cost.
7307     return 0;
7308   case Instruction::Br: {
7309     // In cases of scalarized and predicated instructions, there will be VF
    // predicated blocks in the vectorized loop. Each branch around these
    // blocks also requires an extract of its vector compare i1 element.
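    // For instance, with VF = 4 this models four extractelements from the
    // i1 compare vector plus four conditional branches, one per scalarized
    // lane.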
7312     bool ScalarPredicatedBB = false;
7313     BranchInst *BI = cast<BranchInst>(I);
7314     if (VF.isVector() && BI->isConditional() &&
7315         (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
7316          PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
7317       ScalarPredicatedBB = true;
7318 
7319     if (ScalarPredicatedBB) {
      // It is not possible to scalarize a scalable vector with predicated
      // instructions.
7321       if (VF.isScalable())
7322         return InstructionCost::getInvalid();
7323       // Return cost for branches around scalarized and predicated blocks.
7324       auto *Vec_i1Ty =
7325           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
7326       return (
7327           TTI.getScalarizationOverhead(
7328               Vec_i1Ty, APInt::getAllOnes(VF.getFixedValue()), false, true) +
7329           (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue()));
7330     } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
7331       // The back-edge branch will remain, as will all scalar branches.
7332       return TTI.getCFInstrCost(Instruction::Br, CostKind);
7333     else
7334       // This branch will be eliminated by if-conversion.
7335       return 0;
7336     // Note: We currently assume zero cost for an unconditional branch inside
7337     // a predicated block since it will become a fall-through, although we
7338     // may decide in the future to call TTI for all branches.
7339   }
7340   case Instruction::PHI: {
7341     auto *Phi = cast<PHINode>(I);
7342 
7343     // First-order recurrences are replaced by vector shuffles inside the loop.
7344     // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
7345     if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi))
7346       return TTI.getShuffleCost(
7347           TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy),
7348           None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1));
7349 
7350     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
7351     // converted into select instructions. We require N - 1 selects per phi
7352     // node, where N is the number of incoming values.
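    // For example, a phi merging three incoming values is modelled as a
    // chain of two vector selects.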
7353     if (VF.isVector() && Phi->getParent() != TheLoop->getHeader())
7354       return (Phi->getNumIncomingValues() - 1) *
7355              TTI.getCmpSelInstrCost(
7356                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
7357                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
7358                  CmpInst::BAD_ICMP_PREDICATE, CostKind);
7359 
7360     return TTI.getCFInstrCost(Instruction::PHI, CostKind);
7361   }
7362   case Instruction::UDiv:
7363   case Instruction::SDiv:
7364   case Instruction::URem:
7365   case Instruction::SRem:
7366     // If we have a predicated instruction, it may not be executed for each
7367     // vector lane. Get the scalarization cost and scale this amount by the
7368     // probability of executing the predicated block. If the instruction is not
7369     // predicated, we fall through to the next case.
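    // For example, with getReciprocalPredBlockProb() == 2 (i.e. assuming a
    // predicated block executes for half of the vector lanes on average),
    // the scalarized cost computed below is halved.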
7370     if (VF.isVector() && isScalarWithPredication(I, VF)) {
7371       InstructionCost Cost = 0;
7372 
7373       // These instructions have a non-void type, so account for the phi nodes
7374       // that we will create. This cost is likely to be zero. The phi node
7375       // cost, if any, should be scaled by the block probability because it
7376       // models a copy at the end of each predicated block.
7377       Cost += VF.getKnownMinValue() *
7378               TTI.getCFInstrCost(Instruction::PHI, CostKind);
7379 
7380       // The cost of the non-predicated instruction.
7381       Cost += VF.getKnownMinValue() *
7382               TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);
7383 
7384       // The cost of insertelement and extractelement instructions needed for
7385       // scalarization.
7386       Cost += getScalarizationOverhead(I, VF);
7387 
7388       // Scale the cost by the probability of executing the predicated blocks.
7389       // This assumes the predicated block for each vector lane is equally
7390       // likely.
7391       return Cost / getReciprocalPredBlockProb();
7392     }
7393     LLVM_FALLTHROUGH;
7394   case Instruction::Add:
7395   case Instruction::FAdd:
7396   case Instruction::Sub:
7397   case Instruction::FSub:
7398   case Instruction::Mul:
7399   case Instruction::FMul:
7400   case Instruction::FDiv:
7401   case Instruction::FRem:
7402   case Instruction::Shl:
7403   case Instruction::LShr:
7404   case Instruction::AShr:
7405   case Instruction::And:
7406   case Instruction::Or:
7407   case Instruction::Xor: {
    // Since we will replace the stride with 1, the multiplication should go
    // away.
7409     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
7410       return 0;
7411 
7412     // Detect reduction patterns
7413     if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7414       return *RedCost;
7415 
7416     // Certain instructions can be cheaper to vectorize if they have a constant
    // second vector operand. One example of this is shifts on x86.
7418     Value *Op2 = I->getOperand(1);
7419     TargetTransformInfo::OperandValueProperties Op2VP;
7420     TargetTransformInfo::OperandValueKind Op2VK =
7421         TTI.getOperandInfo(Op2, Op2VP);
7422     if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
7423       Op2VK = TargetTransformInfo::OK_UniformValue;
7424 
7425     SmallVector<const Value *, 4> Operands(I->operand_values());
7426     return TTI.getArithmeticInstrCost(
7427         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7428         Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
7429   }
7430   case Instruction::FNeg: {
7431     return TTI.getArithmeticInstrCost(
7432         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7433         TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None,
7434         TargetTransformInfo::OP_None, I->getOperand(0), I);
7435   }
7436   case Instruction::Select: {
7437     SelectInst *SI = cast<SelectInst>(I);
7438     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
7439     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
7440 
7441     const Value *Op0, *Op1;
7442     using namespace llvm::PatternMatch;
7443     if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
7444                         match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
7445       // select x, y, false --> x & y
7446       // select x, true, y --> x | y
7447       TTI::OperandValueProperties Op1VP = TTI::OP_None;
7448       TTI::OperandValueProperties Op2VP = TTI::OP_None;
7449       TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP);
7450       TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP);
7451       assert(Op0->getType()->getScalarSizeInBits() == 1 &&
             Op1->getType()->getScalarSizeInBits() == 1);
7453 
7454       SmallVector<const Value *, 2> Operands{Op0, Op1};
7455       return TTI.getArithmeticInstrCost(
7456           match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And, VectorTy,
7457           CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I);
7458     }
7459 
7460     Type *CondTy = SI->getCondition()->getType();
7461     if (!ScalarCond)
7462       CondTy = VectorType::get(CondTy, VF);
7463 
7464     CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
7465     if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition()))
7466       Pred = Cmp->getPredicate();
7467     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, Pred,
7468                                   CostKind, I);
7469   }
7470   case Instruction::ICmp:
7471   case Instruction::FCmp: {
7472     Type *ValTy = I->getOperand(0)->getType();
7473     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
7474     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
7475       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
7476     VectorTy = ToVectorTy(ValTy, VF);
7477     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr,
7478                                   cast<CmpInst>(I)->getPredicate(), CostKind,
7479                                   I);
7480   }
7481   case Instruction::Store:
7482   case Instruction::Load: {
7483     ElementCount Width = VF;
7484     if (Width.isVector()) {
7485       InstWidening Decision = getWideningDecision(I, Width);
7486       assert(Decision != CM_Unknown &&
7487              "CM decision should be taken at this point");
7488       if (Decision == CM_Scalarize)
7489         Width = ElementCount::getFixed(1);
7490     }
7491     VectorTy = ToVectorTy(getLoadStoreType(I), Width);
7492     return getMemoryInstructionCost(I, VF);
7493   }
7494   case Instruction::BitCast:
7495     if (I->getType()->isPointerTy())
7496       return 0;
7497     LLVM_FALLTHROUGH;
7498   case Instruction::ZExt:
7499   case Instruction::SExt:
7500   case Instruction::FPToUI:
7501   case Instruction::FPToSI:
7502   case Instruction::FPExt:
7503   case Instruction::PtrToInt:
7504   case Instruction::IntToPtr:
7505   case Instruction::SIToFP:
7506   case Instruction::UIToFP:
7507   case Instruction::Trunc:
7508   case Instruction::FPTrunc: {
7509     // Computes the CastContextHint from a Load/Store instruction.
7510     auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
7511       assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
7512              "Expected a load or a store!");
7513 
7514       if (VF.isScalar() || !TheLoop->contains(I))
7515         return TTI::CastContextHint::Normal;
7516 
7517       switch (getWideningDecision(I, VF)) {
7518       case LoopVectorizationCostModel::CM_GatherScatter:
7519         return TTI::CastContextHint::GatherScatter;
7520       case LoopVectorizationCostModel::CM_Interleave:
7521         return TTI::CastContextHint::Interleave;
7522       case LoopVectorizationCostModel::CM_Scalarize:
7523       case LoopVectorizationCostModel::CM_Widen:
7524         return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked
7525                                         : TTI::CastContextHint::Normal;
7526       case LoopVectorizationCostModel::CM_Widen_Reverse:
7527         return TTI::CastContextHint::Reversed;
7528       case LoopVectorizationCostModel::CM_Unknown:
7529         llvm_unreachable("Instr did not go through cost modelling?");
7530       }
7531 
7532       llvm_unreachable("Unhandled case!");
7533     };
7534 
7535     unsigned Opcode = I->getOpcode();
7536     TTI::CastContextHint CCH = TTI::CastContextHint::None;
    // For Trunc/FPTrunc, the context is the only user, which must be a
    // StoreInst.
7538     if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
7539       if (I->hasOneUse())
7540         if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
7541           CCH = ComputeCCH(Store);
7542     }
    // For ZExt/SExt/FPExt, the context is the operand, which must be a
    // LoadInst.
7544     else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
7545              Opcode == Instruction::FPExt) {
7546       if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
7547         CCH = ComputeCCH(Load);
7548     }
7549 
7550     // We optimize the truncation of induction variables having constant
7551     // integer steps. The cost of these truncations is the same as the scalar
7552     // operation.
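    // For example, 'trunc i64 %iv to i32' for an induction with a constant
    // step can be performed by truncating the start value and step instead,
    // so only the cost of a single scalar truncate is accounted for here.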
7553     if (isOptimizableIVTruncate(I, VF)) {
7554       auto *Trunc = cast<TruncInst>(I);
7555       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
7556                                   Trunc->getSrcTy(), CCH, CostKind, Trunc);
7557     }
7558 
7559     // Detect reduction patterns
7560     if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7561       return *RedCost;
7562 
7563     Type *SrcScalarTy = I->getOperand(0)->getType();
7564     Type *SrcVecTy =
7565         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
7566     if (canTruncateToMinimalBitwidth(I, VF)) {
7567       // This cast is going to be shrunk. This may remove the cast or it might
    // turn it into a slightly different cast. For example, if MinBW == 16,
7569       // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
7570       //
7571       // Calculate the modified src and dest types.
7572       Type *MinVecTy = VectorTy;
7573       if (Opcode == Instruction::Trunc) {
7574         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
7575         VectorTy =
7576             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7577       } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
7578         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
7579         VectorTy =
7580             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7581       }
7582     }
7583 
7584     return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
7585   }
7586   case Instruction::Call: {
7587     if (RecurrenceDescriptor::isFMulAddIntrinsic(I))
7588       if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7589         return *RedCost;
7590     bool NeedToScalarize;
7591     CallInst *CI = cast<CallInst>(I);
7592     InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
7593     if (getVectorIntrinsicIDForCall(CI, TLI)) {
7594       InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF);
7595       return std::min(CallCost, IntrinsicCost);
7596     }
7597     return CallCost;
7598   }
7599   case Instruction::ExtractValue:
7600     return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput);
7601   case Instruction::Alloca:
7602     // We cannot easily widen alloca to a scalable alloca, as
7603     // the result would need to be a vector of pointers.
7604     if (VF.isScalable())
7605       return InstructionCost::getInvalid();
7606     LLVM_FALLTHROUGH;
7607   default:
7608     // This opcode is unknown. Assume that it is the same as 'mul'.
7609     return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
7610   } // end of switch.
7611 }
7612 
7613 char LoopVectorize::ID = 0;
7614 
7615 static const char lv_name[] = "Loop Vectorization";
7616 
7617 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
7618 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
7619 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
7620 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
7621 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
7622 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
7623 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
7624 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
7625 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
7626 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
7627 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
7628 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
7629 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
7630 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
7631 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
7632 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
7633 
7634 namespace llvm {
7635 
7636 Pass *createLoopVectorizePass() { return new LoopVectorize(); }
7637 
7638 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
7639                               bool VectorizeOnlyWhenForced) {
7640   return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
7641 }
7642 
7643 } // end namespace llvm
7644 
7645 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
7646   // Check if the pointer operand of a load or store instruction is
7647   // consecutive.
7648   if (auto *Ptr = getLoadStorePointerOperand(Inst))
7649     return Legal->isConsecutivePtr(getLoadStoreType(Inst), Ptr);
7650   return false;
7651 }
7652 
7653 void LoopVectorizationCostModel::collectValuesToIgnore() {
7654   // Ignore ephemeral values.
7655   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
7656 
7657   // Ignore type-promoting instructions we identified during reduction
7658   // detection.
7659   for (auto &Reduction : Legal->getReductionVars()) {
7660     const RecurrenceDescriptor &RedDes = Reduction.second;
7661     const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
7662     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7663   }
7664   // Ignore type-casting instructions we identified during induction
7665   // detection.
7666   for (auto &Induction : Legal->getInductionVars()) {
7667     const InductionDescriptor &IndDes = Induction.second;
7668     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
7669     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7670   }
7671 }
7672 
7673 void LoopVectorizationCostModel::collectInLoopReductions() {
7674   for (auto &Reduction : Legal->getReductionVars()) {
7675     PHINode *Phi = Reduction.first;
7676     const RecurrenceDescriptor &RdxDesc = Reduction.second;
7677 
7678     // We don't collect reductions that are type promoted (yet).
7679     if (RdxDesc.getRecurrenceType() != Phi->getType())
7680       continue;
7681 
7682     // If the target would prefer this reduction to happen "in-loop", then we
7683     // want to record it as such.
7684     unsigned Opcode = RdxDesc.getOpcode();
7685     if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) &&
7686         !TTI.preferInLoopReduction(Opcode, Phi->getType(),
7687                                    TargetTransformInfo::ReductionFlags()))
7688       continue;
7689 
7690     // Check that we can correctly put the reductions into the loop, by
7691     // finding the chain of operations that leads from the phi to the loop
7692     // exit value.
7693     SmallVector<Instruction *, 4> ReductionOperations =
7694         RdxDesc.getReductionOpChain(Phi, TheLoop);
7695     bool InLoop = !ReductionOperations.empty();
7696     if (InLoop) {
7697       InLoopReductionChains[Phi] = ReductionOperations;
7698       // Add the elements to InLoopReductionImmediateChains for cost modelling.
7699       Instruction *LastChain = Phi;
7700       for (auto *I : ReductionOperations) {
7701         InLoopReductionImmediateChains[I] = LastChain;
7702         LastChain = I;
7703       }
7704     }
7705     LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
7706                       << " reduction for phi: " << *Phi << "\n");
7707   }
7708 }
7709 
7710 // TODO: we could return a pair of values that specify the max VF and
7711 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
// `buildVPlans(VF, VF)`. We cannot do this yet because VPlan does not have
// a cost model that can choose which plan to execute if more than one is
// generated.
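// For example, with 256-bit wide vector registers and i32 as the widest
// element type in the loop, the returned VF is 256 / 32 = 8.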
7715 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
7716                                  LoopVectorizationCostModel &CM) {
7717   unsigned WidestType;
7718   std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
7719   return WidestVectorRegBits / WidestType;
7720 }
7721 
7722 VectorizationFactor
7723 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
7724   assert(!UserVF.isScalable() && "scalable vectors not yet supported");
7725   ElementCount VF = UserVF;
  // Outer loop handling: outer loops may require CFG and instruction level
  // transformations before we can even evaluate whether vectorization is
  // profitable.
7728   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
7729   // the vectorization pipeline.
7730   if (!OrigLoop->isInnermost()) {
7731     // If the user doesn't provide a vectorization factor, determine a
7732     // reasonable one.
7733     if (UserVF.isZero()) {
7734       VF = ElementCount::getFixed(determineVPlanVF(
7735           TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
7736               .getFixedSize(),
7737           CM));
7738       LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
7739 
7740       // Make sure we have a VF > 1 for stress testing.
7741       if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
7742         LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
7743                           << "overriding computed VF.\n");
7744         VF = ElementCount::getFixed(4);
7745       }
7746     }
7747     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7748     assert(isPowerOf2_32(VF.getKnownMinValue()) &&
7749            "VF needs to be a power of two");
7750     LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
7751                       << "VF " << VF << " to build VPlans.\n");
7752     buildVPlans(VF, VF);
7753 
7754     // For VPlan build stress testing, we bail out after VPlan construction.
7755     if (VPlanBuildStressTest)
7756       return VectorizationFactor::Disabled();
7757 
7758     return {VF, 0 /*Cost*/};
7759   }
7760 
7761   LLVM_DEBUG(
7762       dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
7763                 "VPlan-native path.\n");
7764   return VectorizationFactor::Disabled();
7765 }
7766 
7767 Optional<VectorizationFactor>
7768 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
7769   assert(OrigLoop->isInnermost() && "Inner loop expected.");
7770   FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
  if (!MaxFactors) // Cases that should not be vectorized or interleaved.
7772     return None;
7773 
  // Invalidate interleave groups if all blocks of the loop will be
  // predicated.
7775   if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) &&
7776       !useMaskedInterleavedAccesses(*TTI)) {
7777     LLVM_DEBUG(
7778         dbgs()
7779         << "LV: Invalidate all interleaved groups due to fold-tail by masking "
7780            "which requires masked-interleaved support.\n");
7781     if (CM.InterleaveInfo.invalidateGroups())
7782       // Invalidating interleave groups also requires invalidating all decisions
7783       // based on them, which includes widening decisions and uniform and scalar
7784       // values.
7785       CM.invalidateCostModelingDecisions();
7786   }
7787 
7788   ElementCount MaxUserVF =
7789       UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF;
7790   bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF);
7791   if (!UserVF.isZero() && UserVFIsLegal) {
7792     assert(isPowerOf2_32(UserVF.getKnownMinValue()) &&
7793            "VF needs to be a power of two");
7794     // Collect the instructions (and their associated costs) that will be more
7795     // profitable to scalarize.
7796     if (CM.selectUserVectorizationFactor(UserVF)) {
7797       LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
7798       CM.collectInLoopReductions();
7799       buildVPlansWithVPRecipes(UserVF, UserVF);
7800       LLVM_DEBUG(printPlans(dbgs()));
7801       return {{UserVF, 0}};
7802     } else
7803       reportVectorizationInfo("UserVF ignored because of invalid costs.",
7804                               "InvalidCost", ORE, OrigLoop);
7805   }
7806 
7807   // Populate the set of Vectorization Factor Candidates.
7808   ElementCountSet VFCandidates;
7809   for (auto VF = ElementCount::getFixed(1);
7810        ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2)
7811     VFCandidates.insert(VF);
7812   for (auto VF = ElementCount::getScalable(1);
7813        ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2)
7814     VFCandidates.insert(VF);
7815 
7816   for (const auto &VF : VFCandidates) {
7817     // Collect Uniform and Scalar instructions after vectorization with VF.
7818     CM.collectUniformsAndScalars(VF);
7819 
7820     // Collect the instructions (and their associated costs) that will be more
7821     // profitable to scalarize.
7822     if (VF.isVector())
7823       CM.collectInstsToScalarize(VF);
7824   }
7825 
7826   CM.collectInLoopReductions();
7827   buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF);
7828   buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF);
7829 
7830   LLVM_DEBUG(printPlans(dbgs()));
7831   if (!MaxFactors.hasVector())
7832     return VectorizationFactor::Disabled();
7833 
7834   // Select the optimal vectorization factor.
7835   auto SelectedVF = CM.selectVectorizationFactor(VFCandidates);
7836 
7837   // Check if it is profitable to vectorize with runtime checks.
7838   unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks();
7839   if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) {
7840     bool PragmaThresholdReached =
7841         NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
7842     bool ThresholdReached =
7843         NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
7844     if ((ThresholdReached && !Hints.allowReordering()) ||
7845         PragmaThresholdReached) {
7846       ORE->emit([&]() {
7847         return OptimizationRemarkAnalysisAliasing(
7848                    DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(),
7849                    OrigLoop->getHeader())
7850                << "loop not vectorized: cannot prove it is safe to reorder "
7851                   "memory operations";
7852       });
7853       LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
7854       Hints.emitRemarkWithHints();
7855       return VectorizationFactor::Disabled();
7856     }
7857   }
7858   return SelectedVF;
7859 }
7860 
7861 VPlan &LoopVectorizationPlanner::getBestPlanFor(ElementCount VF) const {
7862   assert(count_if(VPlans,
7863                   [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) ==
7864              1 &&
7865          "Best VF has not a single VPlan.");
7866 
7867   for (const VPlanPtr &Plan : VPlans) {
7868     if (Plan->hasVF(VF))
7869       return *Plan.get();
7870   }
7871   llvm_unreachable("No plan found!");
7872 }
7873 
7874 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
7875   SmallVector<Metadata *, 4> MDs;
  // Reserve the first location for the self-reference to the LoopID
  // metadata node.
7877   MDs.push_back(nullptr);
7878   bool IsUnrollMetadata = false;
7879   MDNode *LoopID = L->getLoopID();
7880   if (LoopID) {
7881     // First find existing loop unrolling disable metadata.
7882     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
7883       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
7884       if (MD) {
7885         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
7886         IsUnrollMetadata =
7887             S && S->getString().startswith("llvm.loop.unroll.disable");
7888       }
7889       MDs.push_back(LoopID->getOperand(i));
7890     }
7891   }
7892 
7893   if (!IsUnrollMetadata) {
7894     // Add runtime unroll disable metadata.
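    // Schematically, the new loop ID becomes:
    //   !0 = distinct !{!0, <existing operands...>, !1}
    //   !1 = !{!"llvm.loop.unroll.runtime.disable"}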
7895     LLVMContext &Context = L->getHeader()->getContext();
7896     SmallVector<Metadata *, 1> DisableOperands;
7897     DisableOperands.push_back(
7898         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
7899     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
7900     MDs.push_back(DisableNode);
7901     MDNode *NewLoopID = MDNode::get(Context, MDs);
7902     // Set operand 0 to refer to the loop id itself.
7903     NewLoopID->replaceOperandWith(0, NewLoopID);
7904     L->setLoopID(NewLoopID);
7905   }
7906 }
7907 
7908 void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF,
7909                                            VPlan &BestVPlan,
7910                                            InnerLoopVectorizer &ILV,
7911                                            DominatorTree *DT) {
7912   LLVM_DEBUG(dbgs() << "Executing best plan with VF=" << BestVF << ", UF=" << BestUF
7913                     << '\n');
7914 
7915   // Perform the actual loop transformation.
7916 
7917   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
7918   VPTransformState State{BestVF, BestUF, LI, DT, ILV.Builder, &ILV, &BestVPlan};
7919   Value *CanonicalIVStartValue;
7920   std::tie(State.CFG.PrevBB, CanonicalIVStartValue) =
7921       ILV.createVectorizedLoopSkeleton();
7922   ILV.collectPoisonGeneratingRecipes(State);
7923 
7924   ILV.printDebugTracesAtStart();
7925 
7926   //===------------------------------------------------===//
7927   //
  // Notice: any optimization or new instruction that goes
7929   // into the code below should also be implemented in
7930   // the cost-model.
7931   //
7932   //===------------------------------------------------===//
7933 
7934   // 2. Copy and widen instructions from the old loop into the new loop.
7935   BestVPlan.prepareToExecute(ILV.getOrCreateTripCount(nullptr),
7936                              ILV.getOrCreateVectorTripCount(nullptr),
7937                              CanonicalIVStartValue, State);
7938   BestVPlan.execute(&State);
7939 
  // Retrieve the original loop's metadata, so we can derive the follow-up
  // metadata for the vector loop from it below.
7942   MDNode *OrigLoopID = OrigLoop->getLoopID();
7943 
7944   Optional<MDNode *> VectorizedLoopID =
7945       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
7946                                       LLVMLoopVectorizeFollowupVectorized});
7947 
7948   Loop *L = LI->getLoopFor(State.CFG.PrevBB);
7949   if (VectorizedLoopID.hasValue())
7950     L->setLoopID(VectorizedLoopID.getValue());
7951   else {
7952     // Keep all loop hints from the original loop on the vector loop (we'll
7953     // replace the vectorizer-specific hints below).
7954     if (MDNode *LID = OrigLoop->getLoopID())
7955       L->setLoopID(LID);
7956 
7957     LoopVectorizeHints Hints(L, true, *ORE);
7958     Hints.setAlreadyVectorized();
7959   }
7960   // Disable runtime unrolling when vectorizing the epilogue loop.
7961   if (CanonicalIVStartValue)
7962     AddRuntimeUnrollDisableMetaData(L);
7963 
7964   // 3. Fix the vectorized code: take care of header phi's, live-outs,
7965   //    predication, updating analyses.
7966   ILV.fixVectorizedLoop(State);
7967 
7968   ILV.printDebugTracesAtEnd();
7969 }
7970 
7971 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
7972 void LoopVectorizationPlanner::printPlans(raw_ostream &O) {
7973   for (const auto &Plan : VPlans)
7974     if (PrintVPlansInDotFormat)
7975       Plan->printDOT(O);
7976     else
7977       Plan->print(O);
7978 }
7979 #endif
7980 
7981 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
7982     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
7983 
  // We create new control-flow for the vectorized loop, so an original exit
  // condition will be dead after vectorization if it is only used by the
  // terminator.
7987   SmallVector<BasicBlock*> ExitingBlocks;
7988   OrigLoop->getExitingBlocks(ExitingBlocks);
7989   for (auto *BB : ExitingBlocks) {
7990     auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
7991     if (!Cmp || !Cmp->hasOneUse())
7992       continue;
7993 
7994     // TODO: we should introduce a getUniqueExitingBlocks on Loop
7995     if (!DeadInstructions.insert(Cmp).second)
7996       continue;
7997 
    // An operand of the icmp is often a dead trunc, used by IndUpdate.
7999     // TODO: can recurse through operands in general
8000     for (Value *Op : Cmp->operands()) {
8001       if (isa<TruncInst>(Op) && Op->hasOneUse())
        DeadInstructions.insert(cast<Instruction>(Op));
8003     }
8004   }
8005 
8006   // We create new "steps" for induction variable updates to which the original
8007   // induction variables map. An original update instruction will be dead if
8008   // all its users except the induction variable are dead.
8009   auto *Latch = OrigLoop->getLoopLatch();
8010   for (auto &Induction : Legal->getInductionVars()) {
8011     PHINode *Ind = Induction.first;
8012     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
8013 
    // If the tail is to be folded by masking, the primary induction
    // variable, if it exists, isn't dead: it will be used for masking.
    // Don't kill it.
8016     if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction())
8017       continue;
8018 
8019     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
8020           return U == Ind || DeadInstructions.count(cast<Instruction>(U));
8021         }))
8022       DeadInstructions.insert(IndUpdate);
8023   }
8024 }
8025 
8026 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
8027 
8028 //===--------------------------------------------------------------------===//
8029 // EpilogueVectorizerMainLoop
8030 //===--------------------------------------------------------------------===//
8031 
8032 /// This function is partially responsible for generating the control flow
8033 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
8034 std::pair<BasicBlock *, Value *>
8035 EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
8036   MDNode *OrigLoopID = OrigLoop->getLoopID();
8037   Loop *Lp = createVectorLoopSkeleton("");
8038 
8039   // Generate the code to check the minimum iteration count of the vector
8040   // epilogue (see below).
8041   EPI.EpilogueIterationCountCheck =
8042       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true);
8043   EPI.EpilogueIterationCountCheck->setName("iter.check");
8044 
8045   // Generate the code to check any assumptions that we've made for SCEV
8046   // expressions.
8047   EPI.SCEVSafetyCheck = emitSCEVChecks(Lp, LoopScalarPreHeader);
8048 
8049   // Generate the code that checks at runtime if arrays overlap. We put the
8050   // checks into a separate block to make the more common case of few elements
8051   // faster.
8052   EPI.MemSafetyCheck = emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
8053 
8054   // Generate the iteration count check for the main loop, *after* the check
8055   // for the epilogue loop, so that the path-length is shorter for the case
8056   // that goes directly through the vector epilogue. The longer-path length for
8057   // the main loop is compensated for, by the gain from vectorizing the larger
8058   // trip count. Note: the branch will get updated later on when we vectorize
8059   // the epilogue.
8060   EPI.MainLoopIterationCountCheck =
8061       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false);
8062 
8063   // Generate the induction variable.
8064   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
8065   EPI.VectorTripCount = CountRoundDown;
8066   createHeaderBranch(Lp);
8067 
  // Skip induction resume value creation here because the resume values will
  // be created in the second pass. If we created them here, they wouldn't be
  // used anyway, because the VPlan in the second pass still contains the
  // inductions from the original loop.
8072 
8073   return {completeLoopSkeleton(Lp, OrigLoopID), nullptr};
8074 }
8075 
8076 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
8077   LLVM_DEBUG({
8078     dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
8079            << "Main Loop VF:" << EPI.MainLoopVF
8080            << ", Main Loop UF:" << EPI.MainLoopUF
8081            << ", Epilogue Loop VF:" << EPI.EpilogueVF
8082            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8083   });
8084 }
8085 
8086 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
8087   DEBUG_WITH_TYPE(VerboseDebug, {
8088     dbgs() << "intermediate fn:\n"
8089            << *OrigLoop->getHeader()->getParent() << "\n";
8090   });
8091 }
8092 
8093 BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(
8094     Loop *L, BasicBlock *Bypass, bool ForEpilogue) {
8095   assert(L && "Expected valid Loop.");
8096   assert(Bypass && "Expected valid bypass basic block.");
8097   ElementCount VFactor = ForEpilogue ? EPI.EpilogueVF : VF;
8098   unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF;
8099   Value *Count = getOrCreateTripCount(L);
  // Reuse the existing vector loop preheader for the TC checks.
  // Note that a new preheader block is generated for the vector loop.
8102   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
8103   IRBuilder<> Builder(TCCheckBlock->getTerminator());
8104 
  // Generate code to check if the loop's trip count is less than VF * UF of
  // the corresponding vector loop (main loop or epilogue).
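  // If a scalar epilogue is required, the vector loop must also be bypassed
  // when the trip count is exactly VF * UF, so that at least one iteration
  // remains for the scalar remainder; hence ULE rather than ULT below.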
8107   auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF) ?
8108       ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
8109 
8110   Value *CheckMinIters = Builder.CreateICmp(
8111       P, Count, createStepForVF(Builder, Count->getType(), VFactor, UFactor),
8112       "min.iters.check");
8113 
8114   if (!ForEpilogue)
8115     TCCheckBlock->setName("vector.main.loop.iter.check");
8116 
8117   // Create new preheader for vector loop.
8118   LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
8119                                    DT, LI, nullptr, "vector.ph");
8120 
8121   if (ForEpilogue) {
8122     assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
8123                                  DT->getNode(Bypass)->getIDom()) &&
8124            "TC check is expected to dominate Bypass");
8125 
8126     // Update dominator for Bypass & LoopExit.
8127     DT->changeImmediateDominator(Bypass, TCCheckBlock);
8128     if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF))
8129       // For loops with multiple exits, there's no edge from the middle block
8130       // to exit blocks (as the epilogue must run) and thus no need to update
8131       // the immediate dominator of the exit blocks.
8132       DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
8133 
8134     LoopBypassBlocks.push_back(TCCheckBlock);
8135 
8136     // Save the trip count so we don't have to regenerate it in the
8137     // vec.epilog.iter.check. This is safe to do because the trip count
8138     // generated here dominates the vector epilog iter check.
8139     EPI.TripCount = Count;
8140   }
8141 
8142   ReplaceInstWithInst(
8143       TCCheckBlock->getTerminator(),
8144       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8145 
8146   return TCCheckBlock;
8147 }
8148 
8149 //===--------------------------------------------------------------------===//
8150 // EpilogueVectorizerEpilogueLoop
8151 //===--------------------------------------------------------------------===//
8152 
8153 /// This function is partially responsible for generating the control flow
8154 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
8155 std::pair<BasicBlock *, Value *>
8156 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
8157   MDNode *OrigLoopID = OrigLoop->getLoopID();
8158   Loop *Lp = createVectorLoopSkeleton("vec.epilog.");
8159 
  // Now, compare the remaining count; if there aren't enough iterations to
  // execute the vectorized epilogue, skip to the scalar part.
8162   BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader;
8163   VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check");
8164   LoopVectorPreHeader =
8165       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
8166                  LI, nullptr, "vec.epilog.ph");
8167   emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader,
8168                                           VecEpilogueIterationCountCheck);
8169 
8170   // Adjust the control flow taking the state info from the main loop
8171   // vectorization into account.
8172   assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck &&
8173          "expected this to be saved from the previous pass.");
8174   EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith(
8175       VecEpilogueIterationCountCheck, LoopVectorPreHeader);
8176 
8177   DT->changeImmediateDominator(LoopVectorPreHeader,
8178                                EPI.MainLoopIterationCountCheck);
8179 
8180   EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
8181       VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8182 
8183   if (EPI.SCEVSafetyCheck)
8184     EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith(
8185         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8186   if (EPI.MemSafetyCheck)
8187     EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith(
8188         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8189 
8190   DT->changeImmediateDominator(
8191       VecEpilogueIterationCountCheck,
8192       VecEpilogueIterationCountCheck->getSinglePredecessor());
8193 
8194   DT->changeImmediateDominator(LoopScalarPreHeader,
8195                                EPI.EpilogueIterationCountCheck);
8196   if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF))
8197     // If there is an epilogue which must run, there's no edge from the
    // middle block to exit blocks and thus no need to update the immediate
8199     // dominator of the exit blocks.
8200     DT->changeImmediateDominator(LoopExitBlock,
8201                                  EPI.EpilogueIterationCountCheck);
8202 
8203   // Keep track of bypass blocks, as they feed start values to the induction
8204   // phis in the scalar loop preheader.
8205   if (EPI.SCEVSafetyCheck)
8206     LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck);
8207   if (EPI.MemSafetyCheck)
8208     LoopBypassBlocks.push_back(EPI.MemSafetyCheck);
8209   LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck);
8210 
8211   // The vec.epilog.iter.check block may contain Phi nodes from reductions which
8212   // merge control-flow from the latch block and the middle block. Update the
8213   // incoming values here and move the Phi into the preheader.
8214   SmallVector<PHINode *, 4> PhisInBlock;
8215   for (PHINode &Phi : VecEpilogueIterationCountCheck->phis())
8216     PhisInBlock.push_back(&Phi);
8217 
8218   for (PHINode *Phi : PhisInBlock) {
8219     Phi->replaceIncomingBlockWith(
8220         VecEpilogueIterationCountCheck->getSinglePredecessor(),
8221         VecEpilogueIterationCountCheck);
8222     Phi->removeIncomingValue(EPI.EpilogueIterationCountCheck);
8223     if (EPI.SCEVSafetyCheck)
8224       Phi->removeIncomingValue(EPI.SCEVSafetyCheck);
8225     if (EPI.MemSafetyCheck)
8226       Phi->removeIncomingValue(EPI.MemSafetyCheck);
8227     Phi->moveBefore(LoopVectorPreHeader->getFirstNonPHI());
8228   }
8229 
  // Generate a resume induction for the vector epilogue and put it in the
  // vector epilogue preheader.
8232   Type *IdxTy = Legal->getWidestInductionType();
8233   PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
8234                                          LoopVectorPreHeader->getFirstNonPHI());
8235   EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
8236   EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
8237                            EPI.MainLoopIterationCountCheck);
8238 
8239   // Generate the induction variable.
8240   createHeaderBranch(Lp);
8241 
8242   // Generate induction resume values. These variables save the new starting
8243   // indexes for the scalar loop. They are used to test if there are any tail
8244   // iterations left once the vector loop has completed.
8245   // Note that when the vectorized epilogue is skipped due to iteration count
8246   // check, then the resume value for the induction variable comes from
8247   // the trip count of the main vector loop, hence passing the AdditionalBypass
8248   // argument.
8249   createInductionResumeValues(Lp, {VecEpilogueIterationCountCheck,
8250                                    EPI.VectorTripCount} /* AdditionalBypass */);
8251 
8252   return {completeLoopSkeleton(Lp, OrigLoopID), EPResumeVal};
8253 }
8254 
8255 BasicBlock *
8256 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
8257     Loop *L, BasicBlock *Bypass, BasicBlock *Insert) {
8258 
8259   assert(EPI.TripCount &&
8260          "Expected trip count to have been safed in the first pass.");
8261   assert(
8262       (!isa<Instruction>(EPI.TripCount) ||
8263        DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
8264       "saved trip count does not dominate insertion point.");
8265   Value *TC = EPI.TripCount;
8266   IRBuilder<> Builder(Insert->getTerminator());
8267   Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");
8268 
8269   // Generate code to check if the loop's trip count is less than VF * UF of the
8270   // vector epilogue loop.
8271   auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ?
8272       ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
8273 
8274   Value *CheckMinIters =
8275       Builder.CreateICmp(P, Count,
8276                          createStepForVF(Builder, Count->getType(),
8277                                          EPI.EpilogueVF, EPI.EpilogueUF),
8278                          "min.epilog.iters.check");
8279 
8280   ReplaceInstWithInst(
8281       Insert->getTerminator(),
8282       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8283 
8284   LoopBypassBlocks.push_back(Insert);
8285   return Insert;
8286 }
8287 
8288 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
8289   LLVM_DEBUG({
8290     dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
8291            << "Epilogue Loop VF:" << EPI.EpilogueVF
8292            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8293   });
8294 }
8295 
8296 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
8297   DEBUG_WITH_TYPE(VerboseDebug, {
8298     dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n";
8299   });
8300 }
8301 
8302 bool LoopVectorizationPlanner::getDecisionAndClampRange(
8303     const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
8304   assert(!Range.isEmpty() && "Trying to test an empty VF range.");
8305   bool PredicateAtRangeStart = Predicate(Range.Start);
8306 
8307   for (ElementCount TmpVF = Range.Start * 2;
8308        ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
8309     if (Predicate(TmpVF) != PredicateAtRangeStart) {
8310       Range.End = TmpVF;
8311       break;
8312     }
8313 
8314   return PredicateAtRangeStart;
8315 }
8316 
8317 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
8318 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
8319 /// of VF's starting at a given VF and extending it as much as possible. Each
8320 /// vectorization decision can potentially shorten this sub-range during
8321 /// buildVPlan().
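/// For example, for MinVF = 1 and MaxVF = 16, if the first decision change
/// occurs at VF = 8, two VPlans are built: one covering {1, 2, 4} and one
/// covering {8, 16}.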
8322 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
8323                                            ElementCount MaxVF) {
8324   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8325   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8326     VFRange SubRange = {VF, MaxVFPlusOne};
8327     VPlans.push_back(buildVPlan(SubRange));
8328     VF = SubRange.End;
8329   }
8330 }
8331 
8332 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
8333                                          VPlanPtr &Plan) {
8334   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
8335 
8336   // Look for cached value.
8337   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
8338   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
8339   if (ECEntryIt != EdgeMaskCache.end())
8340     return ECEntryIt->second;
8341 
8342   VPValue *SrcMask = createBlockInMask(Src, Plan);
8343 
8344   // The terminator has to be a branch inst!
8345   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
8346   assert(BI && "Unexpected terminator found");
8347 
8348   if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
8349     return EdgeMaskCache[Edge] = SrcMask;
8350 
8351   // If source is an exiting block, we know the exit edge is dynamically dead
8352   // in the vector loop, and thus we don't need to restrict the mask.  Avoid
8353   // adding uses of an otherwise potentially dead instruction.
8354   if (OrigLoop->isLoopExiting(Src))
8355     return EdgeMaskCache[Edge] = SrcMask;
8356 
8357   VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition());
8358   assert(EdgeMask && "No Edge Mask found for condition");
8359 
8360   if (BI->getSuccessor(0) != Dst)
8361     EdgeMask = Builder.createNot(EdgeMask, BI->getDebugLoc());
8362 
8363   if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND.
8364     // The condition is 'SrcMask && EdgeMask', which is equivalent to
8365     // 'select i1 SrcMask, i1 EdgeMask, i1 false'.
8366     // The select version does not introduce new UB if SrcMask is false and
8367     // EdgeMask is poison. Using 'and' here introduces undefined behavior.
8368     VPValue *False = Plan->getOrAddVPValue(
8369         ConstantInt::getFalse(BI->getCondition()->getType()));
8370     EdgeMask =
8371         Builder.createSelect(SrcMask, EdgeMask, False, BI->getDebugLoc());
8372   }
8373 
8374   return EdgeMaskCache[Edge] = EdgeMask;
8375 }
8376 
8377 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
8378   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
8379 
8380   // Look for cached value.
8381   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
8382   if (BCEntryIt != BlockMaskCache.end())
8383     return BCEntryIt->second;
8384 
8385   // All-one mask is modelled as no-mask following the convention for masked
8386   // load/store/gather/scatter. Initialize BlockMask to no-mask.
8387   VPValue *BlockMask = nullptr;
8388 
8389   if (OrigLoop->getHeader() == BB) {
8390     if (!CM.blockNeedsPredicationForAnyReason(BB))
8391       return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
8392 
    // Introduce the early-exit compare IV <= BTC to form the header block mask.
    // This is used instead of IV < TC because TC may wrap, unlike BTC. Start by
    // constructing the desired canonical IV in the header block as its first
    // non-phi instruction.
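    // Illustrative example: for an i8 IV running 256 iterations, TC wraps to
    // 0 so 'IV < TC' would be false in every lane, while BTC = 255 is
    // representable and 'IV <= BTC' yields the correct all-true header mask.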
8397     assert(CM.foldTailByMasking() && "must fold the tail");
8398     VPBasicBlock *HeaderVPBB = Plan->getEntry()->getEntryBasicBlock();
    auto NewInsertionPoint = HeaderVPBB->getFirstNonPhi();
    auto *IV = new VPWidenCanonicalIVRecipe(Plan->getCanonicalIV());
    HeaderVPBB->insert(IV, NewInsertionPoint);
8402 
8403     VPBuilder::InsertPointGuard Guard(Builder);
8404     Builder.setInsertPoint(HeaderVPBB, NewInsertionPoint);
8405     if (CM.TTI.emitGetActiveLaneMask()) {
8406       VPValue *TC = Plan->getOrCreateTripCount();
8407       BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV, TC});
8408     } else {
8409       VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
8410       BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
8411     }
8412     return BlockMaskCache[BB] = BlockMask;
8413   }
8414 
8415   // This is the block mask. We OR all incoming edges.
8416   for (auto *Predecessor : predecessors(BB)) {
8417     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
8418     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
8419       return BlockMaskCache[BB] = EdgeMask;
8420 
    if (!BlockMask) { // BlockMask still has its initial nullptr value.
8422       BlockMask = EdgeMask;
8423       continue;
8424     }
8425 
8426     BlockMask = Builder.createOr(BlockMask, EdgeMask, {});
8427   }
8428 
8429   return BlockMaskCache[BB] = BlockMask;
8430 }
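
// As an illustration (hypothetical blocks %then/%else guarded by %cond), the
// masks computed above form:
//   mask(%then)  = mask(%header) && %cond
//   mask(%else)  = mask(%header) && !%cond
//   mask(%merge) = mask(%then) || mask(%else)
// so OR-ing all incoming edge masks reconstructs each block's execution mask.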
8431 
8432 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I,
8433                                                 ArrayRef<VPValue *> Operands,
8434                                                 VFRange &Range,
8435                                                 VPlanPtr &Plan) {
8436   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
8437          "Must be called with either a load or store");
8438 
8439   auto willWiden = [&](ElementCount VF) -> bool {
8440     if (VF.isScalar())
8441       return false;
8442     LoopVectorizationCostModel::InstWidening Decision =
8443         CM.getWideningDecision(I, VF);
8444     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
8445            "CM decision should be taken at this point.");
8446     if (Decision == LoopVectorizationCostModel::CM_Interleave)
8447       return true;
8448     if (CM.isScalarAfterVectorization(I, VF) ||
8449         CM.isProfitableToScalarize(I, VF))
8450       return false;
8451     return Decision != LoopVectorizationCostModel::CM_Scalarize;
8452   };
8453 
8454   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8455     return nullptr;
8456 
8457   VPValue *Mask = nullptr;
8458   if (Legal->isMaskRequired(I))
8459     Mask = createBlockInMask(I->getParent(), Plan);
8460 
8461   // Determine if the pointer operand of the access is either consecutive or
8462   // reverse consecutive.
8463   LoopVectorizationCostModel::InstWidening Decision =
8464       CM.getWideningDecision(I, Range.Start);
8465   bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse;
8466   bool Consecutive =
8467       Reverse || Decision == LoopVectorizationCostModel::CM_Widen;
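  // For example (illustrative): a load of A[i] is typically CM_Widen
  // (consecutive), a load of A[N - i] is CM_Widen_Reverse (reverse
  // consecutive), and a load of A[B[i]] is neither and becomes a gather.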
8468 
8469   if (LoadInst *Load = dyn_cast<LoadInst>(I))
8470     return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask,
8471                                               Consecutive, Reverse);
8472 
8473   StoreInst *Store = cast<StoreInst>(I);
8474   return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0],
8475                                             Mask, Consecutive, Reverse);
8476 }
8477 
8478 static VPWidenIntOrFpInductionRecipe *
8479 createWidenInductionRecipe(PHINode *Phi, Instruction *PhiOrTrunc,
8480                            VPValue *Start, const InductionDescriptor &IndDesc,
8481                            LoopVectorizationCostModel &CM, Loop &OrigLoop,
8482                            VFRange &Range) {
8483   // Returns true if an instruction \p I should be scalarized instead of
8484   // vectorized for the chosen vectorization factor.
8485   auto ShouldScalarizeInstruction = [&CM](Instruction *I, ElementCount VF) {
8486     return CM.isScalarAfterVectorization(I, VF) ||
8487            CM.isProfitableToScalarize(I, VF);
8488   };
8489 
8490   bool NeedsScalarIV = LoopVectorizationPlanner::getDecisionAndClampRange(
8491       [&](ElementCount VF) {
8492         // Returns true if we should generate a scalar version of \p IV.
8493         if (ShouldScalarizeInstruction(PhiOrTrunc, VF))
8494           return true;
8495         auto isScalarInst = [&](User *U) -> bool {
8496           auto *I = cast<Instruction>(U);
8497           return OrigLoop.contains(I) && ShouldScalarizeInstruction(I, VF);
8498         };
8499         return any_of(PhiOrTrunc->users(), isScalarInst);
8500       },
8501       Range);
8502   bool NeedsScalarIVOnly = LoopVectorizationPlanner::getDecisionAndClampRange(
8503       [&](ElementCount VF) {
8504         return ShouldScalarizeInstruction(PhiOrTrunc, VF);
8505       },
8506       Range);
8507   assert(IndDesc.getStartValue() ==
8508          Phi->getIncomingValueForBlock(OrigLoop.getLoopPreheader()));
8509   if (auto *TruncI = dyn_cast<TruncInst>(PhiOrTrunc)) {
8510     return new VPWidenIntOrFpInductionRecipe(Phi, Start, IndDesc, TruncI,
8511                                              NeedsScalarIV, !NeedsScalarIVOnly);
8512   }
8513   assert(isa<PHINode>(PhiOrTrunc) && "must be a phi node here");
8514   return new VPWidenIntOrFpInductionRecipe(Phi, Start, IndDesc, NeedsScalarIV,
8515                                            !NeedsScalarIVOnly);
8516 }
8517 
8518 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionPHI(
8519     PHINode *Phi, ArrayRef<VPValue *> Operands, VFRange &Range) const {
8520 
8521   // Check if this is an integer or fp induction. If so, build the recipe that
8522   // produces its scalar and vector values.
8523   if (auto *II = Legal->getIntOrFpInductionDescriptor(Phi))
8524     return createWidenInductionRecipe(Phi, Phi, Operands[0], *II, CM, *OrigLoop,
8525                                       Range);
8526 
8527   return nullptr;
8528 }
8529 
8530 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate(
8531     TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range,
8532     VPlan &Plan) const {
8533   // Optimize the special case where the source is a constant integer
8534   // induction variable. Notice that we can only optimize the 'trunc' case
8535   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
8536   // (c) other casts depend on pointer size.
8537 
8538   // Determine whether \p K is a truncation based on an induction variable that
8539   // can be optimized.
8540   auto isOptimizableIVTruncate =
8541       [&](Instruction *K) -> std::function<bool(ElementCount)> {
8542     return [=](ElementCount VF) -> bool {
8543       return CM.isOptimizableIVTruncate(K, VF);
8544     };
8545   };
8546 
8547   if (LoopVectorizationPlanner::getDecisionAndClampRange(
8548           isOptimizableIVTruncate(I), Range)) {
8549 
8550     auto *Phi = cast<PHINode>(I->getOperand(0));
8551     const InductionDescriptor &II = *Legal->getIntOrFpInductionDescriptor(Phi);
8552     VPValue *Start = Plan.getOrAddVPValue(II.getStartValue());
8553     return createWidenInductionRecipe(Phi, I, Start, II, CM, *OrigLoop, Range);
8554   }
8555   return nullptr;
8556 }
8557 
8558 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi,
8559                                                 ArrayRef<VPValue *> Operands,
8560                                                 VPlanPtr &Plan) {
8561   // If all incoming values are equal, the incoming VPValue can be used directly
8562   // instead of creating a new VPBlendRecipe.
8563   VPValue *FirstIncoming = Operands[0];
8564   if (all_of(Operands, [FirstIncoming](const VPValue *Inc) {
8565         return FirstIncoming == Inc;
8566       })) {
8567     return Operands[0];
8568   }
8569 
8570   // We know that all PHIs in non-header blocks are converted into selects, so
8571   // we don't have to worry about the insertion order and we can just use the
8572   // builder. At this point we generate the predication tree. There may be
8573   // duplications since this is a simple recursive scan, but future
8574   // optimizations will clean it up.
8575   SmallVector<VPValue *, 2> OperandsWithMask;
8576   unsigned NumIncoming = Phi->getNumIncomingValues();
8577 
8578   for (unsigned In = 0; In < NumIncoming; In++) {
8579     VPValue *EdgeMask =
8580       createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
8581     assert((EdgeMask || NumIncoming == 1) &&
8582            "Multiple predecessors with one having a full mask");
8583     OperandsWithMask.push_back(Operands[In]);
8584     if (EdgeMask)
8585       OperandsWithMask.push_back(EdgeMask);
8586   }
8587   return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask));
8588 }
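
// Sketch of the lowering (hypothetical IR): a phi such as
//   %p = phi i32 [ %a, %then ], [ %b, %else ]
// becomes a VPBlendRecipe with operands (%a, mask(then->merge), %b,
// mask(else->merge)), which is later materialized as a chain of vector
// selects.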
8589 
8590 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
8591                                                    ArrayRef<VPValue *> Operands,
8592                                                    VFRange &Range) const {
8593 
8594   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8595       [this, CI](ElementCount VF) {
8596         return CM.isScalarWithPredication(CI, VF);
8597       },
8598       Range);
8599 
8600   if (IsPredicated)
8601     return nullptr;
8602 
8603   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8604   if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
8605              ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
8606              ID == Intrinsic::pseudoprobe ||
8607              ID == Intrinsic::experimental_noalias_scope_decl))
8608     return nullptr;
8609 
8610   auto willWiden = [&](ElementCount VF) -> bool {
8611     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    // The following case may be scalarized depending on the VF.
    // The flag indicates whether to widen the call with a vector intrinsic or
    // an ordinary vector library call, whichever the cost model finds cheaper.
8616     bool NeedToScalarize = false;
8617     InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
8618     InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0;
8619     bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
8620     return UseVectorIntrinsic || !NeedToScalarize;
8621   };
8622 
8623   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8624     return nullptr;
8625 
8626   ArrayRef<VPValue *> Ops = Operands.take_front(CI->arg_size());
8627   return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end()));
8628 }
8629 
8630 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
8631   assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
8632          !isa<StoreInst>(I) && "Instruction should have been handled earlier");
8633   // Instruction should be widened, unless it is scalar after vectorization,
8634   // scalarization is profitable or it is predicated.
8635   auto WillScalarize = [this, I](ElementCount VF) -> bool {
8636     return CM.isScalarAfterVectorization(I, VF) ||
8637            CM.isProfitableToScalarize(I, VF) ||
8638            CM.isScalarWithPredication(I, VF);
8639   };
8640   return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
8641                                                              Range);
8642 }
8643 
8644 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
8645                                            ArrayRef<VPValue *> Operands) const {
8646   auto IsVectorizableOpcode = [](unsigned Opcode) {
8647     switch (Opcode) {
8648     case Instruction::Add:
8649     case Instruction::And:
8650     case Instruction::AShr:
8651     case Instruction::BitCast:
8652     case Instruction::FAdd:
8653     case Instruction::FCmp:
8654     case Instruction::FDiv:
8655     case Instruction::FMul:
8656     case Instruction::FNeg:
8657     case Instruction::FPExt:
8658     case Instruction::FPToSI:
8659     case Instruction::FPToUI:
8660     case Instruction::FPTrunc:
8661     case Instruction::FRem:
8662     case Instruction::FSub:
8663     case Instruction::ICmp:
8664     case Instruction::IntToPtr:
8665     case Instruction::LShr:
8666     case Instruction::Mul:
8667     case Instruction::Or:
8668     case Instruction::PtrToInt:
8669     case Instruction::SDiv:
8670     case Instruction::Select:
8671     case Instruction::SExt:
8672     case Instruction::Shl:
8673     case Instruction::SIToFP:
8674     case Instruction::SRem:
8675     case Instruction::Sub:
8676     case Instruction::Trunc:
8677     case Instruction::UDiv:
8678     case Instruction::UIToFP:
8679     case Instruction::URem:
8680     case Instruction::Xor:
8681     case Instruction::ZExt:
8682       return true;
8683     }
8684     return false;
8685   };
8686 
8687   if (!IsVectorizableOpcode(I->getOpcode()))
8688     return nullptr;
8689 
8690   // Success: widen this instruction.
8691   return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end()));
8692 }
8693 
8694 void VPRecipeBuilder::fixHeaderPhis() {
8695   BasicBlock *OrigLatch = OrigLoop->getLoopLatch();
8696   for (VPHeaderPHIRecipe *R : PhisToFix) {
8697     auto *PN = cast<PHINode>(R->getUnderlyingValue());
8698     VPRecipeBase *IncR =
8699         getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch)));
8700     R->addOperand(IncR->getVPSingleValue());
8701   }
8702 }
8703 
8704 VPBasicBlock *VPRecipeBuilder::handleReplication(
8705     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
8706     VPlanPtr &Plan) {
8707   bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
8708       [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
8709       Range);
8710 
8711   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8712       [&](ElementCount VF) { return CM.isPredicatedInst(I, VF, IsUniform); },
8713       Range);
8714 
8715   // Even if the instruction is not marked as uniform, there are certain
8716   // intrinsic calls that can be effectively treated as such, so we check for
8717   // them here. Conservatively, we only do this for scalable vectors, since
8718   // for fixed-width VFs we can always fall back on full scalarization.
8719   if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) {
8720     switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
8721     case Intrinsic::assume:
8722     case Intrinsic::lifetime_start:
8723     case Intrinsic::lifetime_end:
8724       // For scalable vectors if one of the operands is variant then we still
8725       // want to mark as uniform, which will generate one instruction for just
8726       // the first lane of the vector. We can't scalarize the call in the same
8727       // way as for fixed-width vectors because we don't know how many lanes
8728       // there are.
8729       //
8730       // The reasons for doing it this way for scalable vectors are:
      //   1. For the assume intrinsic generating the instruction for the first
      //      lane is still better than not generating any at all. For
      //      example, the input may be a splat across all lanes.
8734       //   2. For the lifetime start/end intrinsics the pointer operand only
8735       //      does anything useful when the input comes from a stack object,
8736       //      which suggests it should always be uniform. For non-stack objects
8737       //      the effect is to poison the object, which still allows us to
8738       //      remove the call.
8739       IsUniform = true;
8740       break;
8741     default:
8742       break;
8743     }
8744   }
8745 
8746   auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
8747                                        IsUniform, IsPredicated);
8748   setRecipe(I, Recipe);
8749   Plan->addVPValue(I, Recipe);
8750 
8751   // Find if I uses a predicated instruction. If so, it will use its scalar
8752   // value. Avoid hoisting the insert-element which packs the scalar value into
8753   // a vector value, as that happens iff all users use the vector value.
8754   for (VPValue *Op : Recipe->operands()) {
8755     auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef());
8756     if (!PredR)
8757       continue;
8758     auto *RepR =
8759         cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef());
8760     assert(RepR->isPredicated() &&
8761            "expected Replicate recipe to be predicated");
8762     RepR->setAlsoPack(false);
8763   }
8764 
8765   // Finalize the recipe for Instr, first if it is not predicated.
8766   if (!IsPredicated) {
8767     LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
8768     VPBB->appendRecipe(Recipe);
8769     return VPBB;
8770   }
8771   LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
8772 
8773   VPBlockBase *SingleSucc = VPBB->getSingleSuccessor();
8774   assert(SingleSucc && "VPBB must have a single successor when handling "
8775                        "predicated replication.");
8776   VPBlockUtils::disconnectBlocks(VPBB, SingleSucc);
8777   // Record predicated instructions for above packing optimizations.
8778   VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
8779   VPBlockUtils::insertBlockAfter(Region, VPBB);
8780   auto *RegSucc = new VPBasicBlock();
8781   VPBlockUtils::insertBlockAfter(RegSucc, Region);
8782   VPBlockUtils::connectBlocks(RegSucc, SingleSucc);
8783   return RegSucc;
8784 }
8785 
8786 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
8787                                                       VPRecipeBase *PredRecipe,
8788                                                       VPlanPtr &Plan) {
8789   // Instructions marked for predication are replicated and placed under an
8790   // if-then construct to prevent side-effects.
8791 
8792   // Generate recipes to compute the block mask for this region.
8793   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
8794 
8795   // Build the triangular if-then region.
8796   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
8797   assert(Instr->getParent() && "Predicated instruction not in any basic block");
8798   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
8799   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
8800   auto *PHIRecipe = Instr->getType()->isVoidTy()
8801                         ? nullptr
8802                         : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr));
8803   if (PHIRecipe) {
8804     Plan->removeVPValueFor(Instr);
8805     Plan->addVPValue(Instr, PHIRecipe);
8806   }
8807   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
8808   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
8809   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
8810 
8811   // Note: first set Entry as region entry and then connect successors starting
8812   // from it in order, to propagate the "parent" of each VPBasicBlock.
8813   VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
8814   VPBlockUtils::connectBlocks(Pred, Exit);
8815 
8816   return Region;
8817 }
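
// The region built above is a triangle; for a predicated store it appears in
// VPlan dumps roughly as (names follow the "pred.<opcode>" scheme above):
//   pred.store.entry:    BRANCH-ON-MASK BlockInMask
//   pred.store.if:       the predicated replicate recipe
//   pred.store.continue: PHI-PREDICATED-INSTRUCTION (only for non-void Instr)
// with edges entry->if, entry->continue and if->continue.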
8818 
8819 VPRecipeOrVPValueTy
8820 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
8821                                         ArrayRef<VPValue *> Operands,
8822                                         VFRange &Range, VPlanPtr &Plan) {
8823   // First, check for specific widening recipes that deal with calls, memory
8824   // operations, inductions and Phi nodes.
8825   if (auto *CI = dyn_cast<CallInst>(Instr))
8826     return toVPRecipeResult(tryToWidenCall(CI, Operands, Range));
8827 
8828   if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8829     return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan));
8830 
8831   VPRecipeBase *Recipe;
8832   if (auto Phi = dyn_cast<PHINode>(Instr)) {
8833     if (Phi->getParent() != OrigLoop->getHeader())
8834       return tryToBlend(Phi, Operands, Plan);
8835     if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands, Range)))
8836       return toVPRecipeResult(Recipe);
8837 
8838     VPHeaderPHIRecipe *PhiRecipe = nullptr;
8839     if (Legal->isReductionVariable(Phi) || Legal->isFirstOrderRecurrence(Phi)) {
8840       VPValue *StartV = Operands[0];
8841       if (Legal->isReductionVariable(Phi)) {
8842         const RecurrenceDescriptor &RdxDesc =
8843             Legal->getReductionVars().find(Phi)->second;
8844         assert(RdxDesc.getRecurrenceStartValue() ==
8845                Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8846         PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV,
8847                                              CM.isInLoopReduction(Phi),
8848                                              CM.useOrderedReductions(RdxDesc));
8849       } else {
8850         PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV);
8851       }
8852 
8853       // Record the incoming value from the backedge, so we can add the incoming
8854       // value from the backedge after all recipes have been created.
8855       recordRecipeOf(cast<Instruction>(
8856           Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch())));
8857       PhisToFix.push_back(PhiRecipe);
8858     } else {
8859       // TODO: record backedge value for remaining pointer induction phis.
8860       assert(Phi->getType()->isPointerTy() &&
8861              "only pointer phis should be handled here");
8862       assert(Legal->getInductionVars().count(Phi) &&
8863              "Not an induction variable");
8864       InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
8865       VPValue *Start = Plan->getOrAddVPValue(II.getStartValue());
8866       PhiRecipe = new VPWidenPHIRecipe(Phi, Start);
8867     }
8868 
8869     return toVPRecipeResult(PhiRecipe);
8870   }
8871 
8872   if (isa<TruncInst>(Instr) &&
8873       (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands,
8874                                                Range, *Plan)))
8875     return toVPRecipeResult(Recipe);
8876 
8877   if (!shouldWiden(Instr, Range))
8878     return nullptr;
8879 
8880   if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
8881     return toVPRecipeResult(new VPWidenGEPRecipe(
8882         GEP, make_range(Operands.begin(), Operands.end()), OrigLoop));
8883 
8884   if (auto *SI = dyn_cast<SelectInst>(Instr)) {
8885     bool InvariantCond =
8886         PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
8887     return toVPRecipeResult(new VPWidenSelectRecipe(
8888         *SI, make_range(Operands.begin(), Operands.end()), InvariantCond));
8889   }
8890 
8891   return toVPRecipeResult(tryToWiden(Instr, Operands));
8892 }
8893 
8894 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8895                                                         ElementCount MaxVF) {
8896   assert(OrigLoop->isInnermost() && "Inner loop expected.");
8897 
8898   // Collect instructions from the original loop that will become trivially dead
8899   // in the vectorized loop. We don't need to vectorize these instructions. For
8900   // example, original induction update instructions can become dead because we
8901   // separately emit induction "steps" when generating code for the new loop.
8902   // Similarly, we create a new latch condition when setting up the structure
8903   // of the new loop, so the old one can become dead.
8904   SmallPtrSet<Instruction *, 4> DeadInstructions;
8905   collectTriviallyDeadInstructions(DeadInstructions);
8906 
8907   // Add assume instructions we need to drop to DeadInstructions, to prevent
8908   // them from being added to the VPlan.
  // TODO: We only need to drop assumes in blocks that get flattened. If the
  // control flow is preserved, we should keep them.
8911   auto &ConditionalAssumes = Legal->getConditionalAssumes();
8912   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
8913 
8914   MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
8915   // Dead instructions do not need sinking. Remove them from SinkAfter.
8916   for (Instruction *I : DeadInstructions)
8917     SinkAfter.erase(I);
8918 
8919   // Cannot sink instructions after dead instructions (there won't be any
8920   // recipes for them). Instead, find the first non-dead previous instruction.
8921   for (auto &P : Legal->getSinkAfter()) {
8922     Instruction *SinkTarget = P.second;
8923     Instruction *FirstInst = &*SinkTarget->getParent()->begin();
8924     (void)FirstInst;
8925     while (DeadInstructions.contains(SinkTarget)) {
8926       assert(
8927           SinkTarget != FirstInst &&
8928           "Must find a live instruction (at least the one feeding the "
8929           "first-order recurrence PHI) before reaching beginning of the block");
8930       SinkTarget = SinkTarget->getPrevNode();
8931       assert(SinkTarget != P.first &&
8932              "sink source equals target, no sinking required");
8933     }
8934     P.second = SinkTarget;
8935   }
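  // For example (illustrative): if SinkTarget is a dead induction update such
  // as '%iv.next = add i64 %iv, 1', the walk above retargets the sink to the
  // closest preceding live instruction in the block, which is guaranteed to
  // have a recipe.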
8936 
8937   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8938   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8939     VFRange SubRange = {VF, MaxVFPlusOne};
8940     VPlans.push_back(
8941         buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
8942     VF = SubRange.End;
8943   }
8944 }
8945 
8946 // Add a VPCanonicalIVPHIRecipe starting at 0 to the header, a
8947 // CanonicalIVIncrement{NUW} VPInstruction to increment it by VF * UF and a
8948 // BranchOnCount VPInstruction to the latch.
8949 static void addCanonicalIVRecipes(VPlan &Plan, Type *IdxTy, DebugLoc DL,
8950                                   bool HasNUW, bool IsVPlanNative) {
8951   Value *StartIdx = ConstantInt::get(IdxTy, 0);
8952   auto *StartV = Plan.getOrAddVPValue(StartIdx);
8953 
8954   auto *CanonicalIVPHI = new VPCanonicalIVPHIRecipe(StartV, DL);
8955   VPRegionBlock *TopRegion = Plan.getVectorLoopRegion();
8956   VPBasicBlock *Header = TopRegion->getEntryBasicBlock();
8957   if (IsVPlanNative)
8958     Header = cast<VPBasicBlock>(Header->getSingleSuccessor());
8959   Header->insert(CanonicalIVPHI, Header->begin());
8960 
8961   auto *CanonicalIVIncrement =
8962       new VPInstruction(HasNUW ? VPInstruction::CanonicalIVIncrementNUW
8963                                : VPInstruction::CanonicalIVIncrement,
8964                         {CanonicalIVPHI}, DL);
8965   CanonicalIVPHI->addOperand(CanonicalIVIncrement);
8966 
8967   VPBasicBlock *EB = TopRegion->getExitBasicBlock();
8968   if (IsVPlanNative) {
8969     EB = cast<VPBasicBlock>(EB->getSinglePredecessor());
8970     EB->setCondBit(nullptr);
8971   }
8972   EB->appendRecipe(CanonicalIVIncrement);
8973 
8974   auto *BranchOnCount =
8975       new VPInstruction(VPInstruction::BranchOnCount,
8976                         {CanonicalIVIncrement, &Plan.getVectorTripCount()}, DL);
8977   EB->appendRecipe(BranchOnCount);
8978 }
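
// In a VPlan dump the recipes added above appear roughly as (illustrative
// value names):
//   EMIT vp<%iv> = CANONICAL-INDUCTION
//   ...
//   EMIT vp<%iv.next> = VF * UF + vp<%iv>
//   EMIT branch-on-count vp<%iv.next> vp<%vtc>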
8979 
8980 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
8981     VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
8982     const MapVector<Instruction *, Instruction *> &SinkAfter) {
8983 
8984   SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
8985 
8986   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
8987 
8988   // ---------------------------------------------------------------------------
8989   // Pre-construction: record ingredients whose recipes we'll need to further
8990   // process after constructing the initial VPlan.
8991   // ---------------------------------------------------------------------------
8992 
8993   // Mark instructions we'll need to sink later and their targets as
8994   // ingredients whose recipe we'll need to record.
8995   for (auto &Entry : SinkAfter) {
8996     RecipeBuilder.recordRecipeOf(Entry.first);
8997     RecipeBuilder.recordRecipeOf(Entry.second);
8998   }
8999   for (auto &Reduction : CM.getInLoopReductionChains()) {
9000     PHINode *Phi = Reduction.first;
9001     RecurKind Kind =
9002         Legal->getReductionVars().find(Phi)->second.getRecurrenceKind();
9003     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9004 
9005     RecipeBuilder.recordRecipeOf(Phi);
9006     for (auto &R : ReductionOperations) {
9007       RecipeBuilder.recordRecipeOf(R);
      // For min/max reductions, where we have a pair of icmp/select, we also
      // need to record the ICmp recipe, so it can be removed later.
9010       assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
9011              "Only min/max recurrences allowed for inloop reductions");
9012       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
9013         RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
9014     }
9015   }
9016 
9017   // For each interleave group which is relevant for this (possibly trimmed)
9018   // Range, add it to the set of groups to be later applied to the VPlan and add
9019   // placeholders for its members' Recipes which we'll be replacing with a
9020   // single VPInterleaveRecipe.
9021   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
9022     auto applyIG = [IG, this](ElementCount VF) -> bool {
9023       return (VF.isVector() && // Query is illegal for VF == 1
9024               CM.getWideningDecision(IG->getInsertPos(), VF) ==
9025                   LoopVectorizationCostModel::CM_Interleave);
9026     };
9027     if (!getDecisionAndClampRange(applyIG, Range))
9028       continue;
9029     InterleaveGroups.insert(IG);
9030     for (unsigned i = 0; i < IG->getFactor(); i++)
9031       if (Instruction *Member = IG->getMember(i))
9032         RecipeBuilder.recordRecipeOf(Member);
  }
9034 
9035   // ---------------------------------------------------------------------------
9036   // Build initial VPlan: Scan the body of the loop in a topological order to
9037   // visit each basic block after having visited its predecessor basic blocks.
9038   // ---------------------------------------------------------------------------
9039 
9040   // Create initial VPlan skeleton, with separate header and latch blocks.
9041   VPBasicBlock *HeaderVPBB = new VPBasicBlock();
9042   VPBasicBlock *LatchVPBB = new VPBasicBlock("vector.latch");
9043   VPBlockUtils::insertBlockAfter(LatchVPBB, HeaderVPBB);
9044   auto *TopRegion = new VPRegionBlock(HeaderVPBB, LatchVPBB, "vector loop");
9045   auto Plan = std::make_unique<VPlan>(TopRegion);
9046 
9047   Instruction *DLInst =
9048       getDebugLocFromInstOrOperands(Legal->getPrimaryInduction());
9049   addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(),
9050                         DLInst ? DLInst->getDebugLoc() : DebugLoc(),
9051                         !CM.foldTailByMasking(), false);
9052 
9053   // Scan the body of the loop in a topological order to visit each basic block
9054   // after having visited its predecessor basic blocks.
9055   LoopBlocksDFS DFS(OrigLoop);
9056   DFS.perform(LI);
9057 
9058   VPBasicBlock *VPBB = HeaderVPBB;
9059   SmallVector<VPWidenIntOrFpInductionRecipe *> InductionsToMove;
9060   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
9061     // Relevant instructions from basic block BB will be grouped into VPRecipe
9062     // ingredients and fill a new VPBasicBlock.
9063     unsigned VPBBsForBB = 0;
9064     VPBB->setName(BB->getName());
9065     Builder.setInsertPoint(VPBB);
9066 
9067     // Introduce each ingredient into VPlan.
    // TODO: Model and preserve debug intrinsics in VPlan.
9069     for (Instruction &I : BB->instructionsWithoutDebug()) {
9070       Instruction *Instr = &I;
9071 
9072       // First filter out irrelevant instructions, to ensure no recipes are
9073       // built for them.
9074       if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
9075         continue;
9076 
9077       SmallVector<VPValue *, 4> Operands;
9078       auto *Phi = dyn_cast<PHINode>(Instr);
9079       if (Phi && Phi->getParent() == OrigLoop->getHeader()) {
9080         Operands.push_back(Plan->getOrAddVPValue(
9081             Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())));
9082       } else {
9083         auto OpRange = Plan->mapToVPValues(Instr->operands());
9084         Operands = {OpRange.begin(), OpRange.end()};
9085       }
9086       if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe(
9087               Instr, Operands, Range, Plan)) {
9088         // If Instr can be simplified to an existing VPValue, use it.
9089         if (RecipeOrValue.is<VPValue *>()) {
9090           auto *VPV = RecipeOrValue.get<VPValue *>();
9091           Plan->addVPValue(Instr, VPV);
9092           // If the re-used value is a recipe, register the recipe for the
9093           // instruction, in case the recipe for Instr needs to be recorded.
9094           if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef()))
9095             RecipeBuilder.setRecipe(Instr, R);
9096           continue;
9097         }
9098         // Otherwise, add the new recipe.
9099         VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>();
9100         for (auto *Def : Recipe->definedValues()) {
9101           auto *UV = Def->getUnderlyingValue();
9102           Plan->addVPValue(UV, Def);
9103         }
9104 
9105         if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) &&
9106             HeaderVPBB->getFirstNonPhi() != VPBB->end()) {
9107           // Keep track of VPWidenIntOrFpInductionRecipes not in the phi section
9108           // of the header block. That can happen for truncates of induction
9109           // variables. Those recipes are moved to the phi section of the header
9110           // block after applying SinkAfter, which relies on the original
9111           // position of the trunc.
9112           assert(isa<TruncInst>(Instr));
9113           InductionsToMove.push_back(
9114               cast<VPWidenIntOrFpInductionRecipe>(Recipe));
9115         }
9116         RecipeBuilder.setRecipe(Instr, Recipe);
9117         VPBB->appendRecipe(Recipe);
9118         continue;
9119       }
9120 
      // Otherwise, if all widening options failed, the instruction is to be
      // replicated. This may create a successor for VPBB.
9123       VPBasicBlock *NextVPBB =
9124           RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan);
9125       if (NextVPBB != VPBB) {
9126         VPBB = NextVPBB;
9127         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
9128                                     : "");
9129       }
9130     }
9131 
9132     VPBlockUtils::insertBlockAfter(new VPBasicBlock(), VPBB);
9133     VPBB = cast<VPBasicBlock>(VPBB->getSingleSuccessor());
9134   }
9135 
9136   // Fold the last, empty block into its predecessor.
9137   VPBB = VPBlockUtils::tryToMergeBlockIntoPredecessor(VPBB);
9138   assert(VPBB && "expected to fold last (empty) block");
9139   // After here, VPBB should not be used.
9140   VPBB = nullptr;
9141 
9142   assert(isa<VPRegionBlock>(Plan->getEntry()) &&
9143          !Plan->getEntry()->getEntryBasicBlock()->empty() &&
9144          "entry block must be set to a VPRegionBlock having a non-empty entry "
9145          "VPBasicBlock");
9146   RecipeBuilder.fixHeaderPhis();
9147 
9148   // ---------------------------------------------------------------------------
9149   // Transform initial VPlan: Apply previously taken decisions, in order, to
9150   // bring the VPlan to its final state.
9151   // ---------------------------------------------------------------------------
9152 
9153   // Apply Sink-After legal constraints.
9154   auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * {
9155     auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent());
9156     if (Region && Region->isReplicator()) {
9157       assert(Region->getNumSuccessors() == 1 &&
9158              Region->getNumPredecessors() == 1 && "Expected SESE region!");
9159       assert(R->getParent()->size() == 1 &&
9160              "A recipe in an original replicator region must be the only "
9161              "recipe in its block");
9162       return Region;
9163     }
9164     return nullptr;
9165   };
9166   for (auto &Entry : SinkAfter) {
9167     VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
9168     VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
9169 
9170     auto *TargetRegion = GetReplicateRegion(Target);
9171     auto *SinkRegion = GetReplicateRegion(Sink);
9172     if (!SinkRegion) {
9173       // If the sink source is not a replicate region, sink the recipe directly.
9174       if (TargetRegion) {
9175         // The target is in a replication region, make sure to move Sink to
9176         // the block after it, not into the replication region itself.
9177         VPBasicBlock *NextBlock =
9178             cast<VPBasicBlock>(TargetRegion->getSuccessors().front());
9179         Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi());
9180       } else
9181         Sink->moveAfter(Target);
9182       continue;
9183     }
9184 
9185     // The sink source is in a replicate region. Unhook the region from the CFG.
9186     auto *SinkPred = SinkRegion->getSinglePredecessor();
9187     auto *SinkSucc = SinkRegion->getSingleSuccessor();
9188     VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion);
9189     VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc);
9190     VPBlockUtils::connectBlocks(SinkPred, SinkSucc);
9191 
9192     if (TargetRegion) {
9193       // The target recipe is also in a replicate region, move the sink region
9194       // after the target region.
9195       auto *TargetSucc = TargetRegion->getSingleSuccessor();
9196       VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc);
9197       VPBlockUtils::connectBlocks(TargetRegion, SinkRegion);
9198       VPBlockUtils::connectBlocks(SinkRegion, TargetSucc);
9199     } else {
9200       // The sink source is in a replicate region, we need to move the whole
9201       // replicate region, which should only contain a single recipe in the
9202       // main block.
9203       auto *SplitBlock =
9204           Target->getParent()->splitAt(std::next(Target->getIterator()));
9205 
9206       auto *SplitPred = SplitBlock->getSinglePredecessor();
9207 
9208       VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock);
9209       VPBlockUtils::connectBlocks(SplitPred, SinkRegion);
9210       VPBlockUtils::connectBlocks(SinkRegion, SplitBlock);
9211     }
9212   }
9213 
9214   VPlanTransforms::removeRedundantCanonicalIVs(*Plan);
9215   VPlanTransforms::removeRedundantInductionCasts(*Plan);
9216 
9217   // Now that sink-after is done, move induction recipes for optimized truncates
9218   // to the phi section of the header block.
9219   for (VPWidenIntOrFpInductionRecipe *Ind : InductionsToMove)
9220     Ind->moveBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi());
9221 
9222   // Adjust the recipes for any inloop reductions.
9223   adjustRecipesForReductions(cast<VPBasicBlock>(TopRegion->getExit()), Plan,
9224                              RecipeBuilder, Range.Start);
9225 
9226   // Introduce a recipe to combine the incoming and previous values of a
9227   // first-order recurrence.
9228   for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) {
9229     auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R);
9230     if (!RecurPhi)
9231       continue;
9232 
9233     VPRecipeBase *PrevRecipe = RecurPhi->getBackedgeRecipe();
9234     VPBasicBlock *InsertBlock = PrevRecipe->getParent();
9235     auto *Region = GetReplicateRegion(PrevRecipe);
9236     if (Region)
9237       InsertBlock = cast<VPBasicBlock>(Region->getSingleSuccessor());
9238     if (Region || PrevRecipe->isPhi())
9239       Builder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi());
9240     else
9241       Builder.setInsertPoint(InsertBlock, std::next(PrevRecipe->getIterator()));
9242 
9243     auto *RecurSplice = cast<VPInstruction>(
9244         Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice,
9245                              {RecurPhi, RecurPhi->getBackedgeValue()}));
9246 
9247     RecurPhi->replaceAllUsesWith(RecurSplice);
9248     // Set the first operand of RecurSplice to RecurPhi again, after replacing
9249     // all users.
9250     RecurSplice->setOperand(0, RecurPhi);
9251   }
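  // Illustrative effect: for a recurrence like 'b[i] = a[i] + a[i - 1]', the
  // splice produces a vector formed from the last lane of the previous
  // iteration's value of a[i] followed by the first VF-1 lanes of the current
  // one, i.e. a shuffle-style splice of the two vectors.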
9252 
9253   // Interleave memory: for each Interleave Group we marked earlier as relevant
9254   // for this VPlan, replace the Recipes widening its memory instructions with a
9255   // single VPInterleaveRecipe at its insertion point.
9256   for (auto IG : InterleaveGroups) {
9257     auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
9258         RecipeBuilder.getRecipe(IG->getInsertPos()));
9259     SmallVector<VPValue *, 4> StoredValues;
9260     for (unsigned i = 0; i < IG->getFactor(); ++i)
9261       if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) {
9262         auto *StoreR =
9263             cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI));
9264         StoredValues.push_back(StoreR->getStoredValue());
9265       }
9266 
9267     auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues,
9268                                         Recipe->getMask());
9269     VPIG->insertBefore(Recipe);
9270     unsigned J = 0;
9271     for (unsigned i = 0; i < IG->getFactor(); ++i)
9272       if (Instruction *Member = IG->getMember(i)) {
9273         if (!Member->getType()->isVoidTy()) {
9274           VPValue *OriginalV = Plan->getVPValue(Member);
9275           Plan->removeVPValueFor(Member);
9276           Plan->addVPValue(Member, VPIG->getVPValue(J));
9277           OriginalV->replaceAllUsesWith(VPIG->getVPValue(J));
9278           J++;
9279         }
9280         RecipeBuilder.getRecipe(Member)->eraseFromParent();
9281       }
9282   }
9283 
9284   // From this point onwards, VPlan-to-VPlan transformations may change the plan
9285   // in ways that accessing values using original IR values is incorrect.
9286   Plan->disableValue2VPValue();
9287 
9288   VPlanTransforms::sinkScalarOperands(*Plan);
9289   VPlanTransforms::mergeReplicateRegions(*Plan);
9290 
9291   std::string PlanName;
9292   raw_string_ostream RSO(PlanName);
9293   ElementCount VF = Range.Start;
9294   Plan->addVF(VF);
9295   RSO << "Initial VPlan for VF={" << VF;
9296   for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) {
9297     Plan->addVF(VF);
9298     RSO << "," << VF;
9299   }
9300   RSO << "},UF>=1";
9301   RSO.flush();
9302   Plan->setName(PlanName);
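  // For example (illustrative values): with Range.Start = 4 and Range.End
  // clamped to 16, the plan is named "Initial VPlan for VF={4,8},UF>=1".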
9303 
9304   // Fold Exit block into its predecessor if possible.
9305   // TODO: Fold block earlier once all VPlan transforms properly maintain a
9306   // VPBasicBlock as exit.
9307   VPBlockUtils::tryToMergeBlockIntoPredecessor(TopRegion->getExit());
9308 
9309   assert(VPlanVerifier::verifyPlanIsValid(*Plan) && "VPlan is invalid");
9310   return Plan;
9311 }
9312 
9313 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
9314   // Outer loop handling: They may require CFG and instruction level
9315   // transformations before even evaluating whether vectorization is profitable.
9316   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
9317   // the vectorization pipeline.
9318   assert(!OrigLoop->isInnermost());
9319   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
9320 
9321   // Create new empty VPlan
9322   auto Plan = std::make_unique<VPlan>();
9323 
9324   // Build hierarchical CFG
9325   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
9326   HCFGBuilder.buildHierarchicalCFG();
9327 
9328   for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
9329        VF *= 2)
9330     Plan->addVF(VF);
9331 
9332   if (EnableVPlanPredication) {
9333     VPlanPredicator VPP(*Plan);
9334     VPP.predicate();
9335 
9336     // Avoid running transformation to recipes until masked code generation in
9337     // VPlan-native path is in place.
9338     return Plan;
9339   }
9340 
9341   SmallPtrSet<Instruction *, 1> DeadInstructions;
9342   VPlanTransforms::VPInstructionsToVPRecipes(
9343       OrigLoop, Plan,
9344       [this](PHINode *P) { return Legal->getIntOrFpInductionDescriptor(P); },
9345       DeadInstructions, *PSE.getSE());
9346 
9347   addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(), DebugLoc(),
9348                         true, true);
9349   return Plan;
9350 }
9351 
9352 // Adjust the recipes for reductions. For in-loop reductions the chain of
9353 // instructions leading from the loop exit instr to the phi need to be converted
9354 // to reductions, with one operand being vector and the other being the scalar
9355 // reduction chain. For other reductions, a select is introduced between the phi
9356 // and live-out recipes when folding the tail.
9357 void LoopVectorizationPlanner::adjustRecipesForReductions(
9358     VPBasicBlock *LatchVPBB, VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder,
9359     ElementCount MinVF) {
9360   for (auto &Reduction : CM.getInLoopReductionChains()) {
9361     PHINode *Phi = Reduction.first;
9362     const RecurrenceDescriptor &RdxDesc =
9363         Legal->getReductionVars().find(Phi)->second;
9364     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9365 
9366     if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc))
9367       continue;
9368 
    // ReductionOperations are ordered top-down from the phi's use to the
    // LoopExitValue. We keep track of the previous item (the Chain) to tell
    // which of the two operands will remain scalar and which will be reduced.
    // For minmax the chain will be the select instructions.
9373     Instruction *Chain = Phi;
9374     for (Instruction *R : ReductionOperations) {
9375       VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
9376       RecurKind Kind = RdxDesc.getRecurrenceKind();
9377 
9378       VPValue *ChainOp = Plan->getVPValue(Chain);
9379       unsigned FirstOpId;
9380       assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
9381              "Only min/max recurrences allowed for inloop reductions");
9382       // Recognize a call to the llvm.fmuladd intrinsic.
9383       bool IsFMulAdd = (Kind == RecurKind::FMulAdd);
9384       assert((!IsFMulAdd || RecurrenceDescriptor::isFMulAddIntrinsic(R)) &&
9385              "Expected instruction to be a call to the llvm.fmuladd intrinsic");
9386       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9387         assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
9388                "Expected to replace a VPWidenSelectSC");
9389         FirstOpId = 1;
9390       } else {
9391         assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe) ||
9392                 (IsFMulAdd && isa<VPWidenCallRecipe>(WidenRecipe))) &&
9393                "Expected to replace a VPWidenSC");
9394         FirstOpId = 0;
9395       }
9396       unsigned VecOpId =
9397           R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
9398       VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));
9399 
9400       auto *CondOp = CM.foldTailByMasking()
9401                          ? RecipeBuilder.createBlockInMask(R->getParent(), Plan)
9402                          : nullptr;
9403 
9404       if (IsFMulAdd) {
9405         // If the instruction is a call to the llvm.fmuladd intrinsic then we
9406         // need to create an fmul recipe to use as the vector operand for the
9407         // fadd reduction.
9408         VPInstruction *FMulRecipe = new VPInstruction(
9409             Instruction::FMul, {VecOp, Plan->getVPValue(R->getOperand(1))});
9410         FMulRecipe->setFastMathFlags(R->getFastMathFlags());
9411         WidenRecipe->getParent()->insert(FMulRecipe,
9412                                          WidenRecipe->getIterator());
9413         VecOp = FMulRecipe;
9414       }
      VPReductionRecipe *RedRecipe =
          new VPReductionRecipe(&RdxDesc, R, ChainOp, VecOp, CondOp, TTI);
      WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
      Plan->removeVPValueFor(R);
      Plan->addVPValue(R, RedRecipe);
      WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator());
      WidenRecipe->eraseFromParent();
9423 
9424       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9425         VPRecipeBase *CompareRecipe =
9426             RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
9427         assert(isa<VPWidenRecipe>(CompareRecipe) &&
9428                "Expected to replace a VPWidenSC");
9429         assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 &&
9430                "Expected no remaining users");
9431         CompareRecipe->eraseFromParent();
9432       }
9433       Chain = R;
9434     }
9435   }
9436 
9437   // If tail is folded by masking, introduce selects between the phi
9438   // and the live-out instruction of each reduction, at the beginning of the
9439   // dedicated latch block.
9440   if (CM.foldTailByMasking()) {
9441     Builder.setInsertPoint(LatchVPBB, LatchVPBB->begin());
9442     for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) {
9443       VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
9444       if (!PhiR || PhiR->isInLoop())
9445         continue;
9446       VPValue *Cond =
9447           RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
9448       VPValue *Red = PhiR->getBackedgeValue();
9449       assert(cast<VPRecipeBase>(Red->getDef())->getParent() != LatchVPBB &&
9450              "reduction recipe must be defined before latch");
9451       Builder.createNaryOp(Instruction::Select, {Cond, Red, PhiR});
9452     }
9453   }
9454 }
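
// Illustrative effect for an in-loop add reduction 'sum += a[i]': the
// VPWidenRecipe for the add is replaced by a VPReductionRecipe that reduces
// the widened a[i] vector into the scalar chain each iteration, so the loop
// carries a scalar 'sum' rather than a vector that is reduced after the loop.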
9455 
9456 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
9457 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
9458                                VPSlotTracker &SlotTracker) const {
9459   O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
9460   IG->getInsertPos()->printAsOperand(O, false);
9461   O << ", ";
9462   getAddr()->printAsOperand(O, SlotTracker);
9463   VPValue *Mask = getMask();
9464   if (Mask) {
9465     O << ", ";
9466     Mask->printAsOperand(O, SlotTracker);
9467   }
9468 
9469   unsigned OpIdx = 0;
9470   for (unsigned i = 0; i < IG->getFactor(); ++i) {
9471     if (!IG->getMember(i))
9472       continue;
9473     if (getNumStoreOperands() > 0) {
9474       O << "\n" << Indent << "  store ";
9475       getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker);
9476       O << " to index " << i;
9477     } else {
9478       O << "\n" << Indent << "  ";
9479       getVPValue(OpIdx)->printAsOperand(O, SlotTracker);
9480       O << " = load from index " << i;
9481     }
9482     ++OpIdx;
9483   }
9484 }
9485 #endif
9486 
9487 void VPWidenCallRecipe::execute(VPTransformState &State) {
9488   State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this,
9489                                   *this, State);
9490 }
9491 
9492 void VPWidenSelectRecipe::execute(VPTransformState &State) {
9493   auto &I = *cast<SelectInst>(getUnderlyingInstr());
9494   State.ILV->setDebugLocFromInst(&I);
9495 
  // The condition can be loop invariant but still defined inside the
  // loop. This means that we can't just use the original 'cond' value.
9498   // We have to take the 'vectorized' value and pick the first lane.
9499   // Instcombine will make this a no-op.
9500   auto *InvarCond =
9501       InvariantCond ? State.get(getOperand(0), VPIteration(0, 0)) : nullptr;
9502 
9503   for (unsigned Part = 0; Part < State.UF; ++Part) {
9504     Value *Cond = InvarCond ? InvarCond : State.get(getOperand(0), Part);
9505     Value *Op0 = State.get(getOperand(1), Part);
9506     Value *Op1 = State.get(getOperand(2), Part);
9507     Value *Sel = State.Builder.CreateSelect(Cond, Op0, Op1);
9508     State.set(this, Sel, Part);
9509     State.ILV->addMetadata(Sel, &I);
9510   }
9511 }
9512 
9513 void VPWidenRecipe::execute(VPTransformState &State) {
9514   auto &I = *cast<Instruction>(getUnderlyingValue());
9515   auto &Builder = State.Builder;
9516   switch (I.getOpcode()) {
9517   case Instruction::Call:
9518   case Instruction::Br:
9519   case Instruction::PHI:
9520   case Instruction::GetElementPtr:
9521   case Instruction::Select:
9522     llvm_unreachable("This instruction is handled by a different recipe.");
9523   case Instruction::UDiv:
9524   case Instruction::SDiv:
9525   case Instruction::SRem:
9526   case Instruction::URem:
9527   case Instruction::Add:
9528   case Instruction::FAdd:
9529   case Instruction::Sub:
9530   case Instruction::FSub:
9531   case Instruction::FNeg:
9532   case Instruction::Mul:
9533   case Instruction::FMul:
9534   case Instruction::FDiv:
9535   case Instruction::FRem:
9536   case Instruction::Shl:
9537   case Instruction::LShr:
9538   case Instruction::AShr:
9539   case Instruction::And:
9540   case Instruction::Or:
9541   case Instruction::Xor: {
9542     // Just widen unops and binops.
9543     State.ILV->setDebugLocFromInst(&I);
9544 
9545     for (unsigned Part = 0; Part < State.UF; ++Part) {
9546       SmallVector<Value *, 2> Ops;
9547       for (VPValue *VPOp : operands())
9548         Ops.push_back(State.get(VPOp, Part));
9549 
9550       Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
9551 
9552       if (auto *VecOp = dyn_cast<Instruction>(V)) {
9553         VecOp->copyIRFlags(&I);
9554 
        // If the instruction is vectorized and was in a basic block that needed
        // predication, we can't propagate poison-generating flags (nuw/nsw,
        // exact, etc.). The control flow has been linearized and the
        // instruction is no longer guarded by the predicate, which could cause
        // the flag properties to no longer hold.
9560         if (State.MayGeneratePoisonRecipes.contains(this))
9561           VecOp->dropPoisonGeneratingFlags();
9562       }
9563 
9564       // Use this vector value for all users of the original instruction.
9565       State.set(this, V, Part);
9566       State.ILV->addMetadata(V, &I);
9567     }
9568 
9569     break;
9570   }
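  // Illustrative widening (assuming VF = 4, UF = 2): a scalar
  // 'add i32 %a, %b' becomes two 'add <4 x i32>' instructions, one per unroll
  // part, each combining the corresponding vector parts of the operands.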
9571   case Instruction::ICmp:
9572   case Instruction::FCmp: {
9573     // Widen compares. Generate vector compares.
9574     bool FCmp = (I.getOpcode() == Instruction::FCmp);
9575     auto *Cmp = cast<CmpInst>(&I);
9576     State.ILV->setDebugLocFromInst(Cmp);
9577     for (unsigned Part = 0; Part < State.UF; ++Part) {
9578       Value *A = State.get(getOperand(0), Part);
9579       Value *B = State.get(getOperand(1), Part);
9580       Value *C = nullptr;
9581       if (FCmp) {
9582         // Propagate fast math flags.
9583         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
9584         Builder.setFastMathFlags(Cmp->getFastMathFlags());
9585         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
9586       } else {
9587         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
9588       }
9589       State.set(this, C, Part);
9590       State.ILV->addMetadata(C, &I);
9591     }
9592 
9593     break;
9594   }
9595 
9596   case Instruction::ZExt:
9597   case Instruction::SExt:
9598   case Instruction::FPToUI:
9599   case Instruction::FPToSI:
9600   case Instruction::FPExt:
9601   case Instruction::PtrToInt:
9602   case Instruction::IntToPtr:
9603   case Instruction::SIToFP:
9604   case Instruction::UIToFP:
9605   case Instruction::Trunc:
9606   case Instruction::FPTrunc:
9607   case Instruction::BitCast: {
9608     auto *CI = cast<CastInst>(&I);
9609     State.ILV->setDebugLocFromInst(CI);
9610 
    // Vectorize casts.
9612     Type *DestTy = (State.VF.isScalar())
9613                        ? CI->getType()
9614                        : VectorType::get(CI->getType(), State.VF);
9615 
9616     for (unsigned Part = 0; Part < State.UF; ++Part) {
9617       Value *A = State.get(getOperand(0), Part);
9618       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
9619       State.set(this, Cast, Part);
9620       State.ILV->addMetadata(Cast, &I);
9621     }
9622     break;
9623   }
9624   default:
9625     // This instruction is not vectorized by simple widening.
9626     LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
9627     llvm_unreachable("Unhandled instruction!");
9628   } // end of switch.
9629 }
9630 
9631 void VPWidenGEPRecipe::execute(VPTransformState &State) {
9632   auto *GEP = cast<GetElementPtrInst>(getUnderlyingInstr());
9633   // Construct a vector GEP by widening the operands of the scalar GEP as
9634   // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
9635   // results in a vector of pointers when at least one operand of the GEP
9636   // is vector-typed. Thus, to keep the representation compact, we only use
9637   // vector-typed operands for loop-varying values.
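  // For example, a GEP with a loop-invariant base pointer and a single
  // widened (loop-varying) index is expected to produce a vector of VF
  // pointers, one per lane, while the invariant base remains scalar.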
9638 
9639   if (State.VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
9640     // If we are vectorizing, but the GEP has only loop-invariant operands,
9641     // the GEP we build (by only using vector-typed operands for
9642     // loop-varying values) would be a scalar pointer. Thus, to ensure we
9643     // produce a vector of pointers, we need to either arbitrarily pick an
9644     // operand to broadcast, or broadcast a clone of the original GEP.
9645     // Here, we broadcast a clone of the original.
9646     //
9647     // TODO: If at some point we decide to scalarize instructions having
9648     //       loop-invariant operands, this special case will no longer be
9649     //       required. We would add the scalarization decision to
9650     //       collectLoopScalars() and teach getVectorValue() to broadcast
9651     //       the lane-zero scalar value.
9652     auto *Clone = State.Builder.Insert(GEP->clone());
9653     for (unsigned Part = 0; Part < State.UF; ++Part) {
9654       Value *EntryPart = State.Builder.CreateVectorSplat(State.VF, Clone);
9655       State.set(this, EntryPart, Part);
9656       State.ILV->addMetadata(EntryPart, GEP);
9657     }
9658   } else {
9659     // If the GEP has at least one loop-varying operand, we are sure to
9660     // produce a vector of pointers. But if we are only unrolling, we want
9661     // to produce a scalar GEP for each unroll part. Thus, the GEP we
9662     // produce with the code below will be scalar (if VF == 1) or vector
9663     // (otherwise). Note that for the unroll-only case, we still maintain
9664     // values in the vector mapping with initVector, as we do for other
9665     // instructions.
9666     for (unsigned Part = 0; Part < State.UF; ++Part) {
9667       // The pointer operand of the new GEP. If it's loop-invariant, we
9668       // won't broadcast it.
9669       auto *Ptr = IsPtrLoopInvariant
9670                       ? State.get(getOperand(0), VPIteration(0, 0))
9671                       : State.get(getOperand(0), Part);
9672 
9673       // Collect all the indices for the new GEP. If any index is
9674       // loop-invariant, we won't broadcast it.
9675       SmallVector<Value *, 4> Indices;
9676       for (unsigned I = 1, E = getNumOperands(); I < E; I++) {
9677         VPValue *Operand = getOperand(I);
9678         if (IsIndexLoopInvariant[I - 1])
9679           Indices.push_back(State.get(Operand, VPIteration(0, 0)));
9680         else
9681           Indices.push_back(State.get(Operand, Part));
9682       }
9683 
9684       // If the GEP instruction is vectorized and was in a basic block that
9685       // needed predication, we can't propagate the poison-generating 'inbounds'
9686       // flag. The control flow has been linearized and the GEP is no longer
      // guarded by the predicate, so the 'inbounds' property may no longer
      // hold.
9689       bool IsInBounds =
9690           GEP->isInBounds() && State.MayGeneratePoisonRecipes.count(this) == 0;
9691 
      // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
      // but it should be a vector otherwise.
9694       auto *NewGEP = IsInBounds
9695                          ? State.Builder.CreateInBoundsGEP(
9696                                GEP->getSourceElementType(), Ptr, Indices)
9697                          : State.Builder.CreateGEP(GEP->getSourceElementType(),
9698                                                    Ptr, Indices);
9699       assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
9700              "NewGEP is not a pointer vector");
9701       State.set(this, NewGEP, Part);
9702       State.ILV->addMetadata(NewGEP, GEP);
9703     }
9704   }
9705 }
9706 
9707 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
9708   assert(!State.Instance && "Int or FP induction being replicated.");
9709   auto *CanonicalIV = State.get(getParent()->getPlan()->getCanonicalIV(), 0);
9710   State.ILV->widenIntOrFpInduction(IV, this, State, CanonicalIV);
9711 }
9712 
9713 void VPWidenPHIRecipe::execute(VPTransformState &State) {
9714   State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), this,
9715                                  State);
9716 }
9717 
9718 void VPBlendRecipe::execute(VPTransformState &State) {
9719   State.ILV->setDebugLocFromInst(Phi, &State.Builder);
9720   // We know that all PHIs in non-header blocks are converted into
9721   // selects, so we don't have to worry about the insertion order and we
9722   // can just use the builder.
9723   // At this point we generate the predication tree. There may be
9724   // duplications since this is a simple recursive scan, but future
9725   // optimizations will clean it up.
9726 
9727   unsigned NumIncoming = getNumIncomingValues();
9728 
9729   // Generate a sequence of selects of the form:
9730   // SELECT(Mask3, In3,
9731   //        SELECT(Mask2, In2,
9732   //               SELECT(Mask1, In1,
9733   //                      In0)))
  // Note that Mask0 is never used: lanes for which no path reaches this phi
  // are essentially undef and are taken from In0.
9736   InnerLoopVectorizer::VectorParts Entry(State.UF);
9737   for (unsigned In = 0; In < NumIncoming; ++In) {
9738     for (unsigned Part = 0; Part < State.UF; ++Part) {
9739       // We might have single edge PHIs (blocks) - use an identity
9740       // 'select' for the first PHI operand.
9741       Value *In0 = State.get(getIncomingValue(In), Part);
9742       if (In == 0)
9743         Entry[Part] = In0; // Initialize with the first incoming value.
9744       else {
9745         // Select between the current value and the previous incoming edge
9746         // based on the incoming mask.
9747         Value *Cond = State.get(getMask(In), Part);
9748         Entry[Part] =
9749             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
9750       }
9751     }
9752   }
9753   for (unsigned Part = 0; Part < State.UF; ++Part)
9754     State.set(this, Entry[Part], Part);
9755 }
9756 
9757 void VPInterleaveRecipe::execute(VPTransformState &State) {
9758   assert(!State.Instance && "Interleave group being replicated.");
9759   State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(),
9760                                       getStoredValues(), getMask());
9761 }
9762 
9763 void VPReductionRecipe::execute(VPTransformState &State) {
9764   assert(!State.Instance && "Reduction being replicated.");
9765   Value *PrevInChain = State.get(getChainOp(), 0);
9766   RecurKind Kind = RdxDesc->getRecurrenceKind();
9767   bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc);
9768   // Propagate the fast-math flags carried by the underlying instruction.
9769   IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
9770   State.Builder.setFastMathFlags(RdxDesc->getFastMathFlags());
9771   for (unsigned Part = 0; Part < State.UF; ++Part) {
9772     Value *NewVecOp = State.get(getVecOp(), Part);
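    // For a conditional reduction, masked-off lanes are replaced below by the
    // identity element of the recurrence (e.g. 0 for an integer add), so they
    // do not affect the reduced value.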
9773     if (VPValue *Cond = getCondOp()) {
9774       Value *NewCond = State.get(Cond, Part);
9775       VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
9776       Value *Iden = RdxDesc->getRecurrenceIdentity(
9777           Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags());
9778       Value *IdenVec =
9779           State.Builder.CreateVectorSplat(VecTy->getElementCount(), Iden);
9780       Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
9781       NewVecOp = Select;
9782     }
9783     Value *NewRed;
9784     Value *NextInChain;
9785     if (IsOrdered) {
9786       if (State.VF.isVector())
9787         NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp,
9788                                         PrevInChain);
9789       else
9790         NewRed = State.Builder.CreateBinOp(
9791             (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), PrevInChain,
9792             NewVecOp);
9793       PrevInChain = NewRed;
9794     } else {
9795       PrevInChain = State.get(getChainOp(), Part);
9796       NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
9797     }
9798     if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9799       NextInChain =
9800           createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
9801                          NewRed, PrevInChain);
9802     } else if (IsOrdered)
9803       NextInChain = NewRed;
9804     else
9805       NextInChain = State.Builder.CreateBinOp(
9806           (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), NewRed,
9807           PrevInChain);
9808     State.set(this, NextInChain, Part);
9809   }
9810 }
9811 
9812 void VPReplicateRecipe::execute(VPTransformState &State) {
9813   if (State.Instance) { // Generate a single instance.
9814     assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
9815     State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *State.Instance,
9816                                     IsPredicated, State);
    // Insert the scalar instance, packing it into a vector.
9818     if (AlsoPack && State.VF.isVector()) {
9819       // If we're constructing lane 0, initialize to start from poison.
9820       if (State.Instance->Lane.isFirstLane()) {
9821         assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
9822         Value *Poison = PoisonValue::get(
9823             VectorType::get(getUnderlyingValue()->getType(), State.VF));
9824         State.set(this, Poison, State.Instance->Part);
9825       }
9826       State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
9827     }
9828     return;
9829   }
9830 
  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane for
  // each of the UF parts.
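  // For example, a uniform address computation only needs its lane-0 scalar
  // per unroll part; that single value is then reused by all lanes.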
9834   unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
9835   assert((!State.VF.isScalable() || IsUniform) &&
9836          "Can't scalarize a scalable vector");
9837   for (unsigned Part = 0; Part < State.UF; ++Part)
9838     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
9839       State.ILV->scalarizeInstruction(getUnderlyingInstr(), this,
9840                                       VPIteration(Part, Lane), IsPredicated,
9841                                       State);
9842 }
9843 
9844 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
9845   assert(State.Instance && "Branch on Mask works only on single instance.");
9846 
9847   unsigned Part = State.Instance->Part;
9848   unsigned Lane = State.Instance->Lane.getKnownLane();
9849 
9850   Value *ConditionBit = nullptr;
9851   VPValue *BlockInMask = getMask();
9852   if (BlockInMask) {
9853     ConditionBit = State.get(BlockInMask, Part);
9854     if (ConditionBit->getType()->isVectorTy())
9855       ConditionBit = State.Builder.CreateExtractElement(
9856           ConditionBit, State.Builder.getInt32(Lane));
9857   } else // Block in mask is all-one.
9858     ConditionBit = State.Builder.getTrue();
9859 
9860   // Replace the temporary unreachable terminator with a new conditional branch,
9861   // whose two destinations will be set later when they are created.
9862   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
9863   assert(isa<UnreachableInst>(CurrentTerminator) &&
9864          "Expected to replace unreachable terminator with conditional branch.");
9865   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
9866   CondBr->setSuccessor(0, nullptr);
9867   ReplaceInstWithInst(CurrentTerminator, CondBr);
9868 }
9869 
9870 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
9871   assert(State.Instance && "Predicated instruction PHI works per instance.");
9872   Instruction *ScalarPredInst =
9873       cast<Instruction>(State.get(getOperand(0), *State.Instance));
9874   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
9875   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
9876   assert(PredicatingBB && "Predicated block has no single predecessor.");
9877   assert(isa<VPReplicateRecipe>(getOperand(0)) &&
9878          "operand must be VPReplicateRecipe");
9879 
9880   // By current pack/unpack logic we need to generate only a single phi node: if
9881   // a vector value for the predicated instruction exists at this point it means
9882   // the instruction has vector users only, and a phi for the vector value is
9883   // needed. In this case the recipe of the predicated instruction is marked to
9884   // also do that packing, thereby "hoisting" the insert-element sequence.
9885   // Otherwise, a phi node for the scalar value is needed.
9886   unsigned Part = State.Instance->Part;
9887   if (State.hasVectorValue(getOperand(0), Part)) {
9888     Value *VectorValue = State.get(getOperand(0), Part);
9889     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
9890     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
9891     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
9892     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
9893     if (State.hasVectorValue(this, Part))
9894       State.reset(this, VPhi, Part);
9895     else
9896       State.set(this, VPhi, Part);
9897     // NOTE: Currently we need to update the value of the operand, so the next
9898     // predicated iteration inserts its generated value in the correct vector.
9899     State.reset(getOperand(0), VPhi, Part);
9900   } else {
9901     Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType();
9902     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
9903     Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
9904                      PredicatingBB);
9905     Phi->addIncoming(ScalarPredInst, PredicatedBB);
9906     if (State.hasScalarValue(this, *State.Instance))
9907       State.reset(this, Phi, *State.Instance);
9908     else
9909       State.set(this, Phi, *State.Instance);
9910     // NOTE: Currently we need to update the value of the operand, so the next
9911     // predicated iteration inserts its generated value in the correct vector.
9912     State.reset(getOperand(0), Phi, *State.Instance);
9913   }
9914 }
9915 
9916 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
9917   VPValue *StoredValue = isStore() ? getStoredValue() : nullptr;
9918 
9919   // Attempt to issue a wide load.
9920   LoadInst *LI = dyn_cast<LoadInst>(&Ingredient);
9921   StoreInst *SI = dyn_cast<StoreInst>(&Ingredient);
9922 
9923   assert((LI || SI) && "Invalid Load/Store instruction");
9924   assert((!SI || StoredValue) && "No stored value provided for widened store");
9925   assert((!LI || !StoredValue) && "Stored value provided for widened load");
9926 
9927   Type *ScalarDataTy = getLoadStoreType(&Ingredient);
9928 
9929   auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
9930   const Align Alignment = getLoadStoreAlignment(&Ingredient);
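  // Non-consecutive accesses are widened below into masked gather/scatter
  // intrinsics; consecutive accesses become wide (possibly reversed) loads or
  // stores.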
9931   bool CreateGatherScatter = !Consecutive;
9932 
9933   auto &Builder = State.Builder;
9934   InnerLoopVectorizer::VectorParts BlockInMaskParts(State.UF);
9935   bool isMaskRequired = getMask();
9936   if (isMaskRequired)
9937     for (unsigned Part = 0; Part < State.UF; ++Part)
9938       BlockInMaskParts[Part] = State.get(getMask(), Part);
9939 
9940   const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
9941     // Calculate the pointer for the specific unroll-part.
9942     GetElementPtrInst *PartPtr = nullptr;
9943 
9944     bool InBounds = false;
9945     if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
9946       InBounds = gep->isInBounds();
9947     if (Reverse) {
      // If the address is consecutive but reversed, then the wide load/store
      // needs to start at the last vector element.
      // RunTimeVF = VScale * VF.getKnownMinValue(); for fixed-width vectors
      // VScale is 1, so RunTimeVF = VF.getKnownMinValue().
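      // For example, with a fixed VF of 4 and Part == 1: RunTimeVF = 4,
      // NumElt = -4 and LastLane = -3, so PartPtr ends up pointing at
      // Ptr[-7], the lowest address touched by the reversed second part.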
9952       Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), State.VF);
9953       // NumElt = -Part * RunTimeVF
9954       Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF);
9955       // LastLane = 1 - RunTimeVF
9956       Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF);
9957       PartPtr =
9958           cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt));
9959       PartPtr->setIsInBounds(InBounds);
9960       PartPtr = cast<GetElementPtrInst>(
9961           Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane));
9962       PartPtr->setIsInBounds(InBounds);
9963       if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
9964         BlockInMaskParts[Part] =
9965             Builder.CreateVectorReverse(BlockInMaskParts[Part], "reverse");
9966     } else {
9967       Value *Increment =
9968           createStepForVF(Builder, Builder.getInt32Ty(), State.VF, Part);
9969       PartPtr = cast<GetElementPtrInst>(
9970           Builder.CreateGEP(ScalarDataTy, Ptr, Increment));
9971       PartPtr->setIsInBounds(InBounds);
9972     }
9973 
9974     unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
9975     return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
9976   };
9977 
9978   // Handle Stores:
9979   if (SI) {
9980     State.ILV->setDebugLocFromInst(SI);
9981 
9982     for (unsigned Part = 0; Part < State.UF; ++Part) {
9983       Instruction *NewSI = nullptr;
9984       Value *StoredVal = State.get(StoredValue, Part);
9985       if (CreateGatherScatter) {
9986         Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
9987         Value *VectorGep = State.get(getAddr(), Part);
9988         NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
9989                                             MaskPart);
9990       } else {
9991         if (Reverse) {
9992           // If we store to reverse consecutive memory locations, then we need
9993           // to reverse the order of elements in the stored value.
9994           StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse");
9995           // We don't want to update the value in the map as it might be used in
9996           // another expression. So don't call resetVectorValue(StoredVal).
9997         }
9998         auto *VecPtr =
9999             CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0)));
10000         if (isMaskRequired)
10001           NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
10002                                             BlockInMaskParts[Part]);
10003         else
10004           NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
10005       }
10006       State.ILV->addMetadata(NewSI, SI);
10007     }
10008     return;
10009   }
10010 
10011   // Handle loads.
10012   assert(LI && "Must have a load instruction");
10013   State.ILV->setDebugLocFromInst(LI);
10014   for (unsigned Part = 0; Part < State.UF; ++Part) {
10015     Value *NewLI;
10016     if (CreateGatherScatter) {
10017       Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
10018       Value *VectorGep = State.get(getAddr(), Part);
10019       NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart,
10020                                          nullptr, "wide.masked.gather");
10021       State.ILV->addMetadata(NewLI, LI);
10022     } else {
10023       auto *VecPtr =
10024           CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0)));
10025       if (isMaskRequired)
10026         NewLI = Builder.CreateMaskedLoad(
10027             DataTy, VecPtr, Alignment, BlockInMaskParts[Part],
10028             PoisonValue::get(DataTy), "wide.masked.load");
10029       else
10030         NewLI =
10031             Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
10032 
      // Add metadata to the load, but set the vector value to the reverse
      // shuffle when the access is reversed.
10034       State.ILV->addMetadata(NewLI, LI);
10035       if (Reverse)
10036         NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
10037     }
10038 
10039     State.set(this, NewLI, Part);
10040   }
10041 }
10042 
// Determine how to lower the scalar epilogue, which depends on 1) optimizing
// for minimum code-size, 2) compiler options that prefer predication, 3) loop
// hints forcing predication, and 4) a TTI hook that analyzes whether the loop
// is suitable for predication.
10047 static ScalarEpilogueLowering getScalarEpilogueLowering(
10048     Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
10049     BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
10050     AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
10051     LoopVectorizationLegality &LVL) {
10052   // 1) OptSize takes precedence over all other options, i.e. if this is set,
10053   // don't look at hints or options, and don't request a scalar epilogue.
10054   // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
10055   // LoopAccessInfo (due to code dependency and not being able to reliably get
10056   // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
10057   // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
10058   // versioning when the vectorization is forced, unlike hasOptSize. So revert
10059   // back to the old way and vectorize with versioning when forced. See D81345.)
10060   if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
10061                                                       PGSOQueryType::IRPass) &&
10062                           Hints.getForce() != LoopVectorizeHints::FK_Enabled))
10063     return CM_ScalarEpilogueNotAllowedOptSize;
10064 
10065   // 2) If set, obey the directives
10066   if (PreferPredicateOverEpilogue.getNumOccurrences()) {
10067     switch (PreferPredicateOverEpilogue) {
10068     case PreferPredicateTy::ScalarEpilogue:
10069       return CM_ScalarEpilogueAllowed;
10070     case PreferPredicateTy::PredicateElseScalarEpilogue:
10071       return CM_ScalarEpilogueNotNeededUsePredicate;
10072     case PreferPredicateTy::PredicateOrDontVectorize:
10073       return CM_ScalarEpilogueNotAllowedUsePredicate;
10074     };
10075   }
10076 
10077   // 3) If set, obey the hints
10078   switch (Hints.getPredicate()) {
10079   case LoopVectorizeHints::FK_Enabled:
10080     return CM_ScalarEpilogueNotNeededUsePredicate;
10081   case LoopVectorizeHints::FK_Disabled:
10082     return CM_ScalarEpilogueAllowed;
10083   };
10084 
  // 4) If the TTI hook indicates this is profitable, request predication.
10086   if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
10087                                        LVL.getLAI()))
10088     return CM_ScalarEpilogueNotNeededUsePredicate;
10089 
10090   return CM_ScalarEpilogueAllowed;
10091 }
10092 
10093 Value *VPTransformState::get(VPValue *Def, unsigned Part) {
  // If Values have been set for this Def, return the one relevant for \p Part.
10095   if (hasVectorValue(Def, Part))
10096     return Data.PerPartOutput[Def][Part];
10097 
10098   if (!hasScalarValue(Def, {Part, 0})) {
10099     Value *IRV = Def->getLiveInIRValue();
10100     Value *B = ILV->getBroadcastInstrs(IRV);
10101     set(Def, B, Part);
10102     return B;
10103   }
10104 
10105   Value *ScalarValue = get(Def, {Part, 0});
10106   // If we aren't vectorizing, we can just copy the scalar map values over
10107   // to the vector map.
10108   if (VF.isScalar()) {
10109     set(Def, ScalarValue, Part);
10110     return ScalarValue;
10111   }
10112 
10113   auto *RepR = dyn_cast<VPReplicateRecipe>(Def);
10114   bool IsUniform = RepR && RepR->isUniform();
10115 
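  // For a non-uniform value, lane VF-1 is the last scalar lane generated; it
  // is used below both to detect defs that turned out to be uniform and to
  // find a valid insertion point for the packing sequence.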
10116   unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
10117   // Check if there is a scalar value for the selected lane.
10118   if (!hasScalarValue(Def, {Part, LastLane})) {
10119     // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform.
10120     assert(isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) &&
10121            "unexpected recipe found to be invariant");
10122     IsUniform = true;
10123     LastLane = 0;
10124   }
10125 
10126   auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
10127   // Set the insert point after the last scalarized instruction or after the
10128   // last PHI, if LastInst is a PHI. This ensures the insertelement sequence
10129   // will directly follow the scalar definitions.
10130   auto OldIP = Builder.saveIP();
10131   auto NewIP =
10132       isa<PHINode>(LastInst)
10133           ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI())
10134           : std::next(BasicBlock::iterator(LastInst));
10135   Builder.SetInsertPoint(&*NewIP);
10136 
10137   // However, if we are vectorizing, we need to construct the vector values.
10138   // If the value is known to be uniform after vectorization, we can just
10139   // broadcast the scalar value corresponding to lane zero for each unroll
10140   // iteration. Otherwise, we construct the vector values using
10141   // insertelement instructions. Since the resulting vectors are stored in
10142   // State, we will only generate the insertelements once.
10143   Value *VectorValue = nullptr;
10144   if (IsUniform) {
10145     VectorValue = ILV->getBroadcastInstrs(ScalarValue);
10146     set(Def, VectorValue, Part);
10147   } else {
    // Initialize packing with insertelements to start from poison.
    assert(!VF.isScalable() && "VF is assumed to be non scalable.");
    Value *Poison = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
    set(Def, Poison, Part);
10152     for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
10153       ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
10154     VectorValue = get(Def, Part);
10155   }
10156   Builder.restoreIP(OldIP);
10157   return VectorValue;
10158 }
10159 
10160 // Process the loop in the VPlan-native vectorization path. This path builds
// VPlan upfront in the vectorization pipeline, which allows applying
// VPlan-to-VPlan transformations from the very beginning without modifying
// the input LLVM IR.
10164 static bool processLoopInVPlanNativePath(
10165     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
10166     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
10167     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
10168     OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
10169     ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints,
10170     LoopVectorizationRequirements &Requirements) {
10171 
10172   if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
10173     LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
10174     return false;
10175   }
10176   assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
10177   Function *F = L->getHeader()->getParent();
10178   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
10179 
10180   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
10181       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
10182 
10183   LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
10184                                 &Hints, IAI);
10185   // Use the planner for outer loop vectorization.
10186   // TODO: CM is not used at this point inside the planner. Turn CM into an
10187   // optional argument if we don't need it in the future.
10188   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints,
10189                                Requirements, ORE);
10190 
10191   // Get user vectorization factor.
10192   ElementCount UserVF = Hints.getWidth();
10193 
10194   CM.collectElementTypesForWidening();
10195 
10196   // Plan how to best vectorize, return the best VF and its cost.
10197   const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
10198 
10199   // If we are stress testing VPlan builds, do not attempt to generate vector
10200   // code. Masked vector code generation support will follow soon.
10201   // Also, do not attempt to vectorize if no vector code will be produced.
10202   if (VPlanBuildStressTest || EnableVPlanPredication ||
10203       VectorizationFactor::Disabled() == VF)
10204     return false;
10205 
10206   VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10207 
10208   {
10209     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10210                              F->getParent()->getDataLayout());
10211     InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
10212                            &CM, BFI, PSI, Checks);
10213     LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
10214                       << L->getHeader()->getParent()->getName() << "\"\n");
10215     LVP.executePlan(VF.Width, 1, BestPlan, LB, DT);
10216   }
10217 
10218   // Mark the loop as already vectorized to avoid vectorizing again.
10219   Hints.setAlreadyVectorized();
10220   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10221   return true;
10222 }
10223 
10224 // Emit a remark if there are stores to floats that required a floating point
// extension. If the vectorized loop was generated with double precision, there
10226 // will be a performance penalty from the conversion overhead and the change in
10227 // the vector width.
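// For example, C source such as 'float r = f * 2.0;' (note the double
// literal) typically introduces an fpext of 'f' to double and an fptrunc
// back to float, halving the effective vector width of the loop.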
10228 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
10229   SmallVector<Instruction *, 4> Worklist;
10230   for (BasicBlock *BB : L->getBlocks()) {
10231     for (Instruction &Inst : *BB) {
10232       if (auto *S = dyn_cast<StoreInst>(&Inst)) {
10233         if (S->getValueOperand()->getType()->isFloatTy())
10234           Worklist.push_back(S);
10235       }
10236     }
10237   }
10238 
  // Traverse upwards from the floating point stores, searching for floating
  // point conversions.
10241   SmallPtrSet<const Instruction *, 4> Visited;
10242   SmallPtrSet<const Instruction *, 4> EmittedRemark;
10243   while (!Worklist.empty()) {
10244     auto *I = Worklist.pop_back_val();
10245     if (!L->contains(I))
10246       continue;
10247     if (!Visited.insert(I).second)
10248       continue;
10249 
10250     // Emit a remark if the floating point store required a floating
10251     // point conversion.
10252     // TODO: More work could be done to identify the root cause such as a
10253     // constant or a function return type and point the user to it.
10254     if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
10255       ORE->emit([&]() {
10256         return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
10257                                           I->getDebugLoc(), L->getHeader())
10258                << "floating point conversion changes vector width. "
10259                << "Mixed floating point precision requires an up/down "
10260                << "cast that will negatively impact performance.";
10261       });
10262 
10263     for (Use &Op : I->operands())
10264       if (auto *OpI = dyn_cast<Instruction>(Op))
10265         Worklist.push_back(OpI);
10266   }
10267 }
10268 
10269 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
10270     : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
10271                                !EnableLoopInterleaving),
10272       VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
10273                               !EnableLoopVectorization) {}
10274 
10275 bool LoopVectorizePass::processLoop(Loop *L) {
10276   assert((EnableVPlanNativePath || L->isInnermost()) &&
10277          "VPlan-native path is not enabled. Only process inner loops.");
10278 
10279 #ifndef NDEBUG
10280   const std::string DebugLocStr = getDebugLocString(L);
10281 #endif /* NDEBUG */
10282 
10283   LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
10284                     << L->getHeader()->getParent()->getName() << "\" from "
10285                     << DebugLocStr << "\n");
10286 
10287   LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI);
10288 
10289   LLVM_DEBUG(
10290       dbgs() << "LV: Loop hints:"
10291              << " force="
10292              << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
10293                      ? "disabled"
10294                      : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
10295                             ? "enabled"
10296                             : "?"))
10297              << " width=" << Hints.getWidth()
10298              << " interleave=" << Hints.getInterleave() << "\n");
10299 
10300   // Function containing loop
10301   Function *F = L->getHeader()->getParent();
10302 
10303   // Looking at the diagnostic output is the only way to determine if a loop
10304   // was vectorized (other than looking at the IR or machine code), so it
10305   // is important to generate an optimization remark for each loop. Most of
10306   // these messages are generated as OptimizationRemarkAnalysis. Remarks
10307   // generated as OptimizationRemark and OptimizationRemarkMissed are
  // less verbose, reporting vectorized loops and unvectorized loops that may
10309   // benefit from vectorization, respectively.
10310 
10311   if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
10312     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
10313     return false;
10314   }
10315 
10316   PredicatedScalarEvolution PSE(*SE, *L);
10317 
10318   // Check if it is legal to vectorize the loop.
10319   LoopVectorizationRequirements Requirements;
10320   LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
10321                                 &Requirements, &Hints, DB, AC, BFI, PSI);
10322   if (!LVL.canVectorize(EnableVPlanNativePath)) {
10323     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
10324     Hints.emitRemarkWithHints();
10325     return false;
10326   }
10327 
10328   // Check the function attributes and profiles to find out if this function
10329   // should be optimized for size.
10330   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
10331       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);
10332 
10333   // Entrance to the VPlan-native vectorization path. Outer loops are processed
10334   // here. They may require CFG and instruction level transformations before
10335   // even evaluating whether vectorization is profitable. Since we cannot modify
10336   // the incoming IR, we need to build VPlan upfront in the vectorization
10337   // pipeline.
10338   if (!L->isInnermost())
10339     return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
10340                                         ORE, BFI, PSI, Hints, Requirements);
10341 
10342   assert(L->isInnermost() && "Inner loop expected.");
10343 
10344   // Check the loop for a trip count threshold: vectorize loops with a tiny trip
10345   // count by optimizing for size, to minimize overheads.
10346   auto ExpectedTC = getSmallBestKnownTC(*SE, L);
10347   if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
10348     LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
10349                       << "This loop is worth vectorizing only if no scalar "
10350                       << "iteration overheads are incurred.");
10351     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
10352       LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
10353     else {
10354       LLVM_DEBUG(dbgs() << "\n");
10355       SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
10356     }
10357   }
10358 
10359   // Check the function attributes to see if implicit floats are allowed.
10360   // FIXME: This check doesn't seem possibly correct -- what if the loop is
10361   // an integer loop and the vector instructions selected are purely integer
10362   // vector instructions?
10363   if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
10364     reportVectorizationFailure(
10365         "Can't vectorize when the NoImplicitFloat attribute is used",
10366         "loop not vectorized due to NoImplicitFloat attribute",
10367         "NoImplicitFloat", ORE, L);
10368     Hints.emitRemarkWithHints();
10369     return false;
10370   }
10371 
10372   // Check if the target supports potentially unsafe FP vectorization.
10373   // FIXME: Add a check for the type of safety issue (denormal, signaling)
10374   // for the target we're vectorizing for, to make sure none of the
10375   // additional fp-math flags can help.
10376   if (Hints.isPotentiallyUnsafe() &&
10377       TTI->isFPVectorizationPotentiallyUnsafe()) {
10378     reportVectorizationFailure(
10379         "Potentially unsafe FP op prevents vectorization",
10380         "loop not vectorized due to unsafe FP support.",
10381         "UnsafeFP", ORE, L);
10382     Hints.emitRemarkWithHints();
10383     return false;
10384   }
10385 
10386   bool AllowOrderedReductions;
10387   // If the flag is set, use that instead and override the TTI behaviour.
10388   if (ForceOrderedReductions.getNumOccurrences() > 0)
10389     AllowOrderedReductions = ForceOrderedReductions;
10390   else
10391     AllowOrderedReductions = TTI->enableOrderedReductions();
10392   if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) {
10393     ORE->emit([&]() {
10394       auto *ExactFPMathInst = Requirements.getExactFPInst();
10395       return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
10396                                                  ExactFPMathInst->getDebugLoc(),
10397                                                  ExactFPMathInst->getParent())
10398              << "loop not vectorized: cannot prove it is safe to reorder "
10399                 "floating-point operations";
10400     });
10401     LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
10402                          "reorder floating-point operations\n");
10403     Hints.emitRemarkWithHints();
10404     return false;
10405   }
10406 
10407   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
10408   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
10409 
10410   // If an override option has been passed in for interleaved accesses, use it.
10411   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
10412     UseInterleaved = EnableInterleavedMemAccesses;
10413 
10414   // Analyze interleaved memory accesses.
10415   if (UseInterleaved) {
10416     IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
10417   }
10418 
10419   // Use the cost model.
10420   LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
10421                                 F, &Hints, IAI);
10422   CM.collectValuesToIgnore();
10423   CM.collectElementTypesForWidening();
10424 
10425   // Use the planner for vectorization.
10426   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints,
10427                                Requirements, ORE);
10428 
10429   // Get user vectorization factor and interleave count.
10430   ElementCount UserVF = Hints.getWidth();
10431   unsigned UserIC = Hints.getInterleave();
10432 
10433   // Plan how to best vectorize, return the best VF and its cost.
10434   Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
10435 
10436   VectorizationFactor VF = VectorizationFactor::Disabled();
10437   unsigned IC = 1;
10438 
10439   if (MaybeVF) {
10440     VF = *MaybeVF;
10441     // Select the interleave count.
10442     IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue());
10443   }
10444 
10445   // Identify the diagnostic messages that should be produced.
10446   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
10447   bool VectorizeLoop = true, InterleaveLoop = true;
10448   if (VF.Width.isScalar()) {
10449     LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
10450     VecDiagMsg = std::make_pair(
10451         "VectorizationNotBeneficial",
10452         "the cost-model indicates that vectorization is not beneficial");
10453     VectorizeLoop = false;
10454   }
10455 
10456   if (!MaybeVF && UserIC > 1) {
10457     // Tell the user interleaving was avoided up-front, despite being explicitly
10458     // requested.
10459     LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
10460                          "interleaving should be avoided up front\n");
10461     IntDiagMsg = std::make_pair(
10462         "InterleavingAvoided",
10463         "Ignoring UserIC, because interleaving was avoided up front");
10464     InterleaveLoop = false;
10465   } else if (IC == 1 && UserIC <= 1) {
10466     // Tell the user interleaving is not beneficial.
10467     LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
10468     IntDiagMsg = std::make_pair(
10469         "InterleavingNotBeneficial",
10470         "the cost-model indicates that interleaving is not beneficial");
10471     InterleaveLoop = false;
10472     if (UserIC == 1) {
10473       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
10474       IntDiagMsg.second +=
10475           " and is explicitly disabled or interleave count is set to 1";
10476     }
10477   } else if (IC > 1 && UserIC == 1) {
    // Tell the user that interleaving is beneficial but explicitly disabled.
10479     LLVM_DEBUG(
10480         dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
10481     IntDiagMsg = std::make_pair(
10482         "InterleavingBeneficialButDisabled",
10483         "the cost-model indicates that interleaving is beneficial "
10484         "but is explicitly disabled or interleave count is set to 1");
10485     InterleaveLoop = false;
10486   }
10487 
10488   // Override IC if user provided an interleave count.
10489   IC = UserIC > 0 ? UserIC : IC;
10490 
10491   // Emit diagnostic messages, if any.
10492   const char *VAPassName = Hints.vectorizeAnalysisPassName();
10493   if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
10495     ORE->emit([&]() {
10496       return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
10497                                       L->getStartLoc(), L->getHeader())
10498              << VecDiagMsg.second;
10499     });
10500     ORE->emit([&]() {
10501       return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
10502                                       L->getStartLoc(), L->getHeader())
10503              << IntDiagMsg.second;
10504     });
10505     return false;
10506   } else if (!VectorizeLoop && InterleaveLoop) {
10507     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10508     ORE->emit([&]() {
10509       return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
10510                                         L->getStartLoc(), L->getHeader())
10511              << VecDiagMsg.second;
10512     });
10513   } else if (VectorizeLoop && !InterleaveLoop) {
10514     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10515                       << ") in " << DebugLocStr << '\n');
10516     ORE->emit([&]() {
10517       return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
10518                                         L->getStartLoc(), L->getHeader())
10519              << IntDiagMsg.second;
10520     });
10521   } else if (VectorizeLoop && InterleaveLoop) {
10522     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10523                       << ") in " << DebugLocStr << '\n');
10524     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10525   }
10526 
10527   bool DisableRuntimeUnroll = false;
10528   MDNode *OrigLoopID = L->getLoopID();
10529   {
    // Optimistically generate runtime checks. Drop them if they turn out not
    // to be profitable. Limit the scope of Checks, so the cleanup happens
    // immediately after vector code generation is done.
10533     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10534                              F->getParent()->getDataLayout());
10535     if (!VF.Width.isScalar() || IC > 1)
10536       Checks.Create(L, *LVL.getLAI(), PSE.getUnionPredicate());
10537 
10538     using namespace ore;
10539     if (!VectorizeLoop) {
10540       assert(IC > 1 && "interleave count should not be 1 or 0");
      // If we decided that it is not profitable to vectorize the loop, then
      // interleave it.
10543       InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
10544                                  &CM, BFI, PSI, Checks);
10545 
10546       VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10547       LVP.executePlan(VF.Width, IC, BestPlan, Unroller, DT);
10548 
10549       ORE->emit([&]() {
10550         return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
10551                                   L->getHeader())
10552                << "interleaved loop (interleaved count: "
10553                << NV("InterleaveCount", IC) << ")";
10554       });
10555     } else {
      // If we decided it is profitable to vectorize the loop, then do it.
10557 
10558       // Consider vectorizing the epilogue too if it's profitable.
10559       VectorizationFactor EpilogueVF =
10560           CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
10561       if (EpilogueVF.Width.isVector()) {
10562 
10563         // The first pass vectorizes the main loop and creates a scalar epilogue
10564         // to be vectorized by executing the plan (potentially with a different
10565         // factor) again shortly afterwards.
10566         EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1);
10567         EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE,
10568                                            EPI, &LVL, &CM, BFI, PSI, Checks);
10569 
10570         VPlan &BestMainPlan = LVP.getBestPlanFor(EPI.MainLoopVF);
10571         LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF, BestMainPlan, MainILV,
10572                         DT);
10573         ++LoopsVectorized;
10574 
10575         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10576         formLCSSARecursively(*L, *DT, LI, SE);
10577 
10578         // Second pass vectorizes the epilogue and adjusts the control flow
10579         // edges from the first pass.
10580         EPI.MainLoopVF = EPI.EpilogueVF;
10581         EPI.MainLoopUF = EPI.EpilogueUF;
10582         EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
10583                                                  ORE, EPI, &LVL, &CM, BFI, PSI,
10584                                                  Checks);
10585 
10586         VPlan &BestEpiPlan = LVP.getBestPlanFor(EPI.EpilogueVF);
10587 
10588         // Ensure that the start values for any VPReductionPHIRecipes are
        // updated before vectorizing the epilogue loop.
10590         VPBasicBlock *Header = BestEpiPlan.getEntry()->getEntryBasicBlock();
10591         for (VPRecipeBase &R : Header->phis()) {
10592           if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) {
10593             if (auto *Resume = MainILV.getReductionResumeValue(
10594                     ReductionPhi->getRecurrenceDescriptor())) {
10595               VPValue *StartVal = new VPValue(Resume);
10596               BestEpiPlan.addExternalDef(StartVal);
10597               ReductionPhi->setOperand(0, StartVal);
10598             }
10599           }
10600         }
10601 
10602         LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV,
10603                         DT);
10604         ++LoopsEpilogueVectorized;
10605 
10606         if (!MainILV.areSafetyChecksAdded())
10607           DisableRuntimeUnroll = true;
10608       } else {
10609         InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
10610                                &LVL, &CM, BFI, PSI, Checks);
10611 
10612         VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10613         LVP.executePlan(VF.Width, IC, BestPlan, LB, DT);
10614         ++LoopsVectorized;
10615 
        // Add metadata to disable runtime unrolling of the scalar loop when
        // there are no runtime checks for strides and memory. A scalar loop
        // that is rarely used is not worth unrolling.
10619         if (!LB.areSafetyChecksAdded())
10620           DisableRuntimeUnroll = true;
10621       }
10622       // Report the vectorization decision.
10623       ORE->emit([&]() {
10624         return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
10625                                   L->getHeader())
10626                << "vectorized loop (vectorization width: "
10627                << NV("VectorizationFactor", VF.Width)
10628                << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
10629       });
10630     }
10631 
10632     if (ORE->allowExtraAnalysis(LV_NAME))
10633       checkMixedPrecision(L, ORE);
10634   }
10635 
10636   Optional<MDNode *> RemainderLoopID =
10637       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
10638                                       LLVMLoopVectorizeFollowupEpilogue});
10639   if (RemainderLoopID.hasValue()) {
10640     L->setLoopID(RemainderLoopID.getValue());
10641   } else {
10642     if (DisableRuntimeUnroll)
10643       AddRuntimeUnrollDisableMetaData(L);
10644 
10645     // Mark the loop as already vectorized to avoid vectorizing again.
10646     Hints.setAlreadyVectorized();
10647   }
10648 
10649   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10650   return true;
10651 }
10652 
10653 LoopVectorizeResult LoopVectorizePass::runImpl(
10654     Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
10655     DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
10656     DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
10657     std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
10658     OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
10659   SE = &SE_;
10660   LI = &LI_;
10661   TTI = &TTI_;
10662   DT = &DT_;
10663   BFI = &BFI_;
10664   TLI = TLI_;
10665   AA = &AA_;
10666   AC = &AC_;
10667   GetLAA = &GetLAA_;
10668   DB = &DB_;
10669   ORE = &ORE_;
10670   PSI = PSI_;
10671 
10672   // Don't attempt if
10673   // 1. the target claims to have no vector registers, and
10674   // 2. interleaving won't help ILP.
10675   //
10676   // The second condition is necessary because, even if the target has no
10677   // vector registers, loop vectorization may still enable scalar
10678   // interleaving.
10679   if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
10680       TTI->getMaxInterleaveFactor(1) < 2)
10681     return LoopVectorizeResult(false, false);
10682 
10683   bool Changed = false, CFGChanged = false;
10684 
10685   // The vectorizer requires loops to be in simplified form.
10686   // Since simplification may add new inner loops, it has to run before the
10687   // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
10689   // vectorized.
10690   for (auto &L : *LI)
10691     Changed |= CFGChanged |=
10692         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10693 
10694   // Build up a worklist of inner-loops to vectorize. This is necessary as
10695   // the act of vectorizing or partially unrolling a loop creates new loops
10696   // and can invalidate iterators across the loops.
10697   SmallVector<Loop *, 8> Worklist;
10698 
10699   for (Loop *L : *LI)
10700     collectSupportedLoops(*L, LI, ORE, Worklist);
10701 
10702   LoopsAnalyzed += Worklist.size();
10703 
10704   // Now walk the identified inner loops.
10705   while (!Worklist.empty()) {
10706     Loop *L = Worklist.pop_back_val();
10707 
10708     // For the inner loops we actually process, form LCSSA to simplify the
10709     // transform.
10710     Changed |= formLCSSARecursively(*L, *DT, LI, SE);
10711 
10712     Changed |= CFGChanged |= processLoop(L);
10713   }
10714 
10715   // Process each loop nest in the function.
10716   return LoopVectorizeResult(Changed, CFGChanged);
10717 }
10718 
10719 PreservedAnalyses LoopVectorizePass::run(Function &F,
10720                                          FunctionAnalysisManager &AM) {
10721     auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
10722     auto &LI = AM.getResult<LoopAnalysis>(F);
10723     auto &TTI = AM.getResult<TargetIRAnalysis>(F);
10724     auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
10725     auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
10726     auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
10727     auto &AA = AM.getResult<AAManager>(F);
10728     auto &AC = AM.getResult<AssumptionAnalysis>(F);
10729     auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
10730     auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
10731 
10732     auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
10733     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
10734         [&](Loop &L) -> const LoopAccessInfo & {
10735       LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,      SE,
10736                                         TLI, TTI, nullptr, nullptr, nullptr};
10737       return LAM.getResult<LoopAccessAnalysis>(L, AR);
10738     };
10739     auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
10740     ProfileSummaryInfo *PSI =
10741         MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
10742     LoopVectorizeResult Result =
10743         runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
10744     if (!Result.MadeAnyChange)
10745       return PreservedAnalyses::all();
10746     PreservedAnalyses PA;
10747 
10748     // We currently do not preserve loopinfo/dominator analyses with outer loop
10749     // vectorization. Until this is addressed, mark these analyses as preserved
    // only for the non-VPlan-native path.
10751     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
10752     if (!EnableVPlanNativePath) {
10753       PA.preserve<LoopAnalysis>();
10754       PA.preserve<DominatorTreeAnalysis>();
10755     }
10756 
10757     if (Result.MadeCFGChange) {
10758       // Making CFG changes likely means a loop got vectorized. Indicate that
10759       // extra simplification passes should be run.
      // TODO: MadeCFGChange is not a perfect proxy. Extra passes should only
10761       // be run if runtime checks have been added.
10762       AM.getResult<ShouldRunExtraVectorPasses>(F);
10763       PA.preserve<ShouldRunExtraVectorPasses>();
10764     } else {
10765       PA.preserveSet<CFGAnalyses>();
10766     }
10767     return PA;
10768 }
10769 
10770 void LoopVectorizePass::printPipeline(
10771     raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
10772   static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
10773       OS, MapClassName2PassName);
10774 
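  // With default options this is expected to produce something like
  // "loop-vectorize<no-interleave-forced-only;no-vectorize-forced-only>".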
10775   OS << "<";
10776   OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
10777   OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
10778   OS << ">";
10779 }
10780