1 //===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
10 // and generates target-independent LLVM-IR.
11 // The vectorizer uses the TargetTransformInfo analysis to estimate the costs
12 // of instructions in order to estimate the profitability of vectorization.
13 //
14 // The loop vectorizer combines consecutive loop iterations into a single
15 // 'wide' iteration. After this transformation the index is incremented
16 // by the SIMD vector width, and not by one.
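//
// As an illustrative sketch (C-like pseudocode, not the actual generated IR),
// with a vector width of 4 a scalar loop such as
//   for (i = 0; i < n; i++) a[i] = b[i] + 1;
// conceptually becomes a loop whose every iteration handles four elements:
//   for (i = 0; i + 4 <= n; i += 4) a[i..i+3] = b[i..i+3] + 1;
// with any remaining iterations handled by a scalar remainder loop.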
17 //
// This pass has four parts:
19 // 1. The main loop pass that drives the different parts.
20 // 2. LoopVectorizationLegality - A unit that checks for the legality
21 //    of the vectorization.
22 // 3. InnerLoopVectorizer - A unit that performs the actual
23 //    widening of instructions.
24 // 4. LoopVectorizationCostModel - A unit that checks for the profitability
25 //    of vectorization. It decides on the optimal vector width, which
26 //    can be one, if vectorization is not profitable.
27 //
// There is an ongoing development effort to migrate the loop vectorizer to the
29 // VPlan infrastructure and to introduce outer loop vectorization support (see
30 // docs/Proposal/VectorizationPlan.rst and
31 // http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
32 // purpose, we temporarily introduced the VPlan-native vectorization path: an
33 // alternative vectorization path that is natively implemented on top of the
34 // VPlan infrastructure. See EnableVPlanNativePath for enabling.
35 //
36 //===----------------------------------------------------------------------===//
37 //
38 // The reduction-variable vectorization is based on the paper:
39 //  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
40 //
41 // Variable uniformity checks are inspired by:
42 //  Karrenberg, R. and Hack, S. Whole Function Vectorization.
43 //
44 // The interleaved access vectorization is based on the paper:
45 //  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
46 //  Data for SIMD
47 //
48 // Other ideas/concepts are from:
49 //  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
50 //
51 //  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
52 //  Vectorizing Compilers.
53 //
54 //===----------------------------------------------------------------------===//
55 
56 #include "llvm/Transforms/Vectorize/LoopVectorize.h"
57 #include "LoopVectorizationPlanner.h"
58 #include "VPRecipeBuilder.h"
59 #include "VPlan.h"
60 #include "VPlanHCFGBuilder.h"
61 #include "VPlanPredicator.h"
62 #include "VPlanTransforms.h"
63 #include "llvm/ADT/APInt.h"
64 #include "llvm/ADT/ArrayRef.h"
65 #include "llvm/ADT/DenseMap.h"
66 #include "llvm/ADT/DenseMapInfo.h"
67 #include "llvm/ADT/Hashing.h"
68 #include "llvm/ADT/MapVector.h"
69 #include "llvm/ADT/None.h"
70 #include "llvm/ADT/Optional.h"
71 #include "llvm/ADT/STLExtras.h"
72 #include "llvm/ADT/SmallPtrSet.h"
73 #include "llvm/ADT/SmallSet.h"
74 #include "llvm/ADT/SmallVector.h"
75 #include "llvm/ADT/Statistic.h"
76 #include "llvm/ADT/StringRef.h"
77 #include "llvm/ADT/Twine.h"
78 #include "llvm/ADT/iterator_range.h"
79 #include "llvm/Analysis/AssumptionCache.h"
80 #include "llvm/Analysis/BasicAliasAnalysis.h"
81 #include "llvm/Analysis/BlockFrequencyInfo.h"
82 #include "llvm/Analysis/CFG.h"
83 #include "llvm/Analysis/CodeMetrics.h"
84 #include "llvm/Analysis/DemandedBits.h"
85 #include "llvm/Analysis/GlobalsModRef.h"
86 #include "llvm/Analysis/LoopAccessAnalysis.h"
87 #include "llvm/Analysis/LoopAnalysisManager.h"
88 #include "llvm/Analysis/LoopInfo.h"
89 #include "llvm/Analysis/LoopIterator.h"
90 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
91 #include "llvm/Analysis/ProfileSummaryInfo.h"
92 #include "llvm/Analysis/ScalarEvolution.h"
93 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
94 #include "llvm/Analysis/TargetLibraryInfo.h"
95 #include "llvm/Analysis/TargetTransformInfo.h"
96 #include "llvm/Analysis/VectorUtils.h"
97 #include "llvm/IR/Attributes.h"
98 #include "llvm/IR/BasicBlock.h"
99 #include "llvm/IR/CFG.h"
100 #include "llvm/IR/Constant.h"
101 #include "llvm/IR/Constants.h"
102 #include "llvm/IR/DataLayout.h"
103 #include "llvm/IR/DebugInfoMetadata.h"
104 #include "llvm/IR/DebugLoc.h"
105 #include "llvm/IR/DerivedTypes.h"
106 #include "llvm/IR/DiagnosticInfo.h"
107 #include "llvm/IR/Dominators.h"
108 #include "llvm/IR/Function.h"
109 #include "llvm/IR/IRBuilder.h"
110 #include "llvm/IR/InstrTypes.h"
111 #include "llvm/IR/Instruction.h"
112 #include "llvm/IR/Instructions.h"
113 #include "llvm/IR/IntrinsicInst.h"
114 #include "llvm/IR/Intrinsics.h"
115 #include "llvm/IR/LLVMContext.h"
116 #include "llvm/IR/Metadata.h"
117 #include "llvm/IR/Module.h"
118 #include "llvm/IR/Operator.h"
119 #include "llvm/IR/PatternMatch.h"
120 #include "llvm/IR/Type.h"
121 #include "llvm/IR/Use.h"
122 #include "llvm/IR/User.h"
123 #include "llvm/IR/Value.h"
124 #include "llvm/IR/ValueHandle.h"
125 #include "llvm/IR/Verifier.h"
126 #include "llvm/InitializePasses.h"
127 #include "llvm/Pass.h"
128 #include "llvm/Support/Casting.h"
129 #include "llvm/Support/CommandLine.h"
130 #include "llvm/Support/Compiler.h"
131 #include "llvm/Support/Debug.h"
132 #include "llvm/Support/ErrorHandling.h"
133 #include "llvm/Support/InstructionCost.h"
134 #include "llvm/Support/MathExtras.h"
135 #include "llvm/Support/raw_ostream.h"
136 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
137 #include "llvm/Transforms/Utils/InjectTLIMappings.h"
138 #include "llvm/Transforms/Utils/LoopSimplify.h"
139 #include "llvm/Transforms/Utils/LoopUtils.h"
140 #include "llvm/Transforms/Utils/LoopVersioning.h"
141 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
142 #include "llvm/Transforms/Utils/SizeOpts.h"
143 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
144 #include <algorithm>
145 #include <cassert>
146 #include <cstdint>
147 #include <cstdlib>
148 #include <functional>
149 #include <iterator>
150 #include <limits>
151 #include <memory>
152 #include <string>
153 #include <tuple>
154 #include <utility>
155 
156 using namespace llvm;
157 
158 #define LV_NAME "loop-vectorize"
159 #define DEBUG_TYPE LV_NAME
160 
161 #ifndef NDEBUG
162 const char VerboseDebug[] = DEBUG_TYPE "-verbose";
163 #endif
164 
165 /// @{
166 /// Metadata attribute names
167 const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
168 const char LLVMLoopVectorizeFollowupVectorized[] =
169     "llvm.loop.vectorize.followup_vectorized";
170 const char LLVMLoopVectorizeFollowupEpilogue[] =
171     "llvm.loop.vectorize.followup_epilogue";
172 /// @}
173 
174 STATISTIC(LoopsVectorized, "Number of loops vectorized");
175 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
176 STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");
177 
178 static cl::opt<bool> EnableEpilogueVectorization(
179     "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
180     cl::desc("Enable vectorization of epilogue loops."));
181 
182 static cl::opt<unsigned> EpilogueVectorizationForceVF(
183     "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
184     cl::desc("When epilogue vectorization is enabled, and a value greater than "
185              "1 is specified, forces the given VF for all applicable epilogue "
186              "loops."));
187 
188 static cl::opt<unsigned> EpilogueVectorizationMinVF(
189     "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
190     cl::desc("Only loops with vectorization factor equal to or larger than "
191              "the specified value are considered for epilogue vectorization."));
192 
193 /// Loops with a known constant trip count below this number are vectorized only
194 /// if no scalar iteration overheads are incurred.
195 static cl::opt<unsigned> TinyTripCountVectorThreshold(
196     "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
197     cl::desc("Loops with a constant trip count that is smaller than this "
198              "value are vectorized only if no scalar iteration overheads "
199              "are incurred."));
200 
201 static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
202     "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
203     cl::desc("The maximum allowed number of runtime memory checks with a "
204              "vectorize(enable) pragma."));
205 
// The option prefer-predicate-over-epilogue indicates that an epilogue is
// undesired and that predication is preferred; the values below list the
// available strategies. I.e., the vectorizer will try to fold the tail loop
// (epilogue) into the vector body and predicate the instructions accordingly.
// If tail-folding fails, the fallback strategy depends on these values:
211 namespace PreferPredicateTy {
212   enum Option {
213     ScalarEpilogue = 0,
214     PredicateElseScalarEpilogue,
215     PredicateOrDontVectorize
216   };
217 } // namespace PreferPredicateTy
218 
219 static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
220     "prefer-predicate-over-epilogue",
221     cl::init(PreferPredicateTy::ScalarEpilogue),
222     cl::Hidden,
223     cl::desc("Tail-folding and predication preferences over creating a scalar "
224              "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "Prefer tail-folding, create scalar epilogue if tail "
                          "folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "Prefer tail-folding, don't attempt vectorization if "
                          "tail-folding fails.")));
236 
237 static cl::opt<bool> MaximizeBandwidth(
238     "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
239     cl::desc("Maximize bandwidth when selecting vectorization factor which "
240              "will be determined by the smallest type in loop."));
241 
242 static cl::opt<bool> EnableInterleavedMemAccesses(
243     "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
244     cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
245 
246 /// An interleave-group may need masking if it resides in a block that needs
247 /// predication, or in order to mask away gaps.
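/// For example (illustrative only), a group that loads A[3*i] and A[3*i+1] but
/// not A[3*i+2] leaves a gap; a wide load covering all three members must mask
/// off the unused elements, e.g. to avoid accessing memory past the end of A.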
248 static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
249     "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc(
        "Enable vectorization on masked interleaved memory accesses in a loop"));
251 
252 static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
253     "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
255              "below this number"));
256 
257 static cl::opt<unsigned> ForceTargetNumScalarRegs(
258     "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
259     cl::desc("A flag that overrides the target's number of scalar registers."));
260 
261 static cl::opt<unsigned> ForceTargetNumVectorRegs(
262     "force-target-num-vector-regs", cl::init(0), cl::Hidden,
263     cl::desc("A flag that overrides the target's number of vector registers."));
264 
265 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
266     "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
267     cl::desc("A flag that overrides the target's max interleave factor for "
268              "scalar loops."));
269 
270 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
271     "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
272     cl::desc("A flag that overrides the target's max interleave factor for "
273              "vectorized loops."));
274 
275 static cl::opt<unsigned> ForceTargetInstructionCost(
276     "force-target-instruction-cost", cl::init(0), cl::Hidden,
277     cl::desc("A flag that overrides the target's expected cost for "
278              "an instruction to a single constant value. Mostly "
279              "useful for getting consistent testing."));
280 
281 static cl::opt<bool> ForceTargetSupportsScalableVectors(
282     "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
283     cl::desc(
284         "Pretend that scalable vectors are supported, even if the target does "
285         "not support them. This flag should only be used for testing."));
286 
287 static cl::opt<unsigned> SmallLoopCost(
288     "small-loop-cost", cl::init(20), cl::Hidden,
289     cl::desc(
290         "The cost of a loop that is considered 'small' by the interleaver."));
291 
292 static cl::opt<bool> LoopVectorizeWithBlockFrequency(
293     "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics that minimize code growth in cold regions and allow "
             "more aggressive vectorization in hot regions."));
297 
298 // Runtime interleave loops for load/store throughput.
299 static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
300     "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
301     cl::desc(
302         "Enable runtime interleaving until load/store ports are saturated"));
303 
304 /// Interleave small loops with scalar reductions.
305 static cl::opt<bool> InterleaveSmallLoopScalarReduction(
306     "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
307     cl::desc("Enable interleaving for loops with small iteration counts that "
308              "contain scalar reductions to expose ILP."));
309 
310 /// The number of stores in a loop that are allowed to need predication.
311 static cl::opt<unsigned> NumberOfStoresToPredicate(
312     "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
313     cl::desc("Max number of stores to be predicated behind an if."));
314 
315 static cl::opt<bool> EnableIndVarRegisterHeur(
316     "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
317     cl::desc("Count the induction variable only once when interleaving"));
318 
319 static cl::opt<bool> EnableCondStoresVectorization(
320     "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if-predication of stores during vectorization."));
322 
323 static cl::opt<unsigned> MaxNestedScalarReductionIC(
324     "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
325     cl::desc("The maximum interleave count to use when interleaving a scalar "
326              "reduction in a nested loop."));
327 
328 static cl::opt<bool>
329     PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
330                            cl::Hidden,
331                            cl::desc("Prefer in-loop vector reductions, "
                                    "overriding the target's preference."));
333 
334 static cl::opt<bool> ForceOrderedReductions(
335     "force-ordered-reductions", cl::init(false), cl::Hidden,
    cl::desc("Enable the vectorization of loops with in-order (strict) "
337              "FP reductions"));
338 
339 static cl::opt<bool> PreferPredicatedReductionSelect(
340     "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
341     cl::desc(
        "Prefer predicating a reduction operation over an after-loop select."));
343 
344 cl::opt<bool> EnableVPlanNativePath(
345     "enable-vplan-native-path", cl::init(false), cl::Hidden,
346     cl::desc("Enable VPlan-native vectorization path with "
347              "support for outer loop vectorization."));
348 
349 // FIXME: Remove this switch once we have divergence analysis. Currently we
350 // assume divergent non-backedge branches when this switch is true.
351 cl::opt<bool> EnableVPlanPredication(
352     "enable-vplan-predication", cl::init(false), cl::Hidden,
353     cl::desc("Enable VPlan-native vectorization path predicator with "
354              "support for outer loop vectorization."));
355 
356 // This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
358 // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
359 // verification of the H-CFGs built.
360 static cl::opt<bool> VPlanBuildStressTest(
361     "vplan-build-stress-test", cl::init(false), cl::Hidden,
362     cl::desc(
363         "Build VPlan for every supported loop nest in the function and bail "
364         "out right after the build (stress test the VPlan H-CFG construction "
365         "in the VPlan-native vectorization path)."));
366 
367 cl::opt<bool> llvm::EnableLoopInterleaving(
368     "interleave-loops", cl::init(true), cl::Hidden,
369     cl::desc("Enable loop interleaving in Loop vectorization passes"));
370 cl::opt<bool> llvm::EnableLoopVectorization(
371     "vectorize-loops", cl::init(true), cl::Hidden,
372     cl::desc("Run the Loop vectorization passes"));
373 
374 cl::opt<bool> PrintVPlansInDotFormat(
375     "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
376     cl::desc("Use dot format instead of plain text when dumping VPlans"));
377 
/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size in bits doesn't equal its size in
/// bits, i.e., there is padding between consecutive elements of that type.
381 static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
382   // Determine if an array of N elements of type Ty is "bitcast compatible"
383   // with a <N x Ty> vector.
384   // This is only true if there is no padding between the array elements.
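  // For example (assuming a typical x86 data layout), x86_fp80 has a type size
  // of 80 bits but an alloc size of 96 or 128 bits, so it is irregular, while
  // i32 (32 bits for both) is not.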
385   return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
386 }
387 
388 /// A helper function that returns the reciprocal of the block probability of
389 /// predicated blocks. If we return X, we are assuming the predicated block
390 /// will execute once for every X iterations of the loop header.
391 ///
392 /// TODO: We should use actual block probability here, if available. Currently,
393 ///       we always assume predicated blocks have a 50% chance of executing.
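/// (A return value of 2 thus models a 50% execution probability; callers
/// typically scale a predicated block's cost by the reciprocal, i.e. 1/2.)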
394 static unsigned getReciprocalPredBlockProb() { return 2; }
395 
396 /// A helper function that returns an integer or floating-point constant with
397 /// value C.
398 static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
399   return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
400                            : ConstantFP::get(Ty, C);
401 }
402 
403 /// Returns "best known" trip count for the specified loop \p L as defined by
404 /// the following procedure:
405 ///   1) Returns exact trip count if it is known.
406 ///   2) Returns expected trip count according to profile data if any.
407 ///   3) Returns upper bound estimate if it is known.
408 ///   4) Returns None if all of the above failed.
409 static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
410   // Check if exact trip count is known.
411   if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
412     return ExpectedTC;
413 
414   // Check if there is an expected trip count available from profile data.
415   if (LoopVectorizeWithBlockFrequency)
416     if (auto EstimatedTC = getLoopEstimatedTripCount(L))
417       return EstimatedTC;
418 
419   // Check if upper bound estimate is known.
420   if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
421     return ExpectedTC;
422 
423   return None;
424 }
425 
426 // Forward declare GeneratedRTChecks.
427 class GeneratedRTChecks;
428 
429 namespace llvm {
430 
431 AnalysisKey ShouldRunExtraVectorPasses::Key;
432 
433 /// InnerLoopVectorizer vectorizes loops which contain only one basic
434 /// block to a specified vectorization factor (VF).
435 /// This class performs the widening of scalars into vectors, or multiple
436 /// scalars. This class also implements the following features:
437 /// * It inserts an epilogue loop for handling loops that don't have iteration
438 ///   counts that are known to be a multiple of the vectorization factor.
439 /// * It handles the code generation for reduction variables.
440 /// * Scalarization (implementation using scalars) of un-vectorizable
441 ///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks; it relies on the caller to check the different legality aspects,
/// and on the LoopVectorizationLegality class to provide information about
/// the induction and reduction variables that were found in the loop.
447 class InnerLoopVectorizer {
448 public:
449   InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
450                       LoopInfo *LI, DominatorTree *DT,
451                       const TargetLibraryInfo *TLI,
452                       const TargetTransformInfo *TTI, AssumptionCache *AC,
453                       OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
454                       unsigned UnrollFactor, LoopVectorizationLegality *LVL,
455                       LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
456                       ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
457       : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
458         AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
459         Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
460         PSI(PSI), RTChecks(RTChecks) {
461     // Query this against the original loop and save it here because the profile
462     // of the original loop header may change as the transformation happens.
463     OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
464         OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
465   }
466 
467   virtual ~InnerLoopVectorizer() = default;
468 
469   /// Create a new empty loop that will contain vectorized instructions later
470   /// on, while the old loop will be used as the scalar remainder. Control flow
471   /// is generated around the vectorized (and scalar epilogue) loops consisting
472   /// of various checks and bypasses. Return the pre-header block of the new
473   /// loop and the start value for the canonical induction, if it is != 0. The
474   /// latter is the case when vectorizing the epilogue loop. In the case of
  /// epilogue vectorization, this function is overridden to handle the more
476   /// complex control flow around the loops.
477   virtual std::pair<BasicBlock *, Value *> createVectorizedLoopSkeleton();
478 
479   /// Widen a single call instruction within the innermost loop.
480   void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
481                             VPTransformState &State);
482 
  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
484   void fixVectorizedLoop(VPTransformState &State);
485 
486   // Return true if any runtime check is added.
487   bool areSafetyChecksAdded() { return AddedSafetyChecks; }
488 
489   /// A type for vectorized values in the new loop. Each value from the
490   /// original loop, when vectorized, is represented by UF vector values in the
491   /// new unrolled loop, where UF is the unroll factor.
492   using VectorParts = SmallVector<Value *, 2>;
493 
494   /// Vectorize a single first-order recurrence or pointer induction PHINode in
495   /// a block. This method handles the induction variable canonicalization. It
496   /// supports both VF = 1 for unrolled loops and arbitrary length vectors.
497   void widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR,
498                            VPTransformState &State);
499 
500   /// A helper function to scalarize a single Instruction in the innermost loop.
501   /// Generates a sequence of scalar instances for each lane between \p MinLane
502   /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
503   /// inclusive. Uses the VPValue operands from \p RepRecipe instead of \p
504   /// Instr's operands.
505   void scalarizeInstruction(Instruction *Instr, VPReplicateRecipe *RepRecipe,
506                             const VPIteration &Instance, bool IfPredicateInstr,
507                             VPTransformState &State);
508 
509   /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
510   /// is provided, the integer induction variable will first be truncated to
511   /// the corresponding type. \p CanonicalIV is the scalar value generated for
512   /// the canonical induction variable.
513   void widenIntOrFpInduction(PHINode *IV, const InductionDescriptor &ID,
514                              Value *Start, TruncInst *Trunc, VPValue *Def,
515                              VPTransformState &State, Value *CanonicalIV);
516 
517   /// Construct the vector value of a scalarized value \p V one lane at a time.
518   void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
519                                  VPTransformState &State);
520 
521   /// Try to vectorize interleaved access group \p Group with the base address
522   /// given in \p Addr, optionally masking the vector operations if \p
523   /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
524   /// values in the vectorized loop.
525   void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
526                                 ArrayRef<VPValue *> VPDefs,
527                                 VPTransformState &State, VPValue *Addr,
528                                 ArrayRef<VPValue *> StoredValues,
529                                 VPValue *BlockInMask = nullptr);
530 
  /// Set the debug location in the builder \p CustomBuilder using the debug
  /// location in \p V. If \p CustomBuilder is None then the class member's
  /// Builder is used.
533   void setDebugLocFromInst(const Value *V,
534                            Optional<IRBuilder<> *> CustomBuilder = None);
535 
536   /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
537   void fixNonInductionPHIs(VPTransformState &State);
538 
539   /// Returns true if the reordering of FP operations is not allowed, but we are
540   /// able to vectorize with strict in-order reductions for the given RdxDesc.
541   bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc);
542 
543   /// Create a broadcast instruction. This method generates a broadcast
544   /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// This is needed because each iteration in the loop corresponds to a SIMD
547   /// element.
548   virtual Value *getBroadcastInstrs(Value *V);
549 
550   /// Add metadata from one instruction to another.
551   ///
552   /// This includes both the original MDs from \p From and additional ones (\see
553   /// addNewMetadata).  Use this for *newly created* instructions in the vector
554   /// loop.
555   void addMetadata(Instruction *To, Instruction *From);
556 
557   /// Similar to the previous function but it adds the metadata to a
558   /// vector of instructions.
559   void addMetadata(ArrayRef<Value *> To, Instruction *From);
560 
561 protected:
562   friend class LoopVectorizationPlanner;
563 
564   /// A small list of PHINodes.
565   using PhiVector = SmallVector<PHINode *, 4>;
566 
567   /// A type for scalarized values in the new loop. Each value from the
568   /// original loop, when scalarized, is represented by UF x VF scalar values
569   /// in the new unrolled loop, where UF is the unroll factor and VF is the
570   /// vectorization factor.
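  /// (E.g., with UF = 2 and VF = 4 each original value is represented by 8
  /// scalar values, organized as 2 parts of 4 lanes each.)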
571   using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;
572 
573   /// Set up the values of the IVs correctly when exiting the vector loop.
574   void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
575                     Value *CountRoundDown, Value *EndValue,
576                     BasicBlock *MiddleBlock);
577 
578   /// Introduce a conditional branch (on true, condition to be set later) at the
579   /// end of the header=latch connecting it to itself (across the backedge) and
580   /// to the exit block of \p L.
581   void createHeaderBranch(Loop *L);
582 
583   /// Handle all cross-iteration phis in the header.
584   void fixCrossIterationPHIs(VPTransformState &State);
585 
586   /// Create the exit value of first order recurrences in the middle block and
587   /// update their users.
588   void fixFirstOrderRecurrence(VPFirstOrderRecurrencePHIRecipe *PhiR,
589                                VPTransformState &State);
590 
591   /// Create code for the loop exit value of the reduction.
592   void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);
593 
594   /// Clear NSW/NUW flags from reduction instructions if necessary.
595   void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
596                                VPTransformState &State);
597 
598   /// Fixup the LCSSA phi nodes in the unique exit block.  This simply
599   /// means we need to add the appropriate incoming value from the middle
600   /// block as exiting edges from the scalar epilogue loop (if present) are
601   /// already in place, and we exit the vector loop exclusively to the middle
602   /// block.
603   void fixLCSSAPHIs(VPTransformState &State);
604 
605   /// Iteratively sink the scalarized operands of a predicated instruction into
606   /// the block that was created for it.
607   void sinkScalarOperands(Instruction *PredInst);
608 
609   /// Shrinks vector element sizes to the smallest bitwidth they can be legally
610   /// represented as.
611   void truncateToMinimalBitwidths(VPTransformState &State);
612 
613   /// Compute scalar induction steps. \p ScalarIV is the scalar induction
614   /// variable on which to base the steps, \p Step is the size of the step, and
615   /// \p EntryVal is the value from the original loop that maps to the steps.
616   /// Note that \p EntryVal doesn't have to be an induction variable - it
617   /// can also be a truncate instruction.
618   void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
619                         const InductionDescriptor &ID, VPValue *Def,
620                         VPTransformState &State);
621 
622   /// Create a vector induction phi node based on an existing scalar one. \p
623   /// EntryVal is the value from the original loop that maps to the vector phi
624   /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
625   /// truncate instruction, instead of widening the original IV, we widen a
626   /// version of the IV truncated to \p EntryVal's type.
627   void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
628                                        Value *Step, Value *Start,
629                                        Instruction *EntryVal, VPValue *Def,
630                                        VPTransformState &State);
631 
632   /// Returns true if an instruction \p I should be scalarized instead of
633   /// vectorized for the chosen vectorization factor.
634   bool shouldScalarizeInstruction(Instruction *I) const;
635 
636   /// Returns true if we should generate a scalar version of \p IV.
637   bool needsScalarInduction(Instruction *IV) const;
638 
639   /// Returns (and creates if needed) the original loop trip count.
640   Value *getOrCreateTripCount(Loop *NewLoop);
641 
642   /// Returns (and creates if needed) the trip count of the widened loop.
643   Value *getOrCreateVectorTripCount(Loop *NewLoop);
644 
645   /// Returns a bitcasted value to the requested vector type.
646   /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
647   Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
648                                 const DataLayout &DL);
649 
650   /// Emit a bypass check to see if the vector trip count is zero, including if
651   /// it overflows.
652   void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
653 
654   /// Emit a bypass check to see if all of the SCEV assumptions we've
655   /// had to make are correct. Returns the block containing the checks or
656   /// nullptr if no checks have been added.
657   BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass);
658 
659   /// Emit bypass checks to check any memory assumptions we may have made.
660   /// Returns the block containing the checks or nullptr if no checks have been
661   /// added.
662   BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);
663 
664   /// Compute the transformed value of Index at offset StartValue using step
665   /// StepValue.
666   /// For integer induction, returns StartValue + Index * StepValue.
667   /// For pointer induction, returns StartValue[Index * StepValue].
668   /// FIXME: The newly created binary instructions should contain nsw/nuw
669   /// flags, which can be found from the original scalar operations.
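  /// (E.g., for an integer induction with StartValue = 10 and StepValue = 3,
  /// Index = 4 is transformed to 10 + 4 * 3 = 22.)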
670   Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
671                               const DataLayout &DL,
672                               const InductionDescriptor &ID,
673                               BasicBlock *VectorHeader) const;
674 
675   /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
676   /// vector loop preheader, middle block and scalar preheader. Also
677   /// allocate a loop object for the new vector loop and return it.
678   Loop *createVectorLoopSkeleton(StringRef Prefix);
679 
680   /// Create new phi nodes for the induction variables to resume iteration count
681   /// in the scalar epilogue, from where the vectorized loop left off.
  /// In cases where the loop skeleton is more complicated (e.g., epilogue
683   /// vectorization) and the resume values can come from an additional bypass
684   /// block, the \p AdditionalBypass pair provides information about the bypass
685   /// block and the end value on the edge from bypass to this loop.
686   void createInductionResumeValues(
687       Loop *L,
688       std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});
689 
690   /// Complete the loop skeleton by adding debug MDs, creating appropriate
691   /// conditional branches in the middle block, preparing the builder and
692   /// running the verifier. Take in the vector loop \p L as argument, and return
693   /// the preheader of the completed vector loop.
694   BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);
695 
696   /// Add additional metadata to \p To that was not present on \p Orig.
697   ///
698   /// Currently this is used to add the noalias annotations based on the
699   /// inserted memchecks.  Use this for instructions that are *cloned* into the
700   /// vector loop.
701   void addNewMetadata(Instruction *To, const Instruction *Orig);
702 
703   /// Collect poison-generating recipes that may generate a poison value that is
704   /// used after vectorization, even when their operands are not poison. Those
705   /// recipes meet the following conditions:
706   ///  * Contribute to the address computation of a recipe generating a widen
707   ///    memory load/store (VPWidenMemoryInstructionRecipe or
708   ///    VPInterleaveRecipe).
709   ///  * Such a widen memory load/store has at least one underlying Instruction
710   ///    that is in a basic block that needs predication and after vectorization
711   ///    the generated instruction won't be predicated.
712   void collectPoisonGeneratingRecipes(VPTransformState &State);
713 
714   /// Allow subclasses to override and print debug traces before/after vplan
715   /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart() {}
  virtual void printDebugTracesAtEnd() {}
718 
719   /// The original loop.
720   Loop *OrigLoop;
721 
722   /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
723   /// dynamic knowledge to simplify SCEV expressions and converts them to a
724   /// more usable form.
725   PredicatedScalarEvolution &PSE;
726 
727   /// Loop Info.
728   LoopInfo *LI;
729 
730   /// Dominator Tree.
731   DominatorTree *DT;
732 
733   /// Alias Analysis.
734   AAResults *AA;
735 
736   /// Target Library Info.
737   const TargetLibraryInfo *TLI;
738 
739   /// Target Transform Info.
740   const TargetTransformInfo *TTI;
741 
742   /// Assumption Cache.
743   AssumptionCache *AC;
744 
745   /// Interface to emit optimization remarks.
746   OptimizationRemarkEmitter *ORE;
747 
748   /// LoopVersioning.  It's only set up (non-null) if memchecks were
749   /// used.
750   ///
751   /// This is currently only used to add no-alias metadata based on the
  /// memchecks.  The actual versioning is performed manually.
753   std::unique_ptr<LoopVersioning> LVer;
754 
755   /// The vectorization SIMD factor to use. Each vector will have this many
756   /// vector elements.
757   ElementCount VF;
758 
759   /// The vectorization unroll factor to use. Each scalar is vectorized to this
760   /// many different vector instructions.
761   unsigned UF;
762 
763   /// The builder that we use
764   IRBuilder<> Builder;
765 
766   // --- Vectorization state ---
767 
768   /// The vector-loop preheader.
769   BasicBlock *LoopVectorPreHeader;
770 
771   /// The scalar-loop preheader.
772   BasicBlock *LoopScalarPreHeader;
773 
774   /// Middle Block between the vector and the scalar.
775   BasicBlock *LoopMiddleBlock;
776 
777   /// The unique ExitBlock of the scalar loop if one exists.  Note that
778   /// there can be multiple exiting edges reaching this block.
779   BasicBlock *LoopExitBlock;
780 
781   /// The vector loop body.
782   BasicBlock *LoopVectorBody;
783 
784   /// The scalar loop body.
785   BasicBlock *LoopScalarBody;
786 
787   /// A list of all bypass blocks. The first block is the entry of the loop.
788   SmallVector<BasicBlock *, 4> LoopBypassBlocks;
789 
790   /// Store instructions that were predicated.
791   SmallVector<Instruction *, 4> PredicatedInstructions;
792 
793   /// Trip count of the original loop.
794   Value *TripCount = nullptr;
795 
796   /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
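  /// (E.g., TripCount = 100 with VF = 4 and UF = 2 gives a vector trip count
  /// of 96, leaving 4 iterations for the scalar remainder loop.)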
797   Value *VectorTripCount = nullptr;
798 
799   /// The legality analysis.
800   LoopVectorizationLegality *Legal;
801 
  /// The profitability analysis.
803   LoopVectorizationCostModel *Cost;
804 
805   // Record whether runtime checks are added.
806   bool AddedSafetyChecks = false;
807 
808   // Holds the end values for each induction variable. We save the end values
809   // so we can later fix-up the external users of the induction variables.
810   DenseMap<PHINode *, Value *> IVEndValues;
811 
812   // Vector of original scalar PHIs whose corresponding widened PHIs need to be
813   // fixed up at the end of vector code generation.
814   SmallVector<PHINode *, 8> OrigPHIsToFix;
815 
  /// BFI and PSI are used to check for profile-guided size optimizations.
817   BlockFrequencyInfo *BFI;
818   ProfileSummaryInfo *PSI;
819 
  // Whether this loop should be optimized for size based on profile-guided
  // size optimizations.
822   bool OptForSizeBasedOnProfile;
823 
  /// Structure to hold information about generated runtime checks, responsible
  /// for cleaning up the checks if vectorization turns out to be unprofitable.
826   GeneratedRTChecks &RTChecks;
827 };
828 
829 class InnerLoopUnroller : public InnerLoopVectorizer {
830 public:
831   InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
832                     LoopInfo *LI, DominatorTree *DT,
833                     const TargetLibraryInfo *TLI,
834                     const TargetTransformInfo *TTI, AssumptionCache *AC,
835                     OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
836                     LoopVectorizationLegality *LVL,
837                     LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
838                     ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
839       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
840                             ElementCount::getFixed(1), UnrollFactor, LVL, CM,
841                             BFI, PSI, Check) {}
842 
843 private:
844   Value *getBroadcastInstrs(Value *V) override;
845 };
846 
847 /// Encapsulate information regarding vectorization of a loop and its epilogue.
848 /// This information is meant to be updated and used across two stages of
849 /// epilogue vectorization.
850 struct EpilogueLoopVectorizationInfo {
851   ElementCount MainLoopVF = ElementCount::getFixed(0);
852   unsigned MainLoopUF = 0;
853   ElementCount EpilogueVF = ElementCount::getFixed(0);
854   unsigned EpilogueUF = 0;
855   BasicBlock *MainLoopIterationCountCheck = nullptr;
856   BasicBlock *EpilogueIterationCountCheck = nullptr;
857   BasicBlock *SCEVSafetyCheck = nullptr;
858   BasicBlock *MemSafetyCheck = nullptr;
859   Value *TripCount = nullptr;
860   Value *VectorTripCount = nullptr;
861 
862   EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF,
863                                 ElementCount EVF, unsigned EUF)
864       : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF) {
865     assert(EUF == 1 &&
866            "A high UF for the epilogue loop is likely not beneficial.");
867   }
868 };
869 
870 /// An extension of the inner loop vectorizer that creates a skeleton for a
871 /// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the VPlan on a given loop twice: first to set up the
/// skeleton and vectorize the main loop, and second to complete the skeleton
/// from the first step and vectorize the epilogue.  This is achieved by
875 /// deriving two concrete strategy classes from this base class and invoking
876 /// them in succession from the loop vectorizer planner.
877 class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
878 public:
879   InnerLoopAndEpilogueVectorizer(
880       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
881       DominatorTree *DT, const TargetLibraryInfo *TLI,
882       const TargetTransformInfo *TTI, AssumptionCache *AC,
883       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
884       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
885       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
886       GeneratedRTChecks &Checks)
887       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
888                             EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
889                             Checks),
890         EPI(EPI) {}
891 
892   // Override this function to handle the more complex control flow around the
893   // three loops.
894   std::pair<BasicBlock *, Value *>
895   createVectorizedLoopSkeleton() final override {
896     return createEpilogueVectorizedLoopSkeleton();
897   }
898 
899   /// The interface for creating a vectorized skeleton using one of two
900   /// different strategies, each corresponding to one execution of the vplan
901   /// as described above.
902   virtual std::pair<BasicBlock *, Value *>
903   createEpilogueVectorizedLoopSkeleton() = 0;
904 
905   /// Holds and updates state information required to vectorize the main loop
906   /// and its epilogue in two separate passes. This setup helps us avoid
907   /// regenerating and recomputing runtime safety checks. It also helps us to
908   /// shorten the iteration-count-check path length for the cases where the
909   /// iteration count of the loop is so small that the main vector loop is
910   /// completely skipped.
911   EpilogueLoopVectorizationInfo &EPI;
912 };
913 
914 /// A specialized derived class of inner loop vectorizer that performs
915 /// vectorization of *main* loops in the process of vectorizing loops and their
916 /// epilogues.
917 class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
918 public:
919   EpilogueVectorizerMainLoop(
920       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
921       DominatorTree *DT, const TargetLibraryInfo *TLI,
922       const TargetTransformInfo *TTI, AssumptionCache *AC,
923       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
924       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
925       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
926       GeneratedRTChecks &Check)
927       : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
928                                        EPI, LVL, CM, BFI, PSI, Check) {}
929   /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e., the first pass of VPlan execution).
931   std::pair<BasicBlock *, Value *>
932   createEpilogueVectorizedLoopSkeleton() final override;
933 
934 protected:
935   /// Emits an iteration count bypass check once for the main loop (when \p
936   /// ForEpilogue is false) and once for the epilogue loop (when \p
937   /// ForEpilogue is true).
938   BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
939                                              bool ForEpilogue);
940   void printDebugTracesAtStart() override;
941   void printDebugTracesAtEnd() override;
942 };
943 
/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *epilogue* loops in the process of vectorizing loops and
/// their epilogues.
947 class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
948 public:
949   EpilogueVectorizerEpilogueLoop(
950       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
951       DominatorTree *DT, const TargetLibraryInfo *TLI,
952       const TargetTransformInfo *TTI, AssumptionCache *AC,
953       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
954       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
955       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
956       GeneratedRTChecks &Checks)
957       : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
958                                        EPI, LVL, CM, BFI, PSI, Checks) {}
959   /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e., the second pass of VPlan execution).
961   std::pair<BasicBlock *, Value *>
962   createEpilogueVectorizedLoopSkeleton() final override;
963 
964 protected:
965   /// Emits an iteration count bypass check after the main vector loop has
966   /// finished to see if there are any iterations left to execute by either
967   /// the vector epilogue or the scalar epilogue.
968   BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
969                                                       BasicBlock *Bypass,
970                                                       BasicBlock *Insert);
971   void printDebugTracesAtStart() override;
972   void printDebugTracesAtEnd() override;
973 };
974 } // end namespace llvm
975 
/// Look for a meaningful debug location on the instruction or its
977 /// operands.
978 static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
979   if (!I)
980     return I;
981 
982   DebugLoc Empty;
983   if (I->getDebugLoc() != Empty)
984     return I;
985 
986   for (Use &Op : I->operands()) {
987     if (Instruction *OpInst = dyn_cast<Instruction>(Op))
988       if (OpInst->getDebugLoc() != Empty)
989         return OpInst;
990   }
991 
992   return I;
993 }
994 
995 void InnerLoopVectorizer::setDebugLocFromInst(
996     const Value *V, Optional<IRBuilder<> *> CustomBuilder) {
997   IRBuilder<> *B = (CustomBuilder == None) ? &Builder : *CustomBuilder;
998   if (const Instruction *Inst = dyn_cast_or_null<Instruction>(V)) {
999     const DILocation *DIL = Inst->getDebugLoc();
1000 
    // When an FSDiscriminator is enabled, we don't need to add the multiply
1002     // factors to the discriminators.
1003     if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
1004         !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
1005       // FIXME: For scalable vectors, assume vscale=1.
1006       auto NewDIL =
1007           DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
1008       if (NewDIL)
1009         B->SetCurrentDebugLocation(NewDIL.getValue());
1010       else
1011         LLVM_DEBUG(dbgs()
1012                    << "Failed to create new discriminator: "
1013                    << DIL->getFilename() << " Line: " << DIL->getLine());
1014     } else
1015       B->SetCurrentDebugLocation(DIL);
1016   } else
1017     B->SetCurrentDebugLocation(DebugLoc());
1018 }
1019 
1020 /// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
1021 /// is passed, the message relates to that particular instruction.
1022 #ifndef NDEBUG
1023 static void debugVectorizationMessage(const StringRef Prefix,
1024                                       const StringRef DebugMsg,
1025                                       Instruction *I) {
1026   dbgs() << "LV: " << Prefix << DebugMsg;
1027   if (I != nullptr)
1028     dbgs() << " " << *I;
1029   else
1030     dbgs() << '.';
1031   dbgs() << '\n';
1032 }
1033 #endif
1034 
1035 /// Create an analysis remark that explains why vectorization failed
1036 ///
1037 /// \p PassName is the name of the pass (e.g. can be AlwaysPrint).  \p
1038 /// RemarkName is the identifier for the remark.  If \p I is passed it is an
1039 /// instruction that prevents vectorization.  Otherwise \p TheLoop is used for
1040 /// the location of the remark.  \return the remark object that can be
1041 /// streamed to.
static OptimizationRemarkAnalysis
createLVAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop,
                 Instruction *I) {
1044   Value *CodeRegion = TheLoop->getHeader();
1045   DebugLoc DL = TheLoop->getStartLoc();
1046 
1047   if (I) {
1048     CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back to
    // using the loop's.
1051     if (I->getDebugLoc())
1052       DL = I->getDebugLoc();
1053   }
1054 
1055   return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
1056 }
1057 
1058 namespace llvm {
1059 
1060 /// Return a value for Step multiplied by VF.
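/// For example (illustrative values only), with Ty = i64, Step = 2 and a fixed
/// VF of 4 this returns the constant i64 8; with a scalable VF of <vscale x 4>
/// it returns the runtime value vscale * 8.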
1061 Value *createStepForVF(IRBuilder<> &B, Type *Ty, ElementCount VF,
1062                        int64_t Step) {
1063   assert(Ty->isIntegerTy() && "Expected an integer step");
1064   Constant *StepVal = ConstantInt::get(Ty, Step * VF.getKnownMinValue());
1065   return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
1066 }
1067 
1068 /// Return the runtime value for VF.
1069 Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF) {
1070   Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
1071   return VF.isScalable() ? B.CreateVScale(EC) : EC;
1072 }
1073 
1074 static Value *getRuntimeVFAsFloat(IRBuilder<> &B, Type *FTy, ElementCount VF) {
1075   assert(FTy->isFloatingPointTy() && "Expected floating point type!");
1076   Type *IntTy = IntegerType::get(FTy->getContext(), FTy->getScalarSizeInBits());
1077   Value *RuntimeVF = getRuntimeVF(B, IntTy, VF);
1078   return B.CreateUIToFP(RuntimeVF, FTy);
1079 }
1080 
1081 void reportVectorizationFailure(const StringRef DebugMsg,
1082                                 const StringRef OREMsg, const StringRef ORETag,
1083                                 OptimizationRemarkEmitter *ORE, Loop *TheLoop,
1084                                 Instruction *I) {
1085   LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
1086   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1087   ORE->emit(
1088       createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
1089       << "loop not vectorized: " << OREMsg);
1090 }
1091 
1092 void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
1093                              OptimizationRemarkEmitter *ORE, Loop *TheLoop,
1094                              Instruction *I) {
1095   LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
1096   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1097   ORE->emit(
1098       createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
1099       << Msg);
1100 }
1101 
1102 } // end namespace llvm
1103 
1104 #ifndef NDEBUG
1105 /// \return string containing a file name and a line # for the given loop.
1106 static std::string getDebugLocString(const Loop *L) {
1107   std::string Result;
1108   if (L) {
1109     raw_string_ostream OS(Result);
1110     if (const DebugLoc LoopDbgLoc = L->getStartLoc())
1111       LoopDbgLoc.print(OS);
1112     else
1113       // Just print the module name.
1114       OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
1115     OS.flush();
1116   }
1117   return Result;
1118 }
1119 #endif
1120 
1121 void InnerLoopVectorizer::addNewMetadata(Instruction *To,
1122                                          const Instruction *Orig) {
1123   // If the loop was versioned with memchecks, add the corresponding no-alias
1124   // metadata.
1125   if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
1126     LVer->annotateInstWithNoAlias(To, Orig);
1127 }
1128 
1129 void InnerLoopVectorizer::collectPoisonGeneratingRecipes(
1130     VPTransformState &State) {
1131 
1132   // Collect recipes in the backward slice of `Root` that may generate a poison
1133   // value that is used after vectorization.
1134   SmallPtrSet<VPRecipeBase *, 16> Visited;
1135   auto collectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
1136     SmallVector<VPRecipeBase *, 16> Worklist;
1137     Worklist.push_back(Root);
1138 
1139     // Traverse the backward slice of Root through its use-def chain.
1140     while (!Worklist.empty()) {
1141       VPRecipeBase *CurRec = Worklist.back();
1142       Worklist.pop_back();
1143 
1144       if (!Visited.insert(CurRec).second)
1145         continue;
1146 
1147       // Prune search if we find another recipe generating a widen memory
1148       // instruction. Widen memory instructions involved in address computation
1149       // will lead to gather/scatter instructions, which don't need to be
1150       // handled.
1151       if (isa<VPWidenMemoryInstructionRecipe>(CurRec) ||
1152           isa<VPInterleaveRecipe>(CurRec) ||
1153           isa<VPCanonicalIVPHIRecipe>(CurRec))
1154         continue;
1155 
1156       // This recipe contributes to the address computation of a widen
1157       // load/store. Collect recipe if its underlying instruction has
1158       // poison-generating flags.
1159       Instruction *Instr = CurRec->getUnderlyingInstr();
1160       if (Instr && Instr->hasPoisonGeneratingFlags())
1161         State.MayGeneratePoisonRecipes.insert(CurRec);
1162 
1163       // Add new definitions to the worklist.
1164       for (VPValue *operand : CurRec->operands())
1165         if (VPDef *OpDef = operand->getDef())
1166           Worklist.push_back(cast<VPRecipeBase>(OpDef));
1167     }
1168   });
1169 
  // Traverse all the recipes in the VPlan and collect the poison-generating
  // recipes in the backward slice starting at the address of a
  // VPWidenMemoryInstructionRecipe or VPInterleaveRecipe.
1173   auto Iter = depth_first(
1174       VPBlockRecursiveTraversalWrapper<VPBlockBase *>(State.Plan->getEntry()));
1175   for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
1176     for (VPRecipeBase &Recipe : *VPBB) {
1177       if (auto *WidenRec = dyn_cast<VPWidenMemoryInstructionRecipe>(&Recipe)) {
1178         Instruction *UnderlyingInstr = WidenRec->getUnderlyingInstr();
1179         VPDef *AddrDef = WidenRec->getAddr()->getDef();
1180         if (AddrDef && WidenRec->isConsecutive() && UnderlyingInstr &&
1181             Legal->blockNeedsPredication(UnderlyingInstr->getParent()))
1182           collectPoisonGeneratingInstrsInBackwardSlice(
1183               cast<VPRecipeBase>(AddrDef));
1184       } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
1185         VPDef *AddrDef = InterleaveRec->getAddr()->getDef();
1186         if (AddrDef) {
1187           // Check if any member of the interleave group needs predication.
1188           const InterleaveGroup<Instruction> *InterGroup =
1189               InterleaveRec->getInterleaveGroup();
1190           bool NeedPredication = false;
1191           for (int I = 0, NumMembers = InterGroup->getNumMembers();
1192                I < NumMembers; ++I) {
1193             Instruction *Member = InterGroup->getMember(I);
1194             if (Member)
1195               NeedPredication |=
1196                   Legal->blockNeedsPredication(Member->getParent());
1197           }
1198 
1199           if (NeedPredication)
1200             collectPoisonGeneratingInstrsInBackwardSlice(
1201                 cast<VPRecipeBase>(AddrDef));
1202         }
1203       }
1204     }
1205   }
1206 }
1207 
1208 void InnerLoopVectorizer::addMetadata(Instruction *To,
1209                                       Instruction *From) {
1210   propagateMetadata(To, From);
1211   addNewMetadata(To, From);
1212 }
1213 
1214 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
1215                                       Instruction *From) {
1216   for (Value *V : To) {
1217     if (Instruction *I = dyn_cast<Instruction>(V))
1218       addMetadata(I, From);
1219   }
1220 }
1221 
1222 namespace llvm {
1223 
// Hints for the loop vectorization cost model about how the scalar epilogue
// loop should be lowered.
1226 enum ScalarEpilogueLowering {
1227 
1228   // The default: allowing scalar epilogues.
1229   CM_ScalarEpilogueAllowed,
1230 
1231   // Vectorization with OptForSize: don't allow epilogues.
1232   CM_ScalarEpilogueNotAllowedOptSize,
1233 
  // A special case of vectorization with OptForSize: loops with a very small
1235   // trip count are considered for vectorization under OptForSize, thereby
1236   // making sure the cost of their loop body is dominant, free of runtime
1237   // guards and scalar iteration overheads.
1238   CM_ScalarEpilogueNotAllowedLowTripLoop,
1239 
1240   // Loop hint predicate indicating an epilogue is undesired.
1241   CM_ScalarEpilogueNotNeededUsePredicate,
1242 
  // Directive indicating we must either tail fold or not vectorize.
1244   CM_ScalarEpilogueNotAllowedUsePredicate
1245 };
1246 
1247 /// ElementCountComparator creates a total ordering for ElementCount
1248 /// for the purposes of using it in a set structure.
1249 struct ElementCountComparator {
1250   bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
1251     return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
1252            std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
1253   }
1254 };
1255 using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;
1256 
1257 /// LoopVectorizationCostModel - estimates the expected speedups due to
1258 /// vectorization.
1259 /// In many cases vectorization is not profitable. This can happen because of
1260 /// a number of reasons. In this class we mainly attempt to predict the
1261 /// expected speedup/slowdowns due to the supported instruction set. We use the
1262 /// TargetTransformInfo to query the different backends for the cost of
1263 /// different operations.
1264 class LoopVectorizationCostModel {
1265 public:
1266   LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
1267                              PredicatedScalarEvolution &PSE, LoopInfo *LI,
1268                              LoopVectorizationLegality *Legal,
1269                              const TargetTransformInfo &TTI,
1270                              const TargetLibraryInfo *TLI, DemandedBits *DB,
1271                              AssumptionCache *AC,
1272                              OptimizationRemarkEmitter *ORE, const Function *F,
1273                              const LoopVectorizeHints *Hints,
1274                              InterleavedAccessInfo &IAI)
1275       : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
1276         TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
1277         Hints(Hints), InterleaveInfo(IAI) {}
1278 
1279   /// \return An upper bound for the vectorization factors (both fixed and
1280   /// scalable). If the factors are 0, vectorization and interleaving should be
1281   /// avoided up front.
1282   FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);
1283 
1284   /// \return True if runtime checks are required for vectorization, and false
1285   /// otherwise.
1286   bool runtimeChecksRequired();
1287 
1288   /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every VF in \p CandidateVFs. If UserVF is not zero,
  /// that vectorization factor is selected if vectorization is possible.
1292   VectorizationFactor
1293   selectVectorizationFactor(const ElementCountSet &CandidateVFs);
1294 
1295   VectorizationFactor
1296   selectEpilogueVectorizationFactor(const ElementCount MaxVF,
1297                                     const LoopVectorizationPlanner &LVP);
1298 
1299   /// Setup cost-based decisions for user vectorization factor.
1300   /// \return true if the UserVF is a feasible VF to be chosen.
1301   bool selectUserVectorizationFactor(ElementCount UserVF) {
1302     collectUniformsAndScalars(UserVF);
1303     collectInstsToScalarize(UserVF);
1304     return expectedCost(UserVF).first.isValid();
1305   }
1306 
1307   /// \return The size (in bits) of the smallest and widest types in the code
1308   /// that needs to be vectorized. We ignore values that remain scalar such as
1309   /// 64 bit loop indices.
1310   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1311 
1312   /// \return The desired interleave count.
1313   /// If interleave count has been specified by metadata it will be returned.
1314   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1315   /// are the selected vectorization factor and the cost of the selected VF.
1316   unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);
1317 
  /// A memory access instruction may be vectorized in more than one way; the
  /// form it takes after vectorization depends on cost.
  /// This function makes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decision map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
  /// avoid redundant calculations.
1325   void setCostBasedWideningDecision(ElementCount VF);
1326 
1327   /// A struct that represents some properties of the register usage
1328   /// of a loop.
1329   struct RegisterUsage {
1330     /// Holds the number of loop invariant values that are used in the loop.
1331     /// The key is ClassID of target-provided register class.
1332     SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
1333     /// Holds the maximum number of concurrent live intervals in the loop.
1334     /// The key is ClassID of target-provided register class.
1335     SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
1336   };
1337 
  /// \return Information about the register usage of the loop for the given
  /// vectorization factors.
1340   SmallVector<RegisterUsage, 8>
1341   calculateRegisterUsage(ArrayRef<ElementCount> VFs);
1342 
1343   /// Collect values we want to ignore in the cost model.
1344   void collectValuesToIgnore();
1345 
1346   /// Collect all element types in the loop for which widening is needed.
1347   void collectElementTypesForWidening();
1348 
  /// Split reductions into those that happen in the loop, and those that
  /// happen outside. In-loop reductions are collected into
  /// InLoopReductionChains.
1351   void collectInLoopReductions();
1352 
1353   /// Returns true if we should use strict in-order reductions for the given
1354   /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
1355   /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
1356   /// of FP operations.
1357   bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) {
1358     return !Hints->allowReordering() && RdxDesc.isOrdered();
1359   }
1360 
1361   /// \returns The smallest bitwidth each instruction can be represented with.
1362   /// The vector equivalents of these instructions should be truncated to this
1363   /// type.
1364   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1365     return MinBWs;
1366   }
1367 
1368   /// \returns True if it is more profitable to scalarize instruction \p I for
1369   /// vectorization factor \p VF.
1370   bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
1371     assert(VF.isVector() &&
1372            "Profitable to scalarize relevant only for VF > 1.");
1373 
1374     // Cost model is not run in the VPlan-native path - return conservative
1375     // result until this changes.
1376     if (EnableVPlanNativePath)
1377       return false;
1378 
1379     auto Scalars = InstsToScalarize.find(VF);
1380     assert(Scalars != InstsToScalarize.end() &&
1381            "VF not yet analyzed for scalarization profitability");
1382     return Scalars->second.find(I) != Scalars->second.end();
1383   }
1384 
1385   /// Returns true if \p I is known to be uniform after vectorization.
1386   bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
1387     if (VF.isScalar())
1388       return true;
1389 
1390     // Cost model is not run in the VPlan-native path - return conservative
1391     // result until this changes.
1392     if (EnableVPlanNativePath)
1393       return false;
1394 
1395     auto UniformsPerVF = Uniforms.find(VF);
1396     assert(UniformsPerVF != Uniforms.end() &&
1397            "VF not yet analyzed for uniformity");
1398     return UniformsPerVF->second.count(I);
1399   }
1400 
1401   /// Returns true if \p I is known to be scalar after vectorization.
1402   bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
1403     if (VF.isScalar())
1404       return true;
1405 
1406     // Cost model is not run in the VPlan-native path - return conservative
1407     // result until this changes.
1408     if (EnableVPlanNativePath)
1409       return false;
1410 
1411     auto ScalarsPerVF = Scalars.find(VF);
1412     assert(ScalarsPerVF != Scalars.end() &&
1413            "Scalar values are not calculated for VF");
1414     return ScalarsPerVF->second.count(I);
1415   }
1416 
1417   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1418   /// for vectorization factor \p VF.
1419   bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
1420     return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
1421            !isProfitableToScalarize(I, VF) &&
1422            !isScalarAfterVectorization(I, VF);
1423   }
1424 
1425   /// Decision that was taken during cost calculation for memory instruction.
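  /// For example (illustrative), a load of a[i] in a loop over increasing i is
  /// typically CM_Widen (stride +1), a load of a[N - i] is CM_Widen_Reverse
  /// (stride -1), and a load of a[b[i]] is CM_GatherScatter.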
1426   enum InstWidening {
1427     CM_Unknown,
1428     CM_Widen,         // For consecutive accesses with stride +1.
1429     CM_Widen_Reverse, // For consecutive accesses with stride -1.
1430     CM_Interleave,
1431     CM_GatherScatter,
1432     CM_Scalarize
1433   };
1434 
1435   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1436   /// instruction \p I and vector width \p VF.
1437   void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
1438                            InstructionCost Cost) {
1439     assert(VF.isVector() && "Expected VF >=2");
1440     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1441   }
1442 
1443   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1444   /// interleaving group \p Grp and vector width \p VF.
1445   void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
1446                            ElementCount VF, InstWidening W,
1447                            InstructionCost Cost) {
1448     assert(VF.isVector() && "Expected VF >=2");
    // Broadcast this decision to all instructions inside the group, but
    // assign the cost to one instruction only.
1451     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1452       if (auto *I = Grp->getMember(i)) {
1453         if (Grp->getInsertPos() == I)
1454           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1455         else
1456           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1457       }
1458     }
1459   }
1460 
1461   /// Return the cost model decision for the given instruction \p I and vector
1462   /// width \p VF. Return CM_Unknown if this instruction did not pass
1463   /// through the cost modeling.
1464   InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
1465     assert(VF.isVector() && "Expected VF to be a vector VF");
1466     // Cost model is not run in the VPlan-native path - return conservative
1467     // result until this changes.
1468     if (EnableVPlanNativePath)
1469       return CM_GatherScatter;
1470 
1471     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1472     auto Itr = WideningDecisions.find(InstOnVF);
1473     if (Itr == WideningDecisions.end())
1474       return CM_Unknown;
1475     return Itr->second.first;
1476   }
1477 
1478   /// Return the vectorization cost for the given instruction \p I and vector
1479   /// width \p VF.
1480   InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
1481     assert(VF.isVector() && "Expected VF >=2");
1482     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1483     assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1484            "The cost is not calculated");
1485     return WideningDecisions[InstOnVF].second;
1486   }
1487 
1488   /// Return True if instruction \p I is an optimizable truncate whose operand
1489   /// is an induction variable. Such a truncate will be removed by adding a new
1490   /// induction variable with the destination type.
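  /// For example (illustrative), "%t = trunc i64 %iv to i32", where %iv is an
  /// induction phi and the i64->i32 truncate is not free for the target, can
  /// be optimized by introducing a separate i32 induction variable, removing
  /// the truncate from the vector body.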
1491   bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
1492     // If the instruction is not a truncate, return false.
1493     auto *Trunc = dyn_cast<TruncInst>(I);
1494     if (!Trunc)
1495       return false;
1496 
1497     // Get the source and destination types of the truncate.
1498     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1499     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1500 
1501     // If the truncate is free for the given types, return false. Replacing a
1502     // free truncate with an induction variable would add an induction variable
1503     // update instruction to each iteration of the loop. We exclude from this
1504     // check the primary induction variable since it will need an update
1505     // instruction regardless.
1506     Value *Op = Trunc->getOperand(0);
1507     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1508       return false;
1509 
1510     // If the truncated value is not an induction variable, return false.
1511     return Legal->isInductionPhi(Op);
1512   }
1513 
1514   /// Collects the instructions to scalarize for each predicated instruction in
1515   /// the loop.
1516   void collectInstsToScalarize(ElementCount VF);
1517 
1518   /// Collect Uniform and Scalar values for the given \p VF.
1519   /// The sets depend on CM decision for Load/Store instructions
1520   /// that may be vectorized as interleave, gather-scatter or scalarized.
1521   void collectUniformsAndScalars(ElementCount VF) {
1522     // Do the analysis once.
1523     if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
1524       return;
1525     setCostBasedWideningDecision(VF);
1526     collectLoopUniforms(VF);
1527     collectLoopScalars(VF);
1528   }
1529 
1530   /// Returns true if the target machine supports masked store operation
1531   /// for the given \p DataType and kind of access to \p Ptr.
1532   bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const {
1533     return Legal->isConsecutivePtr(DataType, Ptr) &&
1534            TTI.isLegalMaskedStore(DataType, Alignment);
1535   }
1536 
1537   /// Returns true if the target machine supports masked load operation
1538   /// for the given \p DataType and kind of access to \p Ptr.
1539   bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const {
1540     return Legal->isConsecutivePtr(DataType, Ptr) &&
1541            TTI.isLegalMaskedLoad(DataType, Alignment);
1542   }
1543 
1544   /// Returns true if the target machine can represent \p V as a masked gather
1545   /// or scatter operation.
1546   bool isLegalGatherOrScatter(Value *V,
1547                               ElementCount VF = ElementCount::getFixed(1)) {
1548     bool LI = isa<LoadInst>(V);
1549     bool SI = isa<StoreInst>(V);
1550     if (!LI && !SI)
1551       return false;
1552     auto *Ty = getLoadStoreType(V);
1553     Align Align = getLoadStoreAlignment(V);
1554     if (VF.isVector())
1555       Ty = VectorType::get(Ty, VF);
1556     return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
1557            (SI && TTI.isLegalMaskedScatter(Ty, Align));
1558   }
1559 
  /// Returns true if the target machine can vectorize all of the reduction
  /// variables found in the loop for the given VF.
1562   bool canVectorizeReductions(ElementCount VF) const {
1563     return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
1564       const RecurrenceDescriptor &RdxDesc = Reduction.second;
1565       return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
1566     }));
1567   }
1568 
1569   /// Returns true if \p I is an instruction that will be scalarized with
1570   /// predication when vectorizing \p I with vectorization factor \p VF. Such
1571   /// instructions include conditional stores and instructions that may divide
1572   /// by zero.
1573   bool isScalarWithPredication(Instruction *I, ElementCount VF) const;
1574 
1575   // Returns true if \p I is an instruction that will be predicated either
1576   // through scalar predication or masked load/store or masked gather/scatter.
1577   // \p VF is the vectorization factor that will be used to vectorize \p I.
1578   // Superset of instructions that return true for isScalarWithPredication.
1579   bool isPredicatedInst(Instruction *I, ElementCount VF,
1580                         bool IsKnownUniform = false) {
1581     // When we know the load is uniform and the original scalar loop was not
1582     // predicated we don't need to mark it as a predicated instruction. Any
    // vectorized blocks created when tail-folding are something artificial we
1584     // have introduced and we know there is always at least one active lane.
1585     // That's why we call Legal->blockNeedsPredication here because it doesn't
1586     // query tail-folding.
1587     if (IsKnownUniform && isa<LoadInst>(I) &&
1588         !Legal->blockNeedsPredication(I->getParent()))
1589       return false;
1590     if (!blockNeedsPredicationForAnyReason(I->getParent()))
1591       return false;
1592     // Loads and stores that need some form of masked operation are predicated
1593     // instructions.
1594     if (isa<LoadInst>(I) || isa<StoreInst>(I))
1595       return Legal->isMaskRequired(I);
1596     return isScalarWithPredication(I, VF);
1597   }
1598 
1599   /// Returns true if \p I is a memory instruction with consecutive memory
1600   /// access that can be widened.
1601   bool
1602   memoryInstructionCanBeWidened(Instruction *I,
1603                                 ElementCount VF = ElementCount::getFixed(1));
1604 
1605   /// Returns true if \p I is a memory instruction in an interleaved-group
1606   /// of memory accesses that can be vectorized with wide vector loads/stores
1607   /// and shuffles.
1608   bool
1609   interleavedAccessCanBeWidened(Instruction *I,
1610                                 ElementCount VF = ElementCount::getFixed(1));
1611 
1612   /// Check if \p Instr belongs to any interleaved access group.
1613   bool isAccessInterleaved(Instruction *Instr) {
1614     return InterleaveInfo.isInterleaved(Instr);
1615   }
1616 
1617   /// Get the interleaved access group that \p Instr belongs to.
1618   const InterleaveGroup<Instruction> *
1619   getInterleavedAccessGroup(Instruction *Instr) {
1620     return InterleaveInfo.getInterleaveGroup(Instr);
1621   }
1622 
1623   /// Returns true if we're required to use a scalar epilogue for at least
1624   /// the final iteration of the original loop.
1625   bool requiresScalarEpilogue(ElementCount VF) const {
1626     if (!isScalarEpilogueAllowed())
1627       return false;
1628     // If we might exit from anywhere but the latch, must run the exiting
1629     // iteration in scalar form.
1630     if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
1631       return true;
1632     return VF.isVector() && InterleaveInfo.requiresScalarEpilogue();
1633   }
1634 
  /// Returns true if a scalar epilogue is allowed, i.e. it has not been
  /// disallowed due to optsize or a loop hint annotation.
1637   bool isScalarEpilogueAllowed() const {
1638     return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1639   }
1640 
1641   /// Returns true if all loop blocks should be masked to fold tail loop.
1642   bool foldTailByMasking() const { return FoldTailByMasking; }
1643 
  /// Returns true if the instructions in this block require predication
1645   /// for any reason, e.g. because tail folding now requires a predicate
1646   /// or because the block in the original loop was predicated.
1647   bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const {
1648     return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1649   }
1650 
1651   /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
1652   /// nodes to the chain of instructions representing the reductions. Uses a
1653   /// MapVector to ensure deterministic iteration order.
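  /// For example (illustrative), for an in-loop reduction "sum += a[i]" this
  /// maps the phi node of 'sum' to the chain of add instructions that form the
  /// reduction.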
1654   using ReductionChainMap =
1655       SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;
1656 
1657   /// Return the chain of instructions representing an inloop reduction.
1658   const ReductionChainMap &getInLoopReductionChains() const {
1659     return InLoopReductionChains;
1660   }
1661 
1662   /// Returns true if the Phi is part of an inloop reduction.
1663   bool isInLoopReduction(PHINode *Phi) const {
1664     return InLoopReductionChains.count(Phi);
1665   }
1666 
1667   /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1668   /// with factor VF.  Return the cost of the instruction, including
1669   /// scalarization overhead if it's needed.
1670   InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;
1671 
1672   /// Estimate cost of a call instruction CI if it were vectorized with factor
1673   /// VF. Return the cost of the instruction, including scalarization overhead
  /// if it's needed. The flag NeedToScalarize shows whether the call needs to
  /// be scalarized, i.e. either a vector version isn't available or it is too
  /// expensive.
1677   InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
1678                                     bool &NeedToScalarize) const;
1679 
1680   /// Returns true if the per-lane cost of VectorizationFactor A is lower than
1681   /// that of B.
1682   bool isMoreProfitable(const VectorizationFactor &A,
1683                         const VectorizationFactor &B) const;
1684 
1685   /// Invalidates decisions already taken by the cost model.
1686   void invalidateCostModelingDecisions() {
1687     WideningDecisions.clear();
1688     Uniforms.clear();
1689     Scalars.clear();
1690   }
1691 
1692 private:
1693   unsigned NumPredStores = 0;
1694 
1695   /// \return An upper bound for the vectorization factors for both
1696   /// fixed and scalable vectorization, where the minimum-known number of
1697   /// elements is a power-of-2 larger than zero. If scalable vectorization is
1698   /// disabled or unsupported, then the scalable part will be equal to
1699   /// ElementCount::getScalable(0).
1700   FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
1701                                            ElementCount UserVF,
1702                                            bool FoldTailByMasking);
1703 
  /// \return the maximized element count based on the target's vector
1705   /// registers and the loop trip-count, but limited to a maximum safe VF.
1706   /// This is a helper function of computeFeasibleMaxVF.
1707   /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
1708   /// issue that occurred on one of the buildbots which cannot be reproduced
  /// without having access to the proprietary compiler (see comments on
1710   /// D98509). The issue is currently under investigation and this workaround
1711   /// will be removed as soon as possible.
1712   ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
1713                                        unsigned SmallestType,
1714                                        unsigned WidestType,
1715                                        const ElementCount &MaxSafeVF,
1716                                        bool FoldTailByMasking);
1717 
1718   /// \return the maximum legal scalable VF, based on the safe max number
1719   /// of elements.
1720   ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1721 
1722   /// The vectorization cost is a combination of the cost itself and a boolean
1723   /// indicating whether any of the contributing operations will actually
1724   /// operate on vector values after type legalization in the backend. If this
1725   /// latter value is false, then all operations will be scalarized (i.e. no
1726   /// vectorization has actually taken place).
1727   using VectorizationCostTy = std::pair<InstructionCost, bool>;
1728 
1729   /// Returns the expected execution cost. The unit of the cost does
1730   /// not matter because we use the 'cost' units to compare different
1731   /// vector widths. The cost that is returned is *not* normalized by
1732   /// the factor width. If \p Invalid is not nullptr, this function
1733   /// will add a pair(Instruction*, ElementCount) to \p Invalid for
1734   /// each instruction that has an Invalid cost for the given VF.
1735   using InstructionVFPair = std::pair<Instruction *, ElementCount>;
1736   VectorizationCostTy
1737   expectedCost(ElementCount VF,
1738                SmallVectorImpl<InstructionVFPair> *Invalid = nullptr);
1739 
1740   /// Returns the execution time cost of an instruction for a given vector
1741   /// width. Vector width of one means scalar.
1742   VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1743 
1744   /// The cost-computation logic from getInstructionCost which provides
1745   /// the vector type as an output parameter.
1746   InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
1747                                      Type *&VectorTy);
1748 
1749   /// Return the cost of instructions in an inloop reduction pattern, if I is
1750   /// part of that pattern.
1751   Optional<InstructionCost>
1752   getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy,
1753                           TTI::TargetCostKind CostKind);
1754 
1755   /// Calculate vectorization cost of memory instruction \p I.
1756   InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1757 
1758   /// The cost computation for scalarized memory instruction.
1759   InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1760 
1761   /// The cost computation for interleaving group of memory instructions.
1762   InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1763 
1764   /// The cost computation for Gather/Scatter instruction.
1765   InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1766 
1767   /// The cost computation for widening instruction \p I with consecutive
1768   /// memory access.
1769   InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1770 
  /// The cost calculation for Load/Store instruction \p I with a uniform
  /// pointer:
1772   /// Load: scalar load + broadcast.
1773   /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1774   /// element)
1775   InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1776 
1777   /// Estimate the overhead of scalarizing an instruction. This is a
1778   /// convenience wrapper for the type-based getScalarizationOverhead API.
1779   InstructionCost getScalarizationOverhead(Instruction *I,
1780                                            ElementCount VF) const;
1781 
  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
1784   bool isConsecutiveLoadOrStore(Instruction *I);
1785 
1786   /// Returns true if an artificially high cost for emulated masked memrefs
1787   /// should be used.
1788   bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF);
1789 
1790   /// Map of scalar integer values to the smallest bitwidth they can be legally
1791   /// represented as. The vector equivalents of these values should be truncated
1792   /// to this type.
1793   MapVector<Instruction *, uint64_t> MinBWs;
1794 
1795   /// A type representing the costs for instructions if they were to be
1796   /// scalarized rather than vectorized. The entries are Instruction-Cost
1797   /// pairs.
1798   using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1799 
  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
1802   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1803 
1804   /// Records whether it is allowed to have the original scalar loop execute at
1805   /// least once. This may be needed as a fallback loop in case runtime
1806   /// aliasing/dependence checks fail, or to handle the tail/remainder
  /// iterations when the trip count is unknown or is not a multiple of the VF,
1808   /// or as a peel-loop to handle gaps in interleave-groups.
1809   /// Under optsize and when the trip count is very small we don't allow any
1810   /// iterations to execute in the scalar loop.
1811   ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1812 
  /// All blocks of the loop are to be masked to fold the tail of scalar
  /// iterations.
1814   bool FoldTailByMasking = false;
1815 
1816   /// A map holding scalar costs for different vectorization factors. The
1817   /// presence of a cost for an instruction in the mapping indicates that the
1818   /// instruction will be scalarized when vectorizing with the associated
1819   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1820   DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1821 
1822   /// Holds the instructions known to be uniform after vectorization.
1823   /// The data is collected per VF.
1824   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1825 
1826   /// Holds the instructions known to be scalar after vectorization.
1827   /// The data is collected per VF.
1828   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1829 
1830   /// Holds the instructions (address computations) that are forced to be
1831   /// scalarized.
1832   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1833 
1834   /// PHINodes of the reductions that should be expanded in-loop along with
1835   /// their associated chains of reduction operations, in program order from top
  /// (PHI) to bottom.
1837   ReductionChainMap InLoopReductionChains;
1838 
1839   /// A Map of inloop reduction operations and their immediate chain operand.
1840   /// FIXME: This can be removed once reductions can be costed correctly in
1841   /// vplan. This was added to allow quick lookup to the inloop operations,
1842   /// without having to loop through InLoopReductionChains.
1843   DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1844 
1845   /// Returns the expected difference in cost from scalarizing the expression
1846   /// feeding a predicated instruction \p PredInst. The instructions to
1847   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1848   /// non-negative return value implies the expression will be scalarized.
1849   /// Currently, only single-use chains are considered for scalarization.
1850   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1851                               ElementCount VF);
1852 
1853   /// Collect the instructions that are uniform after vectorization. An
1854   /// instruction is uniform if we represent it with a single scalar value in
1855   /// the vectorized loop corresponding to each vector iteration. Examples of
1856   /// uniform instructions include pointer operands of consecutive or
1857   /// interleaved memory accesses. Note that although uniformity implies an
1858   /// instruction will be scalar, the reverse is not true. In general, a
1859   /// scalarized instruction will be represented by VF scalar values in the
1860   /// vectorized loop, each corresponding to an iteration of the original
1861   /// scalar loop.
1862   void collectLoopUniforms(ElementCount VF);
1863 
1864   /// Collect the instructions that are scalar after vectorization. An
1865   /// instruction is scalar if it is known to be uniform or will be scalarized
1866   /// during vectorization. collectLoopScalars should only add non-uniform nodes
1867   /// to the list if they are used by a load/store instruction that is marked as
1868   /// CM_Scalarize. Non-uniform scalarized instructions will be represented by
1869   /// VF values in the vectorized loop, each corresponding to an iteration of
1870   /// the original scalar loop.
1871   void collectLoopScalars(ElementCount VF);
1872 
1873   /// Keeps cost model vectorization decision and cost for instructions.
1874   /// Right now it is used for memory instructions only.
1875   using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1876                                 std::pair<InstWidening, InstructionCost>>;
1877 
1878   DecisionList WideningDecisions;
1879 
1880   /// Returns true if \p V is expected to be vectorized and it needs to be
1881   /// extracted.
1882   bool needsExtract(Value *V, ElementCount VF) const {
1883     Instruction *I = dyn_cast<Instruction>(V);
1884     if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1885         TheLoop->isLoopInvariant(I))
1886       return false;
1887 
1888     // Assume we can vectorize V (and hence we need extraction) if the
1889     // scalars are not computed yet. This can happen, because it is called
1890     // via getScalarizationOverhead from setCostBasedWideningDecision, before
1891     // the scalars are collected. That should be a safe assumption in most
1892     // cases, because we check if the operands have vectorizable types
1893     // beforehand in LoopVectorizationLegality.
1894     return Scalars.find(VF) == Scalars.end() ||
1895            !isScalarAfterVectorization(I, VF);
1896   };
1897 
1898   /// Returns a range containing only operands needing to be extracted.
1899   SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1900                                                    ElementCount VF) const {
1901     return SmallVector<Value *, 4>(make_filter_range(
1902         Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
1903   }
1904 
1905   /// Determines if we have the infrastructure to vectorize loop \p L and its
1906   /// epilogue, assuming the main loop is vectorized by \p VF.
1907   bool isCandidateForEpilogueVectorization(const Loop &L,
1908                                            const ElementCount VF) const;
1909 
1910   /// Returns true if epilogue vectorization is considered profitable, and
1911   /// false otherwise.
1912   /// \p VF is the vectorization factor chosen for the original loop.
1913   bool isEpilogueVectorizationProfitable(const ElementCount VF) const;
1914 
1915 public:
1916   /// The loop that we evaluate.
1917   Loop *TheLoop;
1918 
1919   /// Predicated scalar evolution analysis.
1920   PredicatedScalarEvolution &PSE;
1921 
1922   /// Loop Info analysis.
1923   LoopInfo *LI;
1924 
1925   /// Vectorization legality.
1926   LoopVectorizationLegality *Legal;
1927 
1928   /// Vector target information.
1929   const TargetTransformInfo &TTI;
1930 
1931   /// Target Library Info.
1932   const TargetLibraryInfo *TLI;
1933 
1934   /// Demanded bits analysis.
1935   DemandedBits *DB;
1936 
1937   /// Assumption cache.
1938   AssumptionCache *AC;
1939 
1940   /// Interface to emit optimization remarks.
1941   OptimizationRemarkEmitter *ORE;
1942 
  /// The function containing the loop we evaluate.
  const Function *TheFunction;
1944 
1945   /// Loop Vectorize Hint.
1946   const LoopVectorizeHints *Hints;
1947 
  /// The interleaved access information contains groups of interleaved
  /// accesses with the same stride that are close to each other.
1950   InterleavedAccessInfo &InterleaveInfo;
1951 
1952   /// Values to ignore in the cost model.
1953   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1954 
1955   /// Values to ignore in the cost model when VF > 1.
1956   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1957 
1958   /// All element types found in the loop.
1959   SmallPtrSet<Type *, 16> ElementTypesInLoop;
1960 
1961   /// Profitable vector factors.
1962   SmallVector<VectorizationFactor, 8> ProfitableVFs;
1963 };
1964 } // end namespace llvm
1965 
1966 /// Helper struct to manage generating runtime checks for vectorization.
1967 ///
/// The runtime checks are created up-front in temporary blocks that are
/// un-linked from the existing IR, to allow a better estimate of their cost.
/// After deciding to vectorize, the checks are moved back. If deciding not to
/// vectorize, the temporary blocks are completely removed.
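///
/// A minimal usage sketch (illustrative; the surrounding objects and names are
/// assumptions, not part of this class):
///   GeneratedRTChecks Checks(*SE, DT, LI, F->getParent()->getDataLayout());
///   Checks.Create(L, *LAI, PSE.getUnionPredicate());
///   // ... estimate the cost of the generated check blocks ...
///   // Only when vectorizing, hook the blocks back into the CFG:
///   Checks.emitSCEVChecks(L, Bypass, LoopVectorPreHeader, LoopExitBlock);
///   Checks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader);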
1972 class GeneratedRTChecks {
1973   /// Basic block which contains the generated SCEV checks, if any.
1974   BasicBlock *SCEVCheckBlock = nullptr;
1975 
1976   /// The value representing the result of the generated SCEV checks. If it is
1977   /// nullptr, either no SCEV checks have been generated or they have been used.
1978   Value *SCEVCheckCond = nullptr;
1979 
1980   /// Basic block which contains the generated memory runtime checks, if any.
1981   BasicBlock *MemCheckBlock = nullptr;
1982 
1983   /// The value representing the result of the generated memory runtime checks.
1984   /// If it is nullptr, either no memory runtime checks have been generated or
1985   /// they have been used.
1986   Value *MemRuntimeCheckCond = nullptr;
1987 
1988   DominatorTree *DT;
1989   LoopInfo *LI;
1990 
1991   SCEVExpander SCEVExp;
1992   SCEVExpander MemCheckExp;
1993 
1994 public:
1995   GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI,
1996                     const DataLayout &DL)
1997       : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"),
1998         MemCheckExp(SE, DL, "scev.check") {}
1999 
2000   /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
2001   /// accurately estimate the cost of the runtime checks. The blocks are
  /// un-linked from the IR and are added back during vector code generation. If
2003   /// there is no vector code generation, the check blocks are removed
2004   /// completely.
2005   void Create(Loop *L, const LoopAccessInfo &LAI,
2006               const SCEVUnionPredicate &UnionPred) {
2007 
2008     BasicBlock *LoopHeader = L->getHeader();
2009     BasicBlock *Preheader = L->getLoopPreheader();
2010 
2011     // Use SplitBlock to create blocks for SCEV & memory runtime checks to
2012     // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
2013     // may be used by SCEVExpander. The blocks will be un-linked from their
2014     // predecessors and removed from LI & DT at the end of the function.
2015     if (!UnionPred.isAlwaysTrue()) {
2016       SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
2017                                   nullptr, "vector.scevcheck");
2018 
2019       SCEVCheckCond = SCEVExp.expandCodeForPredicate(
2020           &UnionPred, SCEVCheckBlock->getTerminator());
2021     }
2022 
2023     const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
2024     if (RtPtrChecking.Need) {
2025       auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
2026       MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
2027                                  "vector.memcheck");
2028 
2029       MemRuntimeCheckCond =
2030           addRuntimeChecks(MemCheckBlock->getTerminator(), L,
2031                            RtPtrChecking.getChecks(), MemCheckExp);
2032       assert(MemRuntimeCheckCond &&
2033              "no RT checks generated although RtPtrChecking "
2034              "claimed checks are required");
2035     }
2036 
2037     if (!MemCheckBlock && !SCEVCheckBlock)
2038       return;
2039 
    // Unhook the temporary blocks containing the checks and update various
    // places accordingly.
2042     if (SCEVCheckBlock)
2043       SCEVCheckBlock->replaceAllUsesWith(Preheader);
2044     if (MemCheckBlock)
2045       MemCheckBlock->replaceAllUsesWith(Preheader);
2046 
2047     if (SCEVCheckBlock) {
2048       SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
2049       new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
2050       Preheader->getTerminator()->eraseFromParent();
2051     }
2052     if (MemCheckBlock) {
2053       MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
2054       new UnreachableInst(Preheader->getContext(), MemCheckBlock);
2055       Preheader->getTerminator()->eraseFromParent();
2056     }
2057 
2058     DT->changeImmediateDominator(LoopHeader, Preheader);
2059     if (MemCheckBlock) {
2060       DT->eraseNode(MemCheckBlock);
2061       LI->removeBlock(MemCheckBlock);
2062     }
2063     if (SCEVCheckBlock) {
2064       DT->eraseNode(SCEVCheckBlock);
2065       LI->removeBlock(SCEVCheckBlock);
2066     }
2067   }
2068 
2069   /// Remove the created SCEV & memory runtime check blocks & instructions, if
2070   /// unused.
2071   ~GeneratedRTChecks() {
2072     SCEVExpanderCleaner SCEVCleaner(SCEVExp);
2073     SCEVExpanderCleaner MemCheckCleaner(MemCheckExp);
2074     if (!SCEVCheckCond)
2075       SCEVCleaner.markResultUsed();
2076 
2077     if (!MemRuntimeCheckCond)
2078       MemCheckCleaner.markResultUsed();
2079 
2080     if (MemRuntimeCheckCond) {
2081       auto &SE = *MemCheckExp.getSE();
2082       // Memory runtime check generation creates compares that use expanded
2083       // values. Remove them before running the SCEVExpanderCleaners.
2084       for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
2085         if (MemCheckExp.isInsertedInstruction(&I))
2086           continue;
2087         SE.forgetValue(&I);
2088         I.eraseFromParent();
2089       }
2090     }
2091     MemCheckCleaner.cleanup();
2092     SCEVCleaner.cleanup();
2093 
2094     if (SCEVCheckCond)
2095       SCEVCheckBlock->eraseFromParent();
2096     if (MemRuntimeCheckCond)
2097       MemCheckBlock->eraseFromParent();
2098   }
2099 
2100   /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and
2101   /// adjusts the branches to branch to the vector preheader or \p Bypass,
2102   /// depending on the generated condition.
2103   BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass,
2104                              BasicBlock *LoopVectorPreHeader,
2105                              BasicBlock *LoopExitBlock) {
2106     if (!SCEVCheckCond)
2107       return nullptr;
2108     if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond))
2109       if (C->isZero())
2110         return nullptr;
2111 
2112     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2113 
2114     BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock);
    // If the preheader belongs to an enclosing loop, the SCEV check block must
    // be added to that loop as well.
2116     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2117       PL->addBasicBlockToLoop(SCEVCheckBlock, *LI);
2118 
2119     SCEVCheckBlock->getTerminator()->eraseFromParent();
2120     SCEVCheckBlock->moveBefore(LoopVectorPreHeader);
2121     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2122                                                 SCEVCheckBlock);
2123 
2124     DT->addNewBlock(SCEVCheckBlock, Pred);
2125     DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock);
2126 
2127     ReplaceInstWithInst(
2128         SCEVCheckBlock->getTerminator(),
2129         BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond));
2130     // Mark the check as used, to prevent it from being removed during cleanup.
2131     SCEVCheckCond = nullptr;
2132     return SCEVCheckBlock;
2133   }
2134 
2135   /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts
2136   /// the branches to branch to the vector preheader or \p Bypass, depending on
2137   /// the generated condition.
2138   BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass,
2139                                    BasicBlock *LoopVectorPreHeader) {
    // Check if we generated code that checks at runtime whether arrays
    // overlap.
2141     if (!MemRuntimeCheckCond)
2142       return nullptr;
2143 
2144     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2145     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2146                                                 MemCheckBlock);
2147 
2148     DT->addNewBlock(MemCheckBlock, Pred);
2149     DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock);
2150     MemCheckBlock->moveBefore(LoopVectorPreHeader);
2151 
2152     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2153       PL->addBasicBlockToLoop(MemCheckBlock, *LI);
2154 
2155     ReplaceInstWithInst(
2156         MemCheckBlock->getTerminator(),
2157         BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond));
2158     MemCheckBlock->getTerminator()->setDebugLoc(
2159         Pred->getTerminator()->getDebugLoc());
2160 
2161     // Mark the check as used, to prevent it from being removed during cleanup.
2162     MemRuntimeCheckCond = nullptr;
2163     return MemCheckBlock;
2164   }
2165 };
2166 
2167 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
2168 // vectorization. The loop needs to be annotated with #pragma omp simd
// simdlen(#) or #pragma clang loop vectorize(enable) vectorize_width(#). If
// the vector length information is not provided, vectorization is not
// considered explicit. Interleave hints are not allowed either. These
// limitations will be
2172 // relaxed in the future.
// Please note that we are currently forced to abuse the pragma 'clang
2174 // vectorize' semantics. This pragma provides *auto-vectorization hints*
2175 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
2176 // provides *explicit vectorization hints* (LV can bypass legal checks and
2177 // assume that vectorization is legal). However, both hints are implemented
2178 // using the same metadata (llvm.loop.vectorize, processed by
2179 // LoopVectorizeHints). This will be fixed in the future when the native IR
2180 // representation for pragma 'omp simd' is introduced.
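// For example (illustrative), an outer loop annotated as
//   #pragma omp simd simdlen(4)
//   for (int i = 0; i < N; ++i)
//     for (int j = 0; j < M; ++j)
//       A[i][j] += B[i][j];
// qualifies here, whereas an unannotated outer loop does not.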
2181 static bool isExplicitVecOuterLoop(Loop *OuterLp,
2182                                    OptimizationRemarkEmitter *ORE) {
2183   assert(!OuterLp->isInnermost() && "This is not an outer loop");
2184   LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
2185 
2186   // Only outer loops with an explicit vectorization hint are supported.
2187   // Unannotated outer loops are ignored.
2188   if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
2189     return false;
2190 
2191   Function *Fn = OuterLp->getHeader()->getParent();
2192   if (!Hints.allowVectorization(Fn, OuterLp,
2193                                 true /*VectorizeOnlyWhenForced*/)) {
2194     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
2195     return false;
2196   }
2197 
2198   if (Hints.getInterleave() > 1) {
2199     // TODO: Interleave support is future work.
2200     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
2201                          "outer loops.\n");
2202     Hints.emitRemarkWithHints();
2203     return false;
2204   }
2205 
2206   return true;
2207 }
2208 
2209 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
2210                                   OptimizationRemarkEmitter *ORE,
2211                                   SmallVectorImpl<Loop *> &V) {
2212   // Collect inner loops and outer loops without irreducible control flow. For
2213   // now, only collect outer loops that have explicit vectorization hints. If we
2214   // are stress testing the VPlan H-CFG construction, we collect the outermost
2215   // loop of every loop nest.
2216   if (L.isInnermost() || VPlanBuildStressTest ||
2217       (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
2218     LoopBlocksRPO RPOT(&L);
2219     RPOT.perform(LI);
2220     if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
2221       V.push_back(&L);
2222       // TODO: Collect inner loops inside marked outer loops in case
2223       // vectorization fails for the outer loop. Do not invoke
2224       // 'containsIrreducibleCFG' again for inner loops when the outer loop is
2225       // already known to be reducible. We can use an inherited attribute for
2226       // that.
2227       return;
2228     }
2229   }
2230   for (Loop *InnerL : L)
2231     collectSupportedLoops(*InnerL, LI, ORE, V);
2232 }
2233 
2234 namespace {
2235 
2236 /// The LoopVectorize Pass.
2237 struct LoopVectorize : public FunctionPass {
2238   /// Pass identification, replacement for typeid
2239   static char ID;
2240 
2241   LoopVectorizePass Impl;
2242 
2243   explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
2244                          bool VectorizeOnlyWhenForced = false)
2245       : FunctionPass(ID),
2246         Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
2247     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
2248   }
2249 
2250   bool runOnFunction(Function &F) override {
2251     if (skipFunction(F))
2252       return false;
2253 
2254     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2255     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2256     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
2257     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2258     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
2259     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
2260     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
2261     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2262     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2263     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
2264     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
2265     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2266     auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
2267 
2268     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
2269         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
2270 
2271     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
2272                         GetLAA, *ORE, PSI).MadeAnyChange;
2273   }
2274 
2275   void getAnalysisUsage(AnalysisUsage &AU) const override {
2276     AU.addRequired<AssumptionCacheTracker>();
2277     AU.addRequired<BlockFrequencyInfoWrapperPass>();
2278     AU.addRequired<DominatorTreeWrapperPass>();
2279     AU.addRequired<LoopInfoWrapperPass>();
2280     AU.addRequired<ScalarEvolutionWrapperPass>();
2281     AU.addRequired<TargetTransformInfoWrapperPass>();
2282     AU.addRequired<AAResultsWrapperPass>();
2283     AU.addRequired<LoopAccessLegacyAnalysis>();
2284     AU.addRequired<DemandedBitsWrapperPass>();
2285     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2286     AU.addRequired<InjectTLIMappingsLegacy>();
2287 
2288     // We currently do not preserve loopinfo/dominator analyses with outer loop
2289     // vectorization. Until this is addressed, mark these analyses as preserved
2290     // only for non-VPlan-native path.
2291     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
2292     if (!EnableVPlanNativePath) {
2293       AU.addPreserved<LoopInfoWrapperPass>();
2294       AU.addPreserved<DominatorTreeWrapperPass>();
2295     }
2296 
2297     AU.addPreserved<BasicAAWrapperPass>();
2298     AU.addPreserved<GlobalsAAWrapperPass>();
2299     AU.addRequired<ProfileSummaryInfoWrapperPass>();
2300   }
2301 };
2302 
2303 } // end anonymous namespace
2304 
2305 //===----------------------------------------------------------------------===//
2306 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
2307 // LoopVectorizationCostModel and LoopVectorizationPlanner.
2308 //===----------------------------------------------------------------------===//
2309 
2310 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
  // We need to place the broadcast of invariant variables outside the loop,
  // but only if it's proven safe to do so. Otherwise, the broadcast will be
  // placed inside the vector loop body.
2314   Instruction *Instr = dyn_cast<Instruction>(V);
2315   bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
2316                      (!Instr ||
2317                       DT->dominates(Instr->getParent(), LoopVectorPreHeader));
2318   // Place the code for broadcasting invariant variables in the new preheader.
2319   IRBuilder<>::InsertPointGuard Guard(Builder);
2320   if (SafeToHoist)
2321     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2322 
2323   // Broadcast the scalar into all locations in the vector.
2324   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
2325 
2326   return Shuf;
2327 }
2328 
/// This function adds
/// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
/// to each vector element of Val. The sequence starts at \p StartIdx.
/// \p BinOp is only relevant for FP induction variables.
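/// For example (illustrative), with an all-zero integer Val, StartIdx = 0,
/// Step = 1 and VF = 4, the result is the vector <0, 1, 2, 3>.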
2333 static Value *getStepVector(Value *Val, Value *StartIdx, Value *Step,
2334                             Instruction::BinaryOps BinOp, ElementCount VF,
2335                             IRBuilder<> &Builder) {
2336   if (VF.isScalar()) {
2337     // When unrolling and the VF is 1, we only need to add a simple scalar.
2338     Type *Ty = Val->getType();
2339     assert(!Ty->isVectorTy() && "Val must be a scalar");
2340 
2341     if (Ty->isFloatingPointTy()) {
2342       // Floating-point operations inherit FMF via the builder's flags.
2343       Value *MulOp = Builder.CreateFMul(StartIdx, Step);
2344       return Builder.CreateBinOp(BinOp, Val, MulOp);
2345     }
2346     return Builder.CreateAdd(Val, Builder.CreateMul(StartIdx, Step),
2347                              "induction");
2348   }
2349 
2350   // Create and check the types.
2351   auto *ValVTy = cast<VectorType>(Val->getType());
2352   ElementCount VLen = ValVTy->getElementCount();
2353 
2354   Type *STy = Val->getType()->getScalarType();
2355   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
2356          "Induction Step must be an integer or FP");
2357   assert(Step->getType() == STy && "Step has wrong type");
2358 
2359   SmallVector<Constant *, 8> Indices;
2360 
  // Create a vector of consecutive numbers from zero to VF - 1.
2362   VectorType *InitVecValVTy = ValVTy;
2363   Type *InitVecValSTy = STy;
2364   if (STy->isFloatingPointTy()) {
2365     InitVecValSTy =
2366         IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
2367     InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
2368   }
2369   Value *InitVec = Builder.CreateStepVector(InitVecValVTy);
2370 
2371   // Splat the StartIdx
2372   Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx);
2373 
2374   if (STy->isIntegerTy()) {
2375     InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);
2376     Step = Builder.CreateVectorSplat(VLen, Step);
2377     assert(Step->getType() == Val->getType() && "Invalid step vec");
2378     // FIXME: The newly created binary instructions should contain nsw/nuw
2379     // flags, which can be found from the original scalar operations.
2380     Step = Builder.CreateMul(InitVec, Step);
2381     return Builder.CreateAdd(Val, Step, "induction");
2382   }
2383 
2384   // Floating point induction.
2385   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
2386          "Binary Opcode should be specified for FP induction");
2387   InitVec = Builder.CreateUIToFP(InitVec, ValVTy);
2388   InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat);
2389 
2390   Step = Builder.CreateVectorSplat(VLen, Step);
2391   Value *MulOp = Builder.CreateFMul(InitVec, Step);
2392   return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
2393 }
2394 
2395 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
2396     const InductionDescriptor &II, Value *Step, Value *Start,
2397     Instruction *EntryVal, VPValue *Def, VPTransformState &State) {
2398   IRBuilder<> &Builder = State.Builder;
2399   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
2400          "Expected either an induction phi-node or a truncate of it!");
2401 
2402   // Construct the initial value of the vector IV in the vector loop preheader
2403   auto CurrIP = Builder.saveIP();
2404   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2405   if (isa<TruncInst>(EntryVal)) {
2406     assert(Start->getType()->isIntegerTy() &&
2407            "Truncation requires an integer type");
2408     auto *TruncType = cast<IntegerType>(EntryVal->getType());
2409     Step = Builder.CreateTrunc(Step, TruncType);
2410     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
2411   }
2412 
2413   Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0);
2414   Value *SplatStart = Builder.CreateVectorSplat(State.VF, Start);
2415   Value *SteppedStart = getStepVector(
2416       SplatStart, Zero, Step, II.getInductionOpcode(), State.VF, State.Builder);
2417 
2418   // We create vector phi nodes for both integer and floating-point induction
2419   // variables. Here, we determine the kind of arithmetic we will perform.
2420   Instruction::BinaryOps AddOp;
2421   Instruction::BinaryOps MulOp;
2422   if (Step->getType()->isIntegerTy()) {
2423     AddOp = Instruction::Add;
2424     MulOp = Instruction::Mul;
2425   } else {
2426     AddOp = II.getInductionOpcode();
2427     MulOp = Instruction::FMul;
2428   }
2429 
2430   // Multiply the vectorization factor by the step using integer or
2431   // floating-point arithmetic as appropriate.
2432   Type *StepType = Step->getType();
2433   Value *RuntimeVF;
2434   if (Step->getType()->isFloatingPointTy())
2435     RuntimeVF = getRuntimeVFAsFloat(Builder, StepType, State.VF);
2436   else
2437     RuntimeVF = getRuntimeVF(Builder, StepType, State.VF);
2438   Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF);
2439 
2440   // Create a vector splat to use in the induction update.
2441   //
2442   // FIXME: If the step is non-constant, we create the vector splat with
2443   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
2444   //        handle a constant vector splat.
2445   Value *SplatVF = isa<Constant>(Mul)
2446                        ? ConstantVector::getSplat(State.VF, cast<Constant>(Mul))
2447                        : Builder.CreateVectorSplat(State.VF, Mul);
2448   Builder.restoreIP(CurrIP);
2449 
2450   // We may need to add the step a number of times, depending on the unroll
2451   // factor. The last of those goes into the PHI.
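  // For example (illustrative), with UF = 2: part 0 uses %vec.ind, part 1 uses
  // %step.add = %vec.ind + splat(VF * Step), and the value fed back into the
  // phi is %vec.ind.next = %step.add + splat(VF * Step).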
2452   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
2453                                     &*LoopVectorBody->getFirstInsertionPt());
2454   VecInd->setDebugLoc(EntryVal->getDebugLoc());
2455   Instruction *LastInduction = VecInd;
2456   for (unsigned Part = 0; Part < UF; ++Part) {
2457     State.set(Def, LastInduction, Part);
2458 
2459     if (isa<TruncInst>(EntryVal))
2460       addMetadata(LastInduction, EntryVal);
2461 
2462     LastInduction = cast<Instruction>(
2463         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"));
2464     LastInduction->setDebugLoc(EntryVal->getDebugLoc());
2465   }
2466 
2467   // Move the last step to the end of the latch block. This ensures consistent
2468   // placement of all induction updates.
2469   auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
2470   auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
2471   LastInduction->moveBefore(Br);
2472   LastInduction->setName("vec.ind.next");
2473 
2474   VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
2475   VecInd->addIncoming(LastInduction, LoopVectorLatch);
2476 }
2477 
2478 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
2479   return Cost->isScalarAfterVectorization(I, VF) ||
2480          Cost->isProfitableToScalarize(I, VF);
2481 }
2482 
2483 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
2484   if (shouldScalarizeInstruction(IV))
2485     return true;
2486   auto isScalarInst = [&](User *U) -> bool {
2487     auto *I = cast<Instruction>(U);
2488     return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
2489   };
2490   return llvm::any_of(IV->users(), isScalarInst);
2491 }
2492 
2493 /// Returns true if \p ID starts at 0 and has a step of 1.
2494 static bool isCanonicalID(const InductionDescriptor &ID) {
2495   if (!ID.getConstIntStepValue() || !ID.getConstIntStepValue()->isOne())
2496     return false;
2497   auto *StartC = dyn_cast<ConstantInt>(ID.getStartValue());
2498   return StartC && StartC->isZero();
2499 }
2500 
2501 void InnerLoopVectorizer::widenIntOrFpInduction(
2502     PHINode *IV, const InductionDescriptor &ID, Value *Start, TruncInst *Trunc,
2503     VPValue *Def, VPTransformState &State, Value *CanonicalIV) {
2504   IRBuilder<> &Builder = State.Builder;
2505   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
2506   assert(!State.VF.isZero() && "VF must be non-zero");
2507 
2508   // The value from the original loop to which we are mapping the new induction
2509   // variable.
2510   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
2511 
2512   auto &DL = EntryVal->getModule()->getDataLayout();
2513 
2514   // Generate code for the induction step. Note that induction steps are
2515   // required to be loop-invariant
2516   auto CreateStepValue = [&](const SCEV *Step) -> Value * {
2517     assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) &&
2518            "Induction step should be loop invariant");
2519     if (PSE.getSE()->isSCEVable(IV->getType())) {
2520       SCEVExpander Exp(*PSE.getSE(), DL, "induction");
2521       return Exp.expandCodeFor(Step, Step->getType(),
2522                                State.CFG.VectorPreHeader->getTerminator());
2523     }
2524     return cast<SCEVUnknown>(Step)->getValue();
2525   };
2526 
2527   // The scalar value to broadcast. This is derived from the canonical
2528   // induction variable. If a truncation type is given, truncate the canonical
2529   // induction variable and step. Otherwise, derive these values from the
2530   // induction descriptor.
2531   auto CreateScalarIV = [&](Value *&Step) -> Value * {
2532     Value *ScalarIV = CanonicalIV;
2533     Type *NeededType = IV->getType();
2534     if (!isCanonicalID(ID) || ScalarIV->getType() != NeededType) {
2535       ScalarIV =
2536           NeededType->isIntegerTy()
2537               ? Builder.CreateSExtOrTrunc(ScalarIV, NeededType)
2538               : Builder.CreateCast(Instruction::SIToFP, ScalarIV, NeededType);
2539       ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID,
2540                                       State.CFG.PrevBB);
2541       ScalarIV->setName("offset.idx");
2542     }
2543     if (Trunc) {
2544       auto *TruncType = cast<IntegerType>(Trunc->getType());
2545       assert(Step->getType()->isIntegerTy() &&
2546              "Truncation requires an integer step");
2547       ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
2548       Step = Builder.CreateTrunc(Step, TruncType);
2549     }
2550     return ScalarIV;
2551   };
2552 
2553   // Create the vector values from the scalar IV, in the absence of creating a
2554   // vector IV.
2555   auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) {
2556     Value *Broadcasted = getBroadcastInstrs(ScalarIV);
2557     for (unsigned Part = 0; Part < UF; ++Part) {
2558       Value *StartIdx;
2559       if (Step->getType()->isFloatingPointTy())
2560         StartIdx =
2561             getRuntimeVFAsFloat(Builder, Step->getType(), State.VF * Part);
2562       else
2563         StartIdx = getRuntimeVF(Builder, Step->getType(), State.VF * Part);
2564 
2565       Value *EntryPart =
2566           getStepVector(Broadcasted, StartIdx, Step, ID.getInductionOpcode(),
2567                         State.VF, State.Builder);
2568       State.set(Def, EntryPart, Part);
2569       if (Trunc)
2570         addMetadata(EntryPart, Trunc);
2571     }
2572   };
2573 
2574   // Fast-math-flags propagate from the original induction instruction.
2575   IRBuilder<>::FastMathFlagGuard FMFG(Builder);
2576   if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp()))
2577     Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags());
2578 
2579   // Now do the actual transformations, and start with creating the step value.
2580   Value *Step = CreateStepValue(ID.getStep());
2581   if (State.VF.isScalar()) {
2582     Value *ScalarIV = CreateScalarIV(Step);
2583     CreateSplatIV(ScalarIV, Step);
2584     return;
2585   }
2586 
2587   // Determine if we want a scalar version of the induction variable. This is
2588   // true if the induction variable itself is not widened, or if it has at
2589   // least one user in the loop that is not widened.
2590   auto NeedsScalarIV = needsScalarInduction(EntryVal);
2591   if (!NeedsScalarIV) {
2592     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, State);
2593     return;
2594   }
2595 
2596   // Try to create a new independent vector induction variable. If we can't
2597   // create the phi node, we will splat the scalar induction variable in each
2598   // loop iteration.
2599   if (!shouldScalarizeInstruction(EntryVal)) {
2600     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, State);
2601     Value *ScalarIV = CreateScalarIV(Step);
2602     // Create scalar steps that can be used by instructions we will later
2603     // scalarize. Note that the addition of the scalar steps will not increase
2604     // the number of instructions in the loop in the common case prior to
2605     // InstCombine. We will be trading one vector extract for each scalar step.
2606     buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, State);
2607     return;
2608   }
2609 
  // All IV users are scalar instructions, so only emit a scalar IV, not a
  // vectorized IV, except when we fold the tail: in that case the splat IV
  // feeds the predicate used by the masked loads/stores.
2613   Value *ScalarIV = CreateScalarIV(Step);
2614   if (!Cost->isScalarEpilogueAllowed())
2615     CreateSplatIV(ScalarIV, Step);
2616   buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, State);
2617 }
2618 
2619 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
2620                                            Instruction *EntryVal,
2621                                            const InductionDescriptor &ID,
2622                                            VPValue *Def,
2623                                            VPTransformState &State) {
2624   IRBuilder<> &Builder = State.Builder;
2625   // We shouldn't have to build scalar steps if we aren't vectorizing.
2626   assert(State.VF.isVector() && "VF should be greater than one");
  // Get the value type and ensure it and the step have the same type.
2628   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
2629   assert(ScalarIVTy == Step->getType() &&
2630          "Val and Step should have the same type");
2631 
2632   // We build scalar steps for both integer and floating-point induction
2633   // variables. Here, we determine the kind of arithmetic we will perform.
2634   Instruction::BinaryOps AddOp;
2635   Instruction::BinaryOps MulOp;
2636   if (ScalarIVTy->isIntegerTy()) {
2637     AddOp = Instruction::Add;
2638     MulOp = Instruction::Mul;
2639   } else {
2640     AddOp = ID.getInductionOpcode();
2641     MulOp = Instruction::FMul;
2642   }
2643 
2644   // Determine the number of scalars we need to generate for each unroll
2645   // iteration. If EntryVal is uniform, we only need to generate the first
2646   // lane. Otherwise, we generate all VF values.
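  // For example (illustrative), with a fixed VF of 4, UF = 2, scalar IV %i and
  // a unit step, the non-uniform case produces %i + 0 .. %i + 3 for part 0 and
  // %i + 4 .. %i + 7 for part 1, whereas the uniform case only produces %i and
  // %i + 4.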
2647   bool IsUniform =
2648       Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), State.VF);
2649   unsigned Lanes = IsUniform ? 1 : State.VF.getKnownMinValue();
2650   // Compute the scalar steps and save the results in State.
2651   Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
2652                                      ScalarIVTy->getScalarSizeInBits());
2653   Type *VecIVTy = nullptr;
2654   Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr;
2655   if (!IsUniform && State.VF.isScalable()) {
2656     VecIVTy = VectorType::get(ScalarIVTy, State.VF);
2657     UnitStepVec =
2658         Builder.CreateStepVector(VectorType::get(IntStepTy, State.VF));
2659     SplatStep = Builder.CreateVectorSplat(State.VF, Step);
2660     SplatIV = Builder.CreateVectorSplat(State.VF, ScalarIV);
2661   }
2662 
2663   for (unsigned Part = 0; Part < State.UF; ++Part) {
2664     Value *StartIdx0 = createStepForVF(Builder, IntStepTy, State.VF, Part);
2665 
2666     if (!IsUniform && State.VF.isScalable()) {
2667       auto *SplatStartIdx = Builder.CreateVectorSplat(State.VF, StartIdx0);
2668       auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec);
2669       if (ScalarIVTy->isFloatingPointTy())
2670         InitVec = Builder.CreateSIToFP(InitVec, VecIVTy);
2671       auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep);
2672       auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul);
2673       State.set(Def, Add, Part);
      // It is still useful to record the per-lane values for the known minimum
      // number of elements, so we also emit them below. This improves the code
      // quality when, for example, only the first element is later extracted.
2677     }
2678 
2679     if (ScalarIVTy->isFloatingPointTy())
2680       StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy);
2681 
2682     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2683       Value *StartIdx = Builder.CreateBinOp(
2684           AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane));
2685       // The step returned by `createStepForVF` is a runtime-evaluated value
2686       // when VF is scalable. Otherwise, it should be folded into a Constant.
2687       assert((State.VF.isScalable() || isa<Constant>(StartIdx)) &&
2688              "Expected StartIdx to be folded to a constant when VF is not "
2689              "scalable");
2690       auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
2691       auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul);
2692       State.set(Def, Add, VPIteration(Part, Lane));
2693     }
2694   }
2695 }
2696 
2697 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def,
2698                                                     const VPIteration &Instance,
2699                                                     VPTransformState &State) {
2700   Value *ScalarInst = State.get(Def, Instance);
2701   Value *VectorValue = State.get(Def, Instance.Part);
2702   VectorValue = Builder.CreateInsertElement(
2703       VectorValue, ScalarInst,
2704       Instance.Lane.getAsRuntimeExpr(State.Builder, VF));
2705   State.set(Def, VectorValue, Instance.Part);
2706 }
2707 
2708 // Return whether we allow using masked interleave-groups (for dealing with
2709 // strided loads/stores that reside in predicated blocks, or for dealing
2710 // with gaps).
2711 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2712   // If an override option has been passed in for interleaved accesses, use it.
2713   if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2714     return EnableMaskedInterleavedMemAccesses;
2715 
2716   return TTI.enableMaskedInterleavedAccessVectorization();
2717 }
2718 
2719 // Try to vectorize the interleave group that \p Instr belongs to.
2720 //
2721 // E.g. Translate following interleaved load group (factor = 3):
2722 //   for (i = 0; i < N; i+=3) {
2723 //     R = Pic[i];             // Member of index 0
2724 //     G = Pic[i+1];           // Member of index 1
2725 //     B = Pic[i+2];           // Member of index 2
2726 //     ... // do something to R, G, B
2727 //   }
2728 // To:
2729 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2730 //   %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9>   ; R elements
2731 //   %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10>  ; G elements
2732 //   %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11>  ; B elements
2733 //
2734 // Or translate following interleaved store group (factor = 3):
2735 //   for (i = 0; i < N; i+=3) {
2736 //     ... do something to R, G, B
2737 //     Pic[i]   = R;           // Member of index 0
2738 //     Pic[i+1] = G;           // Member of index 1
2739 //     Pic[i+2] = B;           // Member of index 2
2740 //   }
2741 // To:
2742 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2743 //   %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
2744 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2745 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2746 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2747 void InnerLoopVectorizer::vectorizeInterleaveGroup(
2748     const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
2749     VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
2750     VPValue *BlockInMask) {
2751   Instruction *Instr = Group->getInsertPos();
2752   const DataLayout &DL = Instr->getModule()->getDataLayout();
2753 
  // Compute the vector type of the interleaved load/store.
2755   Type *ScalarTy = getLoadStoreType(Instr);
2756   unsigned InterleaveFactor = Group->getFactor();
2757   assert(!VF.isScalable() && "scalable vectors not yet supported.");
2758   auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);
2759 
  // Prepare the new pointer operands.
2761   SmallVector<Value *, 2> AddrParts;
2762   unsigned Index = Group->getIndex(Instr);
2763 
2764   // TODO: extend the masked interleaved-group support to reversed access.
2765   assert((!BlockInMask || !Group->isReverse()) &&
2766          "Reversed masked interleave-group not supported.");
2767 
2768   // If the group is reverse, adjust the index to refer to the last vector lane
2769   // instead of the first. We adjust the index from the first vector lane,
2770   // rather than directly getting the pointer for lane VF - 1, because the
2771   // pointer operand of the interleaved access is supposed to be uniform. For
2772   // uniform instructions, we're only required to generate a value for the
2773   // first vector lane in each unroll iteration.
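  // For example (illustrative), with VF = 4 and an interleave factor of 3, a
  // member at index 1 of a reversed group gets Index = 1 + (4 - 1) * 3 = 10.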
2774   if (Group->isReverse())
2775     Index += (VF.getKnownMinValue() - 1) * Group->getFactor();
2776 
2777   for (unsigned Part = 0; Part < UF; Part++) {
2778     Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
2779     setDebugLocFromInst(AddrPart);
2780 
    // Note that the current instruction may be at any member index, so the
    // address must be adjusted back to the member at index 0.
    //
    // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
    //       b = A[i];       // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
    //
    // E.g.  A[i+1] = a;     // Member of index 1
    //       A[i]   = b;     // Member of index 0
    //       A[i+2] = c;     // Member of index 2 (Current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].
2792 
2793     bool InBounds = false;
2794     if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
2795       InBounds = gep->isInBounds();
2796     AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2797     cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2798 
2799     // Cast to the vector pointer type.
2800     unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
2801     Type *PtrTy = VecTy->getPointerTo(AddressSpace);
2802     AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
2803   }
2804 
2805   setDebugLocFromInst(Instr);
2806   Value *PoisonVec = PoisonValue::get(VecTy);
2807 
2808   Value *MaskForGaps = nullptr;
2809   if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
2810     MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2811     assert(MaskForGaps && "Mask for Gaps is required but it is null");
2812   }
2813 
2814   // Vectorize the interleaved load group.
2815   if (isa<LoadInst>(Instr)) {
2816     // For each unroll part, create a wide load for the group.
2817     SmallVector<Value *, 2> NewLoads;
2818     for (unsigned Part = 0; Part < UF; Part++) {
2819       Instruction *NewLoad;
2820       if (BlockInMask || MaskForGaps) {
2821         assert(useMaskedInterleavedAccesses(*TTI) &&
2822                "masked interleaved groups are not allowed.");
2823         Value *GroupMask = MaskForGaps;
2824         if (BlockInMask) {
2825           Value *BlockInMaskPart = State.get(BlockInMask, Part);
2826           Value *ShuffledMask = Builder.CreateShuffleVector(
2827               BlockInMaskPart,
2828               createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2829               "interleaved.mask");
2830           GroupMask = MaskForGaps
2831                           ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2832                                                 MaskForGaps)
2833                           : ShuffledMask;
2834         }
2835         NewLoad =
2836             Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(),
2837                                      GroupMask, PoisonVec, "wide.masked.vec");
2838       }
2839       else
2840         NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
2841                                             Group->getAlign(), "wide.vec");
2842       Group->addMetadata(NewLoad);
2843       NewLoads.push_back(NewLoad);
2844     }
2845 
2846     // For each member in the group, shuffle out the appropriate data from the
2847     // wide loads.
2848     unsigned J = 0;
2849     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2850       Instruction *Member = Group->getMember(I);
2851 
2852       // Skip the gaps in the group.
2853       if (!Member)
2854         continue;
2855 
2856       auto StrideMask =
2857           createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
2858       for (unsigned Part = 0; Part < UF; Part++) {
2859         Value *StridedVec = Builder.CreateShuffleVector(
2860             NewLoads[Part], StrideMask, "strided.vec");
2861 
        // If this member has a different type, cast the result to that type.
2863         if (Member->getType() != ScalarTy) {
2864           assert(!VF.isScalable() && "VF is assumed to be non scalable.");
2865           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2866           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2867         }
2868 
2869         if (Group->isReverse())
2870           StridedVec = Builder.CreateVectorReverse(StridedVec, "reverse");
2871 
2872         State.set(VPDefs[J], StridedVec, Part);
2873       }
2874       ++J;
2875     }
2876     return;
2877   }
2878 
  // The sub-vector type for the current instruction.
2880   auto *SubVT = VectorType::get(ScalarTy, VF);
2881 
2882   // Vectorize the interleaved store group.
2883   MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2884   assert((!MaskForGaps || useMaskedInterleavedAccesses(*TTI)) &&
2885          "masked interleaved groups are not allowed.");
2886   assert((!MaskForGaps || !VF.isScalable()) &&
2887          "masking gaps for scalable vectors is not yet supported.");
2888   for (unsigned Part = 0; Part < UF; Part++) {
2889     // Collect the stored vector from each member.
2890     SmallVector<Value *, 4> StoredVecs;
2891     for (unsigned i = 0; i < InterleaveFactor; i++) {
2892       assert((Group->getMember(i) || MaskForGaps) &&
2893              "Fail to get a member from an interleaved store group");
2894       Instruction *Member = Group->getMember(i);
2895 
2896       // Skip the gaps in the group.
2897       if (!Member) {
        Value *Poison = PoisonValue::get(SubVT);
        StoredVecs.push_back(Poison);
2900         continue;
2901       }
2902 
2903       Value *StoredVec = State.get(StoredValues[i], Part);
2904 
2905       if (Group->isReverse())
2906         StoredVec = Builder.CreateVectorReverse(StoredVec, "reverse");
2907 
      // If this member has a different type, cast it to a unified type.
2910       if (StoredVec->getType() != SubVT)
2911         StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2912 
2913       StoredVecs.push_back(StoredVec);
2914     }
2915 
2916     // Concatenate all vectors into a wide vector.
2917     Value *WideVec = concatenateVectors(Builder, StoredVecs);
2918 
2919     // Interleave the elements in the wide vector.
2920     Value *IVec = Builder.CreateShuffleVector(
2921         WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor),
2922         "interleaved.vec");
2923 
2924     Instruction *NewStoreInstr;
2925     if (BlockInMask || MaskForGaps) {
2926       Value *GroupMask = MaskForGaps;
2927       if (BlockInMask) {
2928         Value *BlockInMaskPart = State.get(BlockInMask, Part);
2929         Value *ShuffledMask = Builder.CreateShuffleVector(
2930             BlockInMaskPart,
2931             createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2932             "interleaved.mask");
2933         GroupMask = MaskForGaps ? Builder.CreateBinOp(Instruction::And,
2934                                                       ShuffledMask, MaskForGaps)
2935                                 : ShuffledMask;
2936       }
2937       NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part],
2938                                                 Group->getAlign(), GroupMask);
2939     } else
2940       NewStoreInstr =
2941           Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());
2942 
2943     Group->addMetadata(NewStoreInstr);
2944   }
2945 }
2946 
2947 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
2948                                                VPReplicateRecipe *RepRecipe,
2949                                                const VPIteration &Instance,
2950                                                bool IfPredicateInstr,
2951                                                VPTransformState &State) {
2952   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
2953 
2954   // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for
2955   // the first lane and part.
2956   if (isa<NoAliasScopeDeclInst>(Instr))
2957     if (!Instance.isFirstIteration())
2958       return;
2959 
2960   setDebugLocFromInst(Instr);
2961 
  // Does this instruction return a value?
2963   bool IsVoidRetTy = Instr->getType()->isVoidTy();
2964 
2965   Instruction *Cloned = Instr->clone();
2966   if (!IsVoidRetTy)
2967     Cloned->setName(Instr->getName() + ".cloned");
2968 
  // If the scalarized instruction contributes to the address computation of a
  // widened masked load/store which was in a basic block that needed
  // predication and is not predicated after vectorization, we can't propagate
  // poison-generating flags (nuw/nsw, exact, inbounds, etc.). The scalarized
  // instruction could feed a poison value to the base address of the widened
  // load/store.
2975   if (State.MayGeneratePoisonRecipes.contains(RepRecipe))
2976     Cloned->dropPoisonGeneratingFlags();
2977 
2978   State.Builder.SetInsertPoint(Builder.GetInsertBlock(),
2979                                Builder.GetInsertPoint());
2980   // Replace the operands of the cloned instructions with their scalar
2981   // equivalents in the new loop.
2982   for (auto &I : enumerate(RepRecipe->operands())) {
2983     auto InputInstance = Instance;
2984     VPValue *Operand = I.value();
2985     if (State.Plan->isUniformAfterVectorization(Operand))
2986       InputInstance.Lane = VPLane::getFirstLane();
2987     Cloned->setOperand(I.index(), State.get(Operand, InputInstance));
2988   }
2989   addNewMetadata(Cloned, Instr);
2990 
2991   // Place the cloned scalar in the new loop.
2992   Builder.Insert(Cloned);
2993 
2994   State.set(RepRecipe, Cloned, Instance);
2995 
  // If we just cloned a new assumption, add it to the assumption cache.
2997   if (auto *II = dyn_cast<AssumeInst>(Cloned))
2998     AC->registerAssumption(II);
2999 
3000   // End if-block.
3001   if (IfPredicateInstr)
3002     PredicatedInstructions.push_back(Cloned);
3003 }
3004 
3005 void InnerLoopVectorizer::createHeaderBranch(Loop *L) {
3006   BasicBlock *Header = L->getHeader();
3007   assert(!L->getLoopLatch() && "loop should not have a latch at this point");
3008 
3009   IRBuilder<> B(Header->getTerminator());
3010   Instruction *OldInst =
3011       getDebugLocFromInstOrOperands(Legal->getPrimaryInduction());
3012   setDebugLocFromInst(OldInst, &B);
3013 
3014   // Connect the header to the exit and header blocks and replace the old
3015   // terminator.
3016   B.CreateCondBr(B.getTrue(), L->getUniqueExitBlock(), Header);
3017 
3018   // Now we have two terminators. Remove the old one from the block.
3019   Header->getTerminator()->eraseFromParent();
3020 }
3021 
3022 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
3023   if (TripCount)
3024     return TripCount;
3025 
3026   assert(L && "Create Trip Count for null loop.");
3027   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3028   // Find the loop boundaries.
3029   ScalarEvolution *SE = PSE.getSE();
3030   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
3031   assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
3032          "Invalid loop count");
3033 
3034   Type *IdxTy = Legal->getWidestInductionType();
3035   assert(IdxTy && "No type for induction");
3036 
  // The exit count might have type i64 while the phi is i32. This can happen
  // if we have an induction variable that is sign-extended before the
  // compare. The only way we can get a backedge-taken count in that situation
  // is if the induction variable was signed and therefore will not overflow,
  // in which case truncation is legal.
3042   if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
3043       IdxTy->getPrimitiveSizeInBits())
3044     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
3045   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
3046 
  // Get the total trip count from the backedge-taken count by adding 1.
3048   const SCEV *ExitCount = SE->getAddExpr(
3049       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
3050 
3051   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
3052 
3053   // Expand the trip count and place the new instructions in the preheader.
3054   // Notice that the pre-header does not change, only the loop body.
3055   SCEVExpander Exp(*SE, DL, "induction");
3056 
3057   // Count holds the overall loop count (N).
3058   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
3059                                 L->getLoopPreheader()->getTerminator());
3060 
3061   if (TripCount->getType()->isPointerTy())
3062     TripCount =
3063         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
3064                                     L->getLoopPreheader()->getTerminator());
3065 
3066   return TripCount;
3067 }
3068 
3069 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
3070   if (VectorTripCount)
3071     return VectorTripCount;
3072 
3073   Value *TC = getOrCreateTripCount(L);
3074   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3075 
3076   Type *Ty = TC->getType();
3077   // This is where we can make the step a runtime constant.
3078   Value *Step = createStepForVF(Builder, Ty, VF, UF);
3079 
3080   // If the tail is to be folded by masking, round the number of iterations N
3081   // up to a multiple of Step instead of rounding down. This is done by first
3082   // adding Step-1 and then rounding down. Note that it's ok if this addition
3083   // overflows: the vector induction variable will eventually wrap to zero given
3084   // that it starts at zero and its Step is a power of two; the loop will then
3085   // exit, with the last early-exit vector comparison also producing all-true.
3086   if (Cost->foldTailByMasking()) {
3087     assert(isPowerOf2_32(VF.getKnownMinValue() * UF) &&
3088            "VF*UF must be a power of 2 when folding tail by masking");
3089     Value *NumLanes = getRuntimeVF(Builder, Ty, VF * UF);
3090     TC = Builder.CreateAdd(
3091         TC, Builder.CreateSub(NumLanes, ConstantInt::get(Ty, 1)), "n.rnd.up");
3092   }
3093 
3094   // Now we need to generate the expression for the part of the loop that the
3095   // vectorized body will execute. This is equal to N - (N % Step) if scalar
3096   // iterations are not required for correctness, or N - Step, otherwise. Step
3097   // is equal to the vectorization factor (number of SIMD elements) times the
3098   // unroll factor (number of SIMD instructions).
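  // A small worked example (illustrative): with VF = 4 and UF = 2 (Step = 8)
  // and a trip count N = 13, n.mod.vf = 13 % 8 = 5 and n.vec = 13 - 5 = 8.
  // When folding the tail by masking, N is first rounded up to 13 + 7 = 20, so
  // n.vec = 20 - (20 % 8) = 16.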
3099   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
3100 
3101   // There are cases where we *must* run at least one iteration in the remainder
3102   // loop.  See the cost model for when this can happen.  If the step evenly
3103   // divides the trip count, we set the remainder to be equal to the step. If
3104   // the step does not evenly divide the trip count, no adjustment is necessary
3105   // since there will already be scalar iterations. Note that the minimum
3106   // iterations check ensures that N >= Step.
3107   if (Cost->requiresScalarEpilogue(VF)) {
3108     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
3109     R = Builder.CreateSelect(IsZero, Step, R);
3110   }
3111 
3112   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
3113 
3114   return VectorTripCount;
3115 }
3116 
3117 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
3118                                                    const DataLayout &DL) {
3119   // Verify that V is a vector type with same number of elements as DstVTy.
3120   auto *DstFVTy = cast<FixedVectorType>(DstVTy);
3121   unsigned VF = DstFVTy->getNumElements();
3122   auto *SrcVecTy = cast<FixedVectorType>(V->getType());
  assert((VF == SrcVecTy->getNumElements()) &&
         "Vector dimensions do not match");
3124   Type *SrcElemTy = SrcVecTy->getElementType();
3125   Type *DstElemTy = DstFVTy->getElementType();
3126   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
3127          "Vector elements must have same size");
3128 
3129   // Do a direct cast if element types are castable.
3130   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
3131     return Builder.CreateBitOrPointerCast(V, DstFVTy);
3132   }
  // V cannot be cast directly to the desired vector type. This may happen
  // when V is a floating-point vector but DstVTy is a vector of pointers, or
  // vice-versa. Handle it with a two-step cast through an intermediate
  // integer type, i.e. Ptr <-> Int <-> Float.
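  // For example (illustrative, assuming 64-bit pointers), a <2 x double> value
  // is first bitcast to <2 x i64> and then cast to a <2 x i8*> destination
  // type.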
3137   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
3138          "Only one type should be a pointer type");
3139   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
3140          "Only one type should be a floating point type");
3141   Type *IntTy =
3142       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
3143   auto *VecIntTy = FixedVectorType::get(IntTy, VF);
3144   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
3145   return Builder.CreateBitOrPointerCast(CastVal, DstFVTy);
3146 }
3147 
3148 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
3149                                                          BasicBlock *Bypass) {
3150   Value *Count = getOrCreateTripCount(L);
3151   // Reuse existing vector loop preheader for TC checks.
3152   // Note that new preheader block is generated for vector loop.
3153   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
3154   IRBuilder<> Builder(TCCheckBlock->getTerminator());
3155 
3156   // Generate code to check if the loop's trip count is less than VF * UF, or
3157   // equal to it in case a scalar epilogue is required; this implies that the
3158   // vector trip count is zero. This check also covers the case where adding one
3159   // to the backedge-taken count overflowed leading to an incorrect trip count
3160   // of zero. In this case we will also jump to the scalar loop.
3161   auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE
3162                                             : ICmpInst::ICMP_ULT;
3163 
3164   // If tail is to be folded, vector loop takes care of all iterations.
3165   Value *CheckMinIters = Builder.getFalse();
3166   if (!Cost->foldTailByMasking()) {
3167     Value *Step = createStepForVF(Builder, Count->getType(), VF, UF);
3168     CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
3169   }
3170   // Create new preheader for vector loop.
3171   LoopVectorPreHeader =
3172       SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
3173                  "vector.ph");
3174 
3175   assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
3176                                DT->getNode(Bypass)->getIDom()) &&
3177          "TC check is expected to dominate Bypass");
3178 
3179   // Update dominator for Bypass & LoopExit (if needed).
3180   DT->changeImmediateDominator(Bypass, TCCheckBlock);
3181   if (!Cost->requiresScalarEpilogue(VF))
3182     // If there is an epilogue which must run, there's no edge from the
    // middle block to exit blocks and thus no need to update the immediate
3184     // dominator of the exit blocks.
3185     DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
3186 
3187   ReplaceInstWithInst(
3188       TCCheckBlock->getTerminator(),
3189       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
3190   LoopBypassBlocks.push_back(TCCheckBlock);
3191 }
3192 
3193 BasicBlock *InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
3194 
3195   BasicBlock *const SCEVCheckBlock =
3196       RTChecks.emitSCEVChecks(L, Bypass, LoopVectorPreHeader, LoopExitBlock);
3197   if (!SCEVCheckBlock)
3198     return nullptr;
3199 
3200   assert(!(SCEVCheckBlock->getParent()->hasOptSize() ||
3201            (OptForSizeBasedOnProfile &&
3202             Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) &&
3203          "Cannot SCEV check stride or overflow when optimizing for size");
3204 
3205 
3206   // Update dominator only if this is first RT check.
3207   if (LoopBypassBlocks.empty()) {
3208     DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
3209     if (!Cost->requiresScalarEpilogue(VF))
3210       // If there is an epilogue which must run, there's no edge from the
      // middle block to exit blocks and thus no need to update the immediate
3212       // dominator of the exit blocks.
3213       DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
3214   }
3215 
3216   LoopBypassBlocks.push_back(SCEVCheckBlock);
3217   AddedSafetyChecks = true;
3218   return SCEVCheckBlock;
3219 }
3220 
3221 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L,
3222                                                       BasicBlock *Bypass) {
3223   // VPlan-native path does not do any analysis for runtime checks currently.
3224   if (EnableVPlanNativePath)
3225     return nullptr;
3226 
3227   BasicBlock *const MemCheckBlock =
3228       RTChecks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader);
3229 
  // Check if we generated code that checks at runtime whether arrays overlap.
  // We put the checks into a separate block to make the more common case of
  // few elements faster.
3233   if (!MemCheckBlock)
3234     return nullptr;
3235 
3236   if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
3237     assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
3238            "Cannot emit memory checks when optimizing for size, unless forced "
3239            "to vectorize.");
3240     ORE->emit([&]() {
3241       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
3242                                         L->getStartLoc(), L->getHeader())
3243              << "Code-size may be reduced by not forcing "
3244                 "vectorization, or by source-code modifications "
3245                 "eliminating the need for runtime checks "
3246                 "(e.g., adding 'restrict').";
3247     });
3248   }
3249 
3250   LoopBypassBlocks.push_back(MemCheckBlock);
3251 
3252   AddedSafetyChecks = true;
3253 
3254   // We currently don't use LoopVersioning for the actual loop cloning but we
3255   // still use it to add the noalias metadata.
3256   LVer = std::make_unique<LoopVersioning>(
3257       *Legal->getLAI(),
3258       Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
3259       DT, PSE.getSE());
3260   LVer->prepareNoAliasMetadata();
3261   return MemCheckBlock;
3262 }
3263 
3264 Value *InnerLoopVectorizer::emitTransformedIndex(
3265     IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
3266     const InductionDescriptor &ID, BasicBlock *VectorHeader) const {
3267 
3268   SCEVExpander Exp(*SE, DL, "induction");
3269   auto Step = ID.getStep();
3270   auto StartValue = ID.getStartValue();
3271   assert(Index->getType()->getScalarType() == Step->getType() &&
3272          "Index scalar type does not match StepValue type");
3273 
  // Note: the IR at this point is broken. We cannot use SE to create any new
  // SCEV and then expand it, hoping that SCEV's simplification will give us
  // better code. Unfortunately, attempting to do so on invalid IR may lead to
  // various SCEV crashes. So all we can do is use the builder and rely on
  // InstCombine for future simplifications. Here we handle only some trivial
  // cases.
3280   auto CreateAdd = [&B](Value *X, Value *Y) {
3281     assert(X->getType() == Y->getType() && "Types don't match!");
3282     if (auto *CX = dyn_cast<ConstantInt>(X))
3283       if (CX->isZero())
3284         return Y;
3285     if (auto *CY = dyn_cast<ConstantInt>(Y))
3286       if (CY->isZero())
3287         return X;
3288     return B.CreateAdd(X, Y);
3289   };
3290 
3291   // We allow X to be a vector type, in which case Y will potentially be
3292   // splatted into a vector with the same element count.
3293   auto CreateMul = [&B](Value *X, Value *Y) {
3294     assert(X->getType()->getScalarType() == Y->getType() &&
3295            "Types don't match!");
3296     if (auto *CX = dyn_cast<ConstantInt>(X))
3297       if (CX->isOne())
3298         return Y;
3299     if (auto *CY = dyn_cast<ConstantInt>(Y))
3300       if (CY->isOne())
3301         return X;
3302     VectorType *XVTy = dyn_cast<VectorType>(X->getType());
3303     if (XVTy && !isa<VectorType>(Y->getType()))
3304       Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
3305     return B.CreateMul(X, Y);
3306   };
3307 
3308   // Get a suitable insert point for SCEV expansion. For blocks in the vector
3309   // loop, choose the end of the vector loop header (=VectorHeader), because
3310   // the DomTree is not kept up-to-date for additional blocks generated in the
3311   // vector loop. By using the header as insertion point, we guarantee that the
3312   // expanded instructions dominate all their uses.
3313   auto GetInsertPoint = [this, &B, VectorHeader]() {
3314     BasicBlock *InsertBB = B.GetInsertPoint()->getParent();
3315     if (InsertBB != LoopVectorBody &&
3316         LI->getLoopFor(VectorHeader) == LI->getLoopFor(InsertBB))
3317       return VectorHeader->getTerminator();
3318     return &*B.GetInsertPoint();
3319   };
3320 
3321   switch (ID.getKind()) {
3322   case InductionDescriptor::IK_IntInduction: {
3323     assert(!isa<VectorType>(Index->getType()) &&
3324            "Vector indices not supported for integer inductions yet");
3325     assert(Index->getType() == StartValue->getType() &&
3326            "Index type does not match StartValue type");
3327     if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
3328       return B.CreateSub(StartValue, Index);
3329     auto *Offset = CreateMul(
3330         Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()));
3331     return CreateAdd(StartValue, Offset);
3332   }
3333   case InductionDescriptor::IK_PtrInduction: {
3334     assert(isa<SCEVConstant>(Step) &&
3335            "Expected constant step for pointer induction");
3336     return B.CreateGEP(
3337         ID.getElementType(), StartValue,
3338         CreateMul(Index,
3339                   Exp.expandCodeFor(Step, Index->getType()->getScalarType(),
3340                                     GetInsertPoint())));
3341   }
3342   case InductionDescriptor::IK_FpInduction: {
3343     assert(!isa<VectorType>(Index->getType()) &&
3344            "Vector indices not supported for FP inductions yet");
3345     assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
3346     auto InductionBinOp = ID.getInductionBinOp();
3347     assert(InductionBinOp &&
3348            (InductionBinOp->getOpcode() == Instruction::FAdd ||
3349             InductionBinOp->getOpcode() == Instruction::FSub) &&
3350            "Original bin op should be defined for FP induction");
3351 
3352     Value *StepValue = cast<SCEVUnknown>(Step)->getValue();
3353     Value *MulExp = B.CreateFMul(StepValue, Index);
3354     return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
3355                          "induction");
3356   }
3357   case InductionDescriptor::IK_NoInduction:
3358     return nullptr;
3359   }
3360   llvm_unreachable("invalid enum");
3361 }
3362 
3363 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
3364   LoopScalarBody = OrigLoop->getHeader();
3365   LoopVectorPreHeader = OrigLoop->getLoopPreheader();
3366   assert(LoopVectorPreHeader && "Invalid loop structure");
3367   LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr
3368   assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) &&
3369          "multiple exit loop without required epilogue?");
3370 
3371   LoopMiddleBlock =
3372       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3373                  LI, nullptr, Twine(Prefix) + "middle.block");
3374   LoopScalarPreHeader =
3375       SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
3376                  nullptr, Twine(Prefix) + "scalar.ph");
3377 
3378   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3379 
3380   // Set up the middle block terminator.  Two cases:
3381   // 1) If we know that we must execute the scalar epilogue, emit an
3382   //    unconditional branch.
3383   // 2) Otherwise, we must have a single unique exit block (due to how we
  //    implement the multiple exit case).  In this case, set up a conditional
3385   //    branch from the middle block to the loop scalar preheader, and the
3386   //    exit block.  completeLoopSkeleton will update the condition to use an
3387   //    iteration check, if required to decide whether to execute the remainder.
3388   BranchInst *BrInst = Cost->requiresScalarEpilogue(VF) ?
3389     BranchInst::Create(LoopScalarPreHeader) :
3390     BranchInst::Create(LoopExitBlock, LoopScalarPreHeader,
3391                        Builder.getTrue());
3392   BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3393   ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
3394 
  // We intentionally don't let SplitBlock update LoopInfo since LoopVectorBody
  // should belong to a different loop than LoopVectorPreHeader. LoopVectorBody
  // is explicitly added to the correct place a few lines later.
3398   LoopVectorBody =
3399       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3400                  nullptr, nullptr, Twine(Prefix) + "vector.body");
3401 
3402   // Update dominator for loop exit.
3403   if (!Cost->requiresScalarEpilogue(VF))
3404     // If there is an epilogue which must run, there's no edge from the
    // middle block to exit blocks and thus no need to update the immediate
3406     // dominator of the exit blocks.
3407     DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
3408 
3409   // Create and register the new vector loop.
3410   Loop *Lp = LI->AllocateLoop();
3411   Loop *ParentLoop = OrigLoop->getParentLoop();
3412 
3413   // Insert the new loop into the loop nest and register the new basic blocks
3414   // before calling any utilities such as SCEV that require valid LoopInfo.
3415   if (ParentLoop) {
3416     ParentLoop->addChildLoop(Lp);
3417   } else {
3418     LI->addTopLevelLoop(Lp);
3419   }
3420   Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
3421   return Lp;
3422 }
3423 
3424 void InnerLoopVectorizer::createInductionResumeValues(
3425     Loop *L, std::pair<BasicBlock *, Value *> AdditionalBypass) {
3426   assert(((AdditionalBypass.first && AdditionalBypass.second) ||
3427           (!AdditionalBypass.first && !AdditionalBypass.second)) &&
3428          "Inconsistent information about additional bypass.");
3429 
3430   Value *VectorTripCount = getOrCreateVectorTripCount(L);
3431   assert(VectorTripCount && L && "Expected valid arguments");
3432   // We are going to resume the execution of the scalar loop.
3433   // Go over all of the induction variables that we found and fix the
3434   // PHIs that are left in the scalar version of the loop.
3435   // The starting values of PHI nodes depend on the counter of the last
3436   // iteration in the vectorized loop.
3437   // If we come from a bypass edge then we need to start from the original
3438   // start value.
3439   Instruction *OldInduction = Legal->getPrimaryInduction();
3440   for (auto &InductionEntry : Legal->getInductionVars()) {
3441     PHINode *OrigPhi = InductionEntry.first;
3442     InductionDescriptor II = InductionEntry.second;
3443 
    // Create phi nodes to merge from the backedge-taken check block.
3445     PHINode *BCResumeVal =
3446         PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
3447                         LoopScalarPreHeader->getTerminator());
3448     // Copy original phi DL over to the new one.
3449     BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
3450     Value *&EndValue = IVEndValues[OrigPhi];
3451     Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
3452     if (OrigPhi == OldInduction) {
3453       // We know what the end value is.
3454       EndValue = VectorTripCount;
3455     } else {
3456       IRBuilder<> B(L->getLoopPreheader()->getTerminator());
3457 
3458       // Fast-math-flags propagate from the original induction instruction.
3459       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3460         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3461 
3462       Type *StepType = II.getStep()->getType();
3463       Instruction::CastOps CastOp =
3464           CastInst::getCastOpcode(VectorTripCount, true, StepType, true);
3465       Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd");
3466       const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout();
3467       EndValue =
3468           emitTransformedIndex(B, CRD, PSE.getSE(), DL, II, LoopVectorBody);
3469       EndValue->setName("ind.end");
3470 
3471       // Compute the end value for the additional bypass (if applicable).
3472       if (AdditionalBypass.first) {
3473         B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt()));
3474         CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true,
3475                                          StepType, true);
3476         CRD =
3477             B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd");
3478         EndValueFromAdditionalBypass =
3479             emitTransformedIndex(B, CRD, PSE.getSE(), DL, II, LoopVectorBody);
3480         EndValueFromAdditionalBypass->setName("ind.end");
3481       }
3482     }
3483     // The new PHI merges the original incoming value, in case of a bypass,
3484     // or the value at the end of the vectorized loop.
3485     BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);
3486 
3487     // Fix the scalar body counter (PHI node).
3488     // The old induction's phi node in the scalar body needs the truncated
3489     // value.
3490     for (BasicBlock *BB : LoopBypassBlocks)
3491       BCResumeVal->addIncoming(II.getStartValue(), BB);
3492 
3493     if (AdditionalBypass.first)
3494       BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first,
3495                                             EndValueFromAdditionalBypass);
3496 
3497     OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
3498   }
3499 }
3500 
3501 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L,
3502                                                       MDNode *OrigLoopID) {
3503   assert(L && "Expected valid loop.");
3504 
3505   // The trip counts should be cached by now.
3506   Value *Count = getOrCreateTripCount(L);
3507   Value *VectorTripCount = getOrCreateVectorTripCount(L);
3508 
3509   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3510 
3511   // Add a check in the middle block to see if we have completed
3512   // all of the iterations in the first vector loop.  Three cases:
3513   // 1) If we require a scalar epilogue, there is no conditional branch as
3514   //    we unconditionally branch to the scalar preheader.  Do nothing.
3515   // 2) If (N - N%VF) == N, then we *don't* need to run the remainder.
3516   //    Thus if tail is to be folded, we know we don't need to run the
3517   //    remainder and we can use the previous value for the condition (true).
  //  3) Otherwise, construct a runtime check (sketched below).
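  // Sketch of the resulting check (illustrative, 64-bit trip counts):
  //   %cmp.n = icmp eq i64 %N, %n.vec
  // which becomes the condition of the existing middle-block branch, so the
  // exit block is taken when no scalar iterations remain.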
3519   if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) {
3520     Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
3521                                         Count, VectorTripCount, "cmp.n",
3522                                         LoopMiddleBlock->getTerminator());
3523 
3524     // Here we use the same DebugLoc as the scalar loop latch terminator instead
3525     // of the corresponding compare because they may have ended up with
3526     // different line numbers and we want to avoid awkward line stepping while
    // debugging. E.g., if the compare has a line number inside the loop.
3528     CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3529     cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN);
3530   }
3531 
3532   // Get ready to start creating new instructions into the vectorized body.
3533   assert(LoopVectorPreHeader == L->getLoopPreheader() &&
3534          "Inconsistent vector loop preheader");
3535   Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
3536 
3537 #ifdef EXPENSIVE_CHECKS
3538   assert(DT->verify(DominatorTree::VerificationLevel::Fast));
3539   LI->verify(*DT);
3540 #endif
3541 
3542   return LoopVectorPreHeader;
3543 }
3544 
3545 std::pair<BasicBlock *, Value *>
3546 InnerLoopVectorizer::createVectorizedLoopSkeleton() {
3547   /*
3548    In this function we generate a new loop. The new loop will contain
3549    the vectorized instructions while the old loop will continue to run the
3550    scalar remainder.
3551 
3552        [ ] <-- loop iteration number check.
3553     /   |
3554    /    v
3555   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
3556   |  /  |
3557   | /   v
3558   ||   [ ]     <-- vector pre header.
3559   |/    |
3560   |     v
3561   |    [  ] \
3562   |    [  ]_|   <-- vector loop.
3563   |     |
3564   |     v
3565   \   -[ ]   <--- middle-block.
3566    \/   |
3567    /\   v
3568    | ->[ ]     <--- new preheader.
3569    |    |
3570  (opt)  v      <-- edge from middle to exit iff epilogue is not required.
3571    |   [ ] \
3572    |   [ ]_|   <-- old scalar loop to handle remainder (scalar epilogue).
3573     \   |
3574      \  v
3575       >[ ]     <-- exit block(s).
3576    ...
3577    */
3578 
3579   // Get the metadata of the original loop before it gets modified.
3580   MDNode *OrigLoopID = OrigLoop->getLoopID();
3581 
3582   // Workaround!  Compute the trip count of the original loop and cache it
3583   // before we start modifying the CFG.  This code has a systemic problem
3584   // wherein it tries to run analysis over partially constructed IR; this is
3585   // wrong, and not simply for SCEV.  The trip count of the original loop
3586   // simply happens to be prone to hitting this in practice.  In theory, we
3587   // can hit the same issue for any SCEV, or ValueTracking query done during
3588   // mutation.  See PR49900.
3589   getOrCreateTripCount(OrigLoop);
3590 
3591   // Create an empty vector loop, and prepare basic blocks for the runtime
3592   // checks.
3593   Loop *Lp = createVectorLoopSkeleton("");
3594 
3595   // Now, compare the new count to zero. If it is zero skip the vector loop and
3596   // jump to the scalar loop. This check also covers the case where the
3597   // backedge-taken count is uint##_max: adding one to it will overflow leading
3598   // to an incorrect trip count of zero. In this (rare) case we will also jump
3599   // to the scalar loop.
3600   emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader);
3601 
3602   // Generate the code to check any assumptions that we've made for SCEV
3603   // expressions.
3604   emitSCEVChecks(Lp, LoopScalarPreHeader);
3605 
3606   // Generate the code that checks in runtime if arrays overlap. We put the
3607   // checks into a separate block to make the more common case of few elements
3608   // faster.
3609   emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
3610 
3611   createHeaderBranch(Lp);
3612 
3613   // Emit phis for the new starting index of the scalar loop.
3614   createInductionResumeValues(Lp);
3615 
3616   return {completeLoopSkeleton(Lp, OrigLoopID), nullptr};
3617 }
3618 
3619 // Fix up external users of the induction variable. At this point, we are
3620 // in LCSSA form, with all external PHIs that use the IV having one input value,
3621 // coming from the remainder loop. We need those PHIs to also have a correct
3622 // value for the IV when arriving directly from the middle block.
3623 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3624                                        const InductionDescriptor &II,
3625                                        Value *CountRoundDown, Value *EndValue,
3626                                        BasicBlock *MiddleBlock) {
3627   // There are two kinds of external IV usages - those that use the value
3628   // computed in the last iteration (the PHI) and those that use the penultimate
3629   // value (the value that feeds into the phi from the loop latch).
3630   // We allow both, but they, obviously, have different values.
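  //
  // As a rough sketch (shorthand IR; names are illustrative only):
  //
  //   loop:
  //     %iv = phi i64 [ 0, %ph ], [ %iv.next, %loop ]
  //     %iv.next = add i64 %iv, 1
  //     ...
  //   exit:
  //     %a = phi i64 [ %iv.next, %loop ] ; uses the last value
  //     %b = phi i64 [ %iv, %loop ]      ; uses the penultimate value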
3631 
3632   assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");
3633 
3634   DenseMap<Value *, Value *> MissingVals;
3635 
3636   // An external user of the last iteration's value should see the value that
3637   // the remainder loop uses to initialize its own IV.
3638   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3639   for (User *U : PostInc->users()) {
3640     Instruction *UI = cast<Instruction>(U);
3641     if (!OrigLoop->contains(UI)) {
3642       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3643       MissingVals[UI] = EndValue;
3644     }
3645   }
3646 
  // An external user of the penultimate value needs to see EndValue - Step.
3648   // The simplest way to get this is to recompute it from the constituent SCEVs,
3649   // that is Start + (Step * (CRD - 1)).
3650   for (User *U : OrigPhi->users()) {
3651     auto *UI = cast<Instruction>(U);
3652     if (!OrigLoop->contains(UI)) {
3653       const DataLayout &DL =
3654           OrigLoop->getHeader()->getModule()->getDataLayout();
3655       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3656 
3657       IRBuilder<> B(MiddleBlock->getTerminator());
3658 
3659       // Fast-math-flags propagate from the original induction instruction.
3660       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3661         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3662 
3663       Value *CountMinusOne = B.CreateSub(
3664           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3665       Value *CMO =
3666           !II.getStep()->getType()->isIntegerTy()
3667               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3668                              II.getStep()->getType())
3669               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3670       CMO->setName("cast.cmo");
3671       Value *Escape =
3672           emitTransformedIndex(B, CMO, PSE.getSE(), DL, II, LoopVectorBody);
3673       Escape->setName("ind.escape");
3674       MissingVals[UI] = Escape;
3675     }
3676   }
3677 
3678   for (auto &I : MissingVals) {
3679     PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
3681     // that is %IV2 = phi [...], [ %IV1, %latch ]
3682     // In this case, if IV1 has an external use, we need to avoid adding both
3683     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3684     // don't already have an incoming value for the middle block.
3685     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3686       PHI->addIncoming(I.second, MiddleBlock);
3687   }
3688 }
3689 
3690 namespace {
3691 
3692 struct CSEDenseMapInfo {
3693   static bool canHandle(const Instruction *I) {
3694     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3695            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3696   }
3697 
3698   static inline Instruction *getEmptyKey() {
3699     return DenseMapInfo<Instruction *>::getEmptyKey();
3700   }
3701 
3702   static inline Instruction *getTombstoneKey() {
3703     return DenseMapInfo<Instruction *>::getTombstoneKey();
3704   }
3705 
3706   static unsigned getHashValue(const Instruction *I) {
3707     assert(canHandle(I) && "Unknown instruction!");
3708     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3709                                                            I->value_op_end()));
3710   }
3711 
3712   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3713     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3714         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3715       return LHS == RHS;
3716     return LHS->isIdenticalTo(RHS);
3717   }
3718 };
3719 
3720 } // end anonymous namespace
3721 
/// Perform CSE of induction variable instructions.
3723 static void cse(BasicBlock *BB) {
  // Perform simple CSE.
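  // For example, if unrolling produced two identical getelementptr
  // instructions in the vector body (shorthand IR; purely illustrative):
  //
  //   %gep.0 = getelementptr inbounds i32, i32* %base, i64 %idx
  //   %gep.1 = getelementptr inbounds i32, i32* %base, i64 %idx
  //
  // the uses of %gep.1 are rewritten to %gep.0 and %gep.1 is erased.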
3725   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3726   for (Instruction &In : llvm::make_early_inc_range(*BB)) {
3727     if (!CSEDenseMapInfo::canHandle(&In))
3728       continue;
3729 
3730     // Check if we can replace this instruction with any of the
3731     // visited instructions.
3732     if (Instruction *V = CSEMap.lookup(&In)) {
3733       In.replaceAllUsesWith(V);
3734       In.eraseFromParent();
3735       continue;
3736     }
3737 
3738     CSEMap[&In] = &In;
3739   }
3740 }
3741 
3742 InstructionCost
3743 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF,
3744                                               bool &NeedToScalarize) const {
3745   Function *F = CI->getCalledFunction();
3746   Type *ScalarRetTy = CI->getType();
3747   SmallVector<Type *, 4> Tys, ScalarTys;
3748   for (auto &ArgOp : CI->args())
3749     ScalarTys.push_back(ArgOp->getType());
3750 
3751   // Estimate cost of scalarized vector call. The source operands are assumed
3752   // to be vectors, so we need to extract individual elements from there,
3753   // execute VF scalar calls, and then gather the result into the vector return
3754   // value.
3755   InstructionCost ScalarCallCost =
3756       TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput);
3757   if (VF.isScalar())
3758     return ScalarCallCost;
3759 
3760   // Compute corresponding vector type for return value and arguments.
3761   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3762   for (Type *ScalarTy : ScalarTys)
3763     Tys.push_back(ToVectorTy(ScalarTy, VF));
3764 
3765   // Compute costs of unpacking argument values for the scalar calls and
3766   // packing the return values to a vector.
3767   InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
3768 
3769   InstructionCost Cost =
3770       ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
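  // For example, assuming a ScalarCallCost of 10, VF = 4 and a
  // ScalarizationCost of 6 (numbers are purely illustrative), the scalarized
  // estimate would be 10 * 4 + 6 = 46.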
3771 
3772   // If we can't emit a vector call for this function, then the currently found
3773   // cost is the cost we need to return.
3774   NeedToScalarize = true;
3775   VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
3776   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3777 
3778   if (!TLI || CI->isNoBuiltin() || !VecFunc)
3779     return Cost;
3780 
3781   // If the corresponding vector cost is cheaper, return its cost.
3782   InstructionCost VectorCallCost =
3783       TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput);
3784   if (VectorCallCost < Cost) {
3785     NeedToScalarize = false;
3786     Cost = VectorCallCost;
3787   }
3788   return Cost;
3789 }
3790 
3791 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) {
3792   if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy()))
3793     return Elt;
3794   return VectorType::get(Elt, VF);
3795 }
3796 
3797 InstructionCost
3798 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
3799                                                    ElementCount VF) const {
3800   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3801   assert(ID && "Expected intrinsic call!");
3802   Type *RetTy = MaybeVectorizeType(CI->getType(), VF);
3803   FastMathFlags FMF;
3804   if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
3805     FMF = FPMO->getFastMathFlags();
3806 
3807   SmallVector<const Value *> Arguments(CI->args());
3808   FunctionType *FTy = CI->getCalledFunction()->getFunctionType();
3809   SmallVector<Type *> ParamTys;
3810   std::transform(FTy->param_begin(), FTy->param_end(),
3811                  std::back_inserter(ParamTys),
3812                  [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); });
3813 
3814   IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
3815                                     dyn_cast<IntrinsicInst>(CI));
3816   return TTI.getIntrinsicInstrCost(CostAttrs,
3817                                    TargetTransformInfo::TCK_RecipThroughput);
3818 }
3819 
3820 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3821   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3822   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3823   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3824 }
3825 
3826 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3827   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3828   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3829   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3830 }
3831 
3832 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) {
3833   // For every instruction `I` in MinBWs, truncate the operands, create a
3834   // truncated version of `I` and reextend its result. InstCombine runs
3835   // later and will remove any ext/trunc pairs.
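  //
  // As a rough sketch (shorthand IR; purely illustrative), an i32 add whose
  // minimal bitwidth is known to be 8 becomes:
  //
  //   %a.tr = trunc <4 x i32> %a to <4 x i8>
  //   %b.tr = trunc <4 x i32> %b to <4 x i8>
  //   %r.tr = add <4 x i8> %a.tr, %b.tr
  //   %r    = zext <4 x i8> %r.tr to <4 x i32>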
3836   SmallPtrSet<Value *, 4> Erased;
3837   for (const auto &KV : Cost->getMinimalBitwidths()) {
3838     // If the value wasn't vectorized, we must maintain the original scalar
3839     // type. The absence of the value from State indicates that it
3840     // wasn't vectorized.
3841     // FIXME: Should not rely on getVPValue at this point.
3842     VPValue *Def = State.Plan->getVPValue(KV.first, true);
3843     if (!State.hasAnyVectorValue(Def))
3844       continue;
3845     for (unsigned Part = 0; Part < UF; ++Part) {
3846       Value *I = State.get(Def, Part);
3847       if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
3848         continue;
3849       Type *OriginalTy = I->getType();
3850       Type *ScalarTruncatedTy =
3851           IntegerType::get(OriginalTy->getContext(), KV.second);
3852       auto *TruncatedTy = VectorType::get(
3853           ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount());
3854       if (TruncatedTy == OriginalTy)
3855         continue;
3856 
3857       IRBuilder<> B(cast<Instruction>(I));
3858       auto ShrinkOperand = [&](Value *V) -> Value * {
3859         if (auto *ZI = dyn_cast<ZExtInst>(V))
3860           if (ZI->getSrcTy() == TruncatedTy)
3861             return ZI->getOperand(0);
3862         return B.CreateZExtOrTrunc(V, TruncatedTy);
3863       };
3864 
3865       // The actual instruction modification depends on the instruction type,
3866       // unfortunately.
3867       Value *NewI = nullptr;
3868       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3869         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3870                              ShrinkOperand(BO->getOperand(1)));
3871 
3872         // Any wrapping introduced by shrinking this operation shouldn't be
3873         // considered undefined behavior. So, we can't unconditionally copy
3874         // arithmetic wrapping flags to NewI.
3875         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3876       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3877         NewI =
3878             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3879                          ShrinkOperand(CI->getOperand(1)));
3880       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3881         NewI = B.CreateSelect(SI->getCondition(),
3882                               ShrinkOperand(SI->getTrueValue()),
3883                               ShrinkOperand(SI->getFalseValue()));
3884       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3885         switch (CI->getOpcode()) {
3886         default:
3887           llvm_unreachable("Unhandled cast!");
3888         case Instruction::Trunc:
3889           NewI = ShrinkOperand(CI->getOperand(0));
3890           break;
3891         case Instruction::SExt:
3892           NewI = B.CreateSExtOrTrunc(
3893               CI->getOperand(0),
3894               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3895           break;
3896         case Instruction::ZExt:
3897           NewI = B.CreateZExtOrTrunc(
3898               CI->getOperand(0),
3899               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3900           break;
3901         }
3902       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
3903         auto Elements0 =
3904             cast<VectorType>(SI->getOperand(0)->getType())->getElementCount();
3905         auto *O0 = B.CreateZExtOrTrunc(
3906             SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
3907         auto Elements1 =
3908             cast<VectorType>(SI->getOperand(1)->getType())->getElementCount();
3909         auto *O1 = B.CreateZExtOrTrunc(
3910             SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
3911 
3912         NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
3913       } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
3914         // Don't do anything with the operands, just extend the result.
3915         continue;
3916       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3917         auto Elements =
3918             cast<VectorType>(IE->getOperand(0)->getType())->getElementCount();
3919         auto *O0 = B.CreateZExtOrTrunc(
3920             IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3921         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3922         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3923       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3924         auto Elements =
3925             cast<VectorType>(EE->getOperand(0)->getType())->getElementCount();
3926         auto *O0 = B.CreateZExtOrTrunc(
3927             EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3928         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3929       } else {
3930         // If we don't know what to do, be conservative and don't do anything.
3931         continue;
3932       }
3933 
3934       // Lastly, extend the result.
3935       NewI->takeName(cast<Instruction>(I));
3936       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3937       I->replaceAllUsesWith(Res);
3938       cast<Instruction>(I)->eraseFromParent();
3939       Erased.insert(I);
3940       State.reset(Def, Res, Part);
3941     }
3942   }
3943 
3944   // We'll have created a bunch of ZExts that are now parentless. Clean up.
3945   for (const auto &KV : Cost->getMinimalBitwidths()) {
3946     // If the value wasn't vectorized, we must maintain the original scalar
3947     // type. The absence of the value from State indicates that it
3948     // wasn't vectorized.
3949     // FIXME: Should not rely on getVPValue at this point.
3950     VPValue *Def = State.Plan->getVPValue(KV.first, true);
3951     if (!State.hasAnyVectorValue(Def))
3952       continue;
3953     for (unsigned Part = 0; Part < UF; ++Part) {
3954       Value *I = State.get(Def, Part);
3955       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3956       if (Inst && Inst->use_empty()) {
3957         Value *NewI = Inst->getOperand(0);
3958         Inst->eraseFromParent();
3959         State.reset(Def, NewI, Part);
3960       }
3961     }
3962   }
3963 }
3964 
3965 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
3966   // Insert truncates and extends for any truncated instructions as hints to
3967   // InstCombine.
3968   if (VF.isVector())
3969     truncateToMinimalBitwidths(State);
3970 
3971   // Fix widened non-induction PHIs by setting up the PHI operands.
3972   if (OrigPHIsToFix.size()) {
3973     assert(EnableVPlanNativePath &&
3974            "Unexpected non-induction PHIs for fixup in non VPlan-native path");
3975     fixNonInductionPHIs(State);
3976   }
3977 
3978   // At this point every instruction in the original loop is widened to a
3979   // vector form. Now we need to fix the recurrences in the loop. These PHI
3980   // nodes are currently empty because we did not want to introduce cycles.
3981   // This is the second stage of vectorizing recurrences.
3982   fixCrossIterationPHIs(State);
3983 
3984   // Forget the original basic block.
3985   PSE.getSE()->forgetLoop(OrigLoop);
3986 
3987   // If we inserted an edge from the middle block to the unique exit block,
3988   // update uses outside the loop (phis) to account for the newly inserted
3989   // edge.
3990   if (!Cost->requiresScalarEpilogue(VF)) {
3991     // Fix-up external users of the induction variables.
3992     for (auto &Entry : Legal->getInductionVars())
3993       fixupIVUsers(Entry.first, Entry.second,
3994                    getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
3995                    IVEndValues[Entry.first], LoopMiddleBlock);
3996 
3997     fixLCSSAPHIs(State);
3998   }
3999 
4000   for (Instruction *PI : PredicatedInstructions)
4001     sinkScalarOperands(&*PI);
4002 
4003   // Remove redundant induction instructions.
4004   cse(LoopVectorBody);
4005 
4006   // Set/update profile weights for the vector and remainder loops as original
4007   // loop iterations are now distributed among them. Note that original loop
4008   // represented by LoopScalarBody becomes remainder loop after vectorization.
4009   //
  // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
  // end up with a slightly roughened result, but that should be OK since
  // profile is not inherently precise anyway. Note also that a possible
  // bypass of the vector code caused by legality checks is ignored,
  // optimistically assigning all the weight to the vector loop.
  //
  // For scalable vectorization we can't know at compile time how many
  // iterations of the loop are handled in one vector iteration, so instead
  // assume a pessimistic vscale of '1'.
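  //
  // For example (numbers purely illustrative), with an estimated trip count
  // of 100 and VF * UF = 8, the vector loop is assigned roughly 100 / 8 = 12
  // iterations and the remainder loop the leftover 100 - 12 * 8 = 4.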
4019   setProfileInfoAfterUnrolling(
4020       LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody),
4021       LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF);
4022 }
4023 
4024 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
4025   // In order to support recurrences we need to be able to vectorize Phi nodes.
4026   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4027   // stage #2: We now need to fix the recurrences by adding incoming edges to
4028   // the currently empty PHI nodes. At this point every instruction in the
4029   // original loop is widened to a vector form so we can use them to construct
4030   // the incoming edges.
4031   VPBasicBlock *Header = State.Plan->getEntry()->getEntryBasicBlock();
4032   for (VPRecipeBase &R : Header->phis()) {
4033     if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R))
4034       fixReduction(ReductionPhi, State);
4035     else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))
4036       fixFirstOrderRecurrence(FOR, State);
4037   }
4038 }
4039 
4040 void InnerLoopVectorizer::fixFirstOrderRecurrence(
4041     VPFirstOrderRecurrencePHIRecipe *PhiR, VPTransformState &State) {
4042   // This is the second phase of vectorizing first-order recurrences. An
4043   // overview of the transformation is described below. Suppose we have the
4044   // following loop.
4045   //
4046   //   for (int i = 0; i < n; ++i)
4047   //     b[i] = a[i] - a[i - 1];
4048   //
4049   // There is a first-order recurrence on "a". For this loop, the shorthand
4050   // scalar IR looks like:
4051   //
4052   //   scalar.ph:
4053   //     s_init = a[-1]
4054   //     br scalar.body
4055   //
4056   //   scalar.body:
4057   //     i = phi [0, scalar.ph], [i+1, scalar.body]
4058   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
4059   //     s2 = a[i]
4060   //     b[i] = s2 - s1
4061   //     br cond, scalar.body, ...
4062   //
  // In this example, s1 is a recurrence because its value depends on the
4064   // previous iteration. In the first phase of vectorization, we created a
4065   // vector phi v1 for s1. We now complete the vectorization and produce the
4066   // shorthand vector IR shown below (for VF = 4, UF = 1).
4067   //
4068   //   vector.ph:
4069   //     v_init = vector(..., ..., ..., a[-1])
4070   //     br vector.body
4071   //
4072   //   vector.body
4073   //     i = phi [0, vector.ph], [i+4, vector.body]
4074   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
4075   //     v2 = a[i, i+1, i+2, i+3];
4076   //     v3 = vector(v1(3), v2(0, 1, 2))
4077   //     b[i, i+1, i+2, i+3] = v2 - v3
4078   //     br cond, vector.body, middle.block
4079   //
4080   //   middle.block:
4081   //     x = v2(3)
4082   //     br scalar.ph
4083   //
4084   //   scalar.ph:
4085   //     s_init = phi [x, middle.block], [a[-1], otherwise]
4086   //     br scalar.body
4087   //
  // After the vector loop completes execution, we extract the next value of
4089   // the recurrence (x) to use as the initial value in the scalar loop.
4090 
4091   // Extract the last vector element in the middle block. This will be the
4092   // initial value for the recurrence when jumping to the scalar loop.
4093   VPValue *PreviousDef = PhiR->getBackedgeValue();
4094   Value *Incoming = State.get(PreviousDef, UF - 1);
4095   auto *ExtractForScalar = Incoming;
4096   auto *IdxTy = Builder.getInt32Ty();
4097   if (VF.isVector()) {
4098     auto *One = ConstantInt::get(IdxTy, 1);
4099     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4100     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4101     auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
4102     ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx,
4103                                                     "vector.recur.extract");
4104   }
  // Extract the second-to-last element in the middle block if the
4106   // Phi is used outside the loop. We need to extract the phi itself
4107   // and not the last element (the phi update in the current iteration). This
4108   // will be the value when jumping to the exit block from the LoopMiddleBlock,
4109   // when the scalar loop is not run at all.
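  // For example (purely illustrative), with VF = 4 the scalar phi's value in
  // the final iteration corresponds to lane VF - 2 = 2 of Incoming, i.e. the
  // element just before the last one.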
4110   Value *ExtractForPhiUsedOutsideLoop = nullptr;
4111   if (VF.isVector()) {
4112     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4113     auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2));
4114     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
4115         Incoming, Idx, "vector.recur.extract.for.phi");
4116   } else if (UF > 1)
    // When the loop is unrolled without vectorizing, initialize
    // ExtractForPhiUsedOutsideLoop with the unrolled value just prior to
    // `Incoming`. This is analogous to the vectorized case above: extracting
    // the second-to-last element when VF > 1.
4121     ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);
4122 
4123   // Fix the initial value of the original recurrence in the scalar loop.
4124   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
4125   PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue());
4126   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
4127   auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue();
4128   for (auto *BB : predecessors(LoopScalarPreHeader)) {
4129     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
4130     Start->addIncoming(Incoming, BB);
4131   }
4132 
4133   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
4134   Phi->setName("scalar.recur");
4135 
4136   // Finally, fix users of the recurrence outside the loop. The users will need
4137   // either the last value of the scalar recurrence or the last value of the
4138   // vector recurrence we extracted in the middle block. Since the loop is in
4139   // LCSSA form, we just need to find all the phi nodes for the original scalar
4140   // recurrence in the exit block, and then add an edge for the middle block.
4141   // Note that LCSSA does not imply single entry when the original scalar loop
4142   // had multiple exiting edges (as we always run the last iteration in the
  // scalar epilogue); in that case, there is no edge from middle to exit and
  // thus no phis which need to be updated.
4145   if (!Cost->requiresScalarEpilogue(VF))
4146     for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4147       if (llvm::is_contained(LCSSAPhi.incoming_values(), Phi))
4148         LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
4149 }
4150 
4151 void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR,
4152                                        VPTransformState &State) {
4153   PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
  // Get its reduction variable descriptor.
4155   assert(Legal->isReductionVariable(OrigPhi) &&
4156          "Unable to find the reduction variable");
4157   const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
4158 
4159   RecurKind RK = RdxDesc.getRecurrenceKind();
4160   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
4161   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
4162   setDebugLocFromInst(ReductionStartValue);
4163 
4164   VPValue *LoopExitInstDef = PhiR->getBackedgeValue();
4165   // This is the vector-clone of the value that leaves the loop.
4166   Type *VecTy = State.get(LoopExitInstDef, 0)->getType();
4167 
4168   // Wrap flags are in general invalid after vectorization, clear them.
4169   clearReductionWrapFlags(RdxDesc, State);
4170 
4171   // Before each round, move the insertion point right between
4172   // the PHIs and the values we are going to write.
4173   // This allows us to write both PHINodes and the extractelement
4174   // instructions.
4175   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4176 
4177   setDebugLocFromInst(LoopExitInst);
4178 
4179   Type *PhiTy = OrigPhi->getType();
4180   // If tail is folded by masking, the vector value to leave the loop should be
4181   // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
4182   // instead of the former. For an inloop reduction the reduction will already
4183   // be predicated, and does not need to be handled here.
4184   if (Cost->foldTailByMasking() && !PhiR->isInLoop()) {
4185     for (unsigned Part = 0; Part < UF; ++Part) {
4186       Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
4187       Value *Sel = nullptr;
4188       for (User *U : VecLoopExitInst->users()) {
4189         if (isa<SelectInst>(U)) {
4190           assert(!Sel && "Reduction exit feeding two selects");
4191           Sel = U;
4192         } else
4193           assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
4194       }
4195       assert(Sel && "Reduction exit feeds no select");
4196       State.reset(LoopExitInstDef, Sel, Part);
4197 
4198       // If the target can create a predicated operator for the reduction at no
4199       // extra cost in the loop (for example a predicated vadd), it can be
4200       // cheaper for the select to remain in the loop than be sunk out of it,
4201       // and so use the select value for the phi instead of the old
4202       // LoopExitValue.
4203       if (PreferPredicatedReductionSelect ||
4204           TTI->preferPredicatedReductionSelect(
4205               RdxDesc.getOpcode(), PhiTy,
4206               TargetTransformInfo::ReductionFlags())) {
        auto *VecRdxPhi = cast<PHINode>(State.get(PhiR, Part));
4209         VecRdxPhi->setIncomingValueForBlock(
4210             LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel);
4211       }
4212     }
4213   }
4214 
4215   // If the vector reduction can be performed in a smaller type, we truncate
4216   // then extend the loop exit value to enable InstCombine to evaluate the
4217   // entire expression in the smaller type.
4218   if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) {
4219     assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!");
4220     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
4221     Builder.SetInsertPoint(
4222         LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
4223     VectorParts RdxParts(UF);
4224     for (unsigned Part = 0; Part < UF; ++Part) {
4225       RdxParts[Part] = State.get(LoopExitInstDef, Part);
4226       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4227       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
4228                                         : Builder.CreateZExt(Trunc, VecTy);
4229       for (User *U : llvm::make_early_inc_range(RdxParts[Part]->users()))
4230         if (U != Trunc) {
4231           U->replaceUsesOfWith(RdxParts[Part], Extnd);
4232           RdxParts[Part] = Extnd;
4233         }
4234     }
4235     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4236     for (unsigned Part = 0; Part < UF; ++Part) {
4237       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4238       State.reset(LoopExitInstDef, RdxParts[Part], Part);
4239     }
4240   }
4241 
4242   // Reduce all of the unrolled parts into a single vector.
4243   Value *ReducedPartRdx = State.get(LoopExitInstDef, 0);
4244   unsigned Op = RecurrenceDescriptor::getOpcode(RK);
4245 
4246   // The middle block terminator has already been assigned a DebugLoc here (the
4247   // OrigLoop's single latch terminator). We want the whole middle block to
4248   // appear to execute on this line because: (a) it is all compiler generated,
4249   // (b) these instructions are always executed after evaluating the latch
4250   // conditional branch, and (c) other passes may add new predecessors which
4251   // terminate on this line. This is the easiest way to ensure we don't
4252   // accidentally cause an extra step back into the loop while debugging.
4253   setDebugLocFromInst(LoopMiddleBlock->getTerminator());
4254   if (PhiR->isOrdered())
4255     ReducedPartRdx = State.get(LoopExitInstDef, UF - 1);
4256   else {
4257     // Floating-point operations should have some FMF to enable the reduction.
4258     IRBuilderBase::FastMathFlagGuard FMFG(Builder);
4259     Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
4260     for (unsigned Part = 1; Part < UF; ++Part) {
4261       Value *RdxPart = State.get(LoopExitInstDef, Part);
4262       if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
4263         ReducedPartRdx = Builder.CreateBinOp(
4264             (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
4265       } else if (RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK))
4266         ReducedPartRdx = createSelectCmpOp(Builder, ReductionStartValue, RK,
4267                                            ReducedPartRdx, RdxPart);
4268       else
4269         ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
4270     }
4271   }
4272 
4273   // Create the reduction after the loop. Note that inloop reductions create the
4274   // target reduction in the loop using a Reduction recipe.
4275   if (VF.isVector() && !PhiR->isInLoop()) {
4276     ReducedPartRdx =
4277         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, OrigPhi);
4278     // If the reduction can be performed in a smaller type, we need to extend
4279     // the reduction to the wider type before we branch to the original loop.
4280     if (PhiTy != RdxDesc.getRecurrenceType())
4281       ReducedPartRdx = RdxDesc.isSigned()
4282                            ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
4283                            : Builder.CreateZExt(ReducedPartRdx, PhiTy);
4284   }
4285 
4286   // Create a phi node that merges control-flow from the backedge-taken check
4287   // block and the middle block.
4288   PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx",
4289                                         LoopScalarPreHeader->getTerminator());
4290   for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
4291     BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
4292   BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
4293 
4294   // Now, we need to fix the users of the reduction variable
4295   // inside and outside of the scalar remainder loop.
4296 
4297   // We know that the loop is in LCSSA form. We need to update the PHI nodes
4298   // in the exit blocks.  See comment on analogous loop in
  // fixFirstOrderRecurrence for a more complete explanation of the logic.
4300   if (!Cost->requiresScalarEpilogue(VF))
4301     for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4302       if (llvm::is_contained(LCSSAPhi.incoming_values(), LoopExitInst))
4303         LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
4304 
4305   // Fix the scalar loop reduction variable with the incoming reduction sum
4306   // from the vector body and from the backedge value.
4307   int IncomingEdgeBlockIdx =
4308       OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4309   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4310   // Pick the other block.
4311   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4312   OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4313   OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4314 }
4315 
4316 void InnerLoopVectorizer::clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
4317                                                   VPTransformState &State) {
4318   RecurKind RK = RdxDesc.getRecurrenceKind();
4319   if (RK != RecurKind::Add && RK != RecurKind::Mul)
4320     return;
4321 
4322   Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
4323   assert(LoopExitInstr && "null loop exit instruction");
4324   SmallVector<Instruction *, 8> Worklist;
4325   SmallPtrSet<Instruction *, 8> Visited;
4326   Worklist.push_back(LoopExitInstr);
4327   Visited.insert(LoopExitInstr);
4328 
4329   while (!Worklist.empty()) {
4330     Instruction *Cur = Worklist.pop_back_val();
4331     if (isa<OverflowingBinaryOperator>(Cur))
4332       for (unsigned Part = 0; Part < UF; ++Part) {
4333         // FIXME: Should not rely on getVPValue at this point.
4334         Value *V = State.get(State.Plan->getVPValue(Cur, true), Part);
4335         cast<Instruction>(V)->dropPoisonGeneratingFlags();
4336       }
4337 
4338     for (User *U : Cur->users()) {
4339       Instruction *UI = cast<Instruction>(U);
4340       if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
4341           Visited.insert(UI).second)
4342         Worklist.push_back(UI);
4343     }
4344   }
4345 }
4346 
4347 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) {
4348   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4349     if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1)
4350       // Some phis were already hand updated by the reduction and recurrence
4351       // code above, leave them alone.
4352       continue;
4353 
4354     auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
4355     // Non-instruction incoming values will have only one value.
4356 
4357     VPLane Lane = VPLane::getFirstLane();
4358     if (isa<Instruction>(IncomingValue) &&
4359         !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue),
4360                                            VF))
4361       Lane = VPLane::getLastLaneForVF(VF);
4362 
4363     // Can be a loop invariant incoming value or the last scalar value to be
4364     // extracted from the vectorized loop.
4365     // FIXME: Should not rely on getVPValue at this point.
4366     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4367     Value *lastIncomingValue =
4368         OrigLoop->isLoopInvariant(IncomingValue)
4369             ? IncomingValue
4370             : State.get(State.Plan->getVPValue(IncomingValue, true),
4371                         VPIteration(UF - 1, Lane));
4372     LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
4373   }
4374 }
4375 
4376 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4377   // The basic block and loop containing the predicated instruction.
4378   auto *PredBB = PredInst->getParent();
4379   auto *VectorLoop = LI->getLoopFor(PredBB);
4380 
4381   // Initialize a worklist with the operands of the predicated instruction.
4382   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4383 
4384   // Holds instructions that we need to analyze again. An instruction may be
4385   // reanalyzed if we don't yet know if we can sink it or not.
4386   SmallVector<Instruction *, 8> InstsToReanalyze;
4387 
4388   // Returns true if a given use occurs in the predicated block. Phi nodes use
4389   // their operands in their corresponding predecessor blocks.
4390   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4391     auto *I = cast<Instruction>(U.getUser());
4392     BasicBlock *BB = I->getParent();
4393     if (auto *Phi = dyn_cast<PHINode>(I))
4394       BB = Phi->getIncomingBlock(
4395           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4396     return BB == PredBB;
4397   };
4398 
  // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
  // operands are then added to the worklist. The algorithm ends when a full
  // pass over the worklist fails to sink a single instruction.
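  //
  // For example (purely illustrative), an extractelement whose only use is a
  // scalarized store in the predicated block is moved into that block, and
  // its operands are then reconsidered for sinking as well.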
4403   bool Changed;
4404   do {
4405     // Add the instructions that need to be reanalyzed to the worklist, and
4406     // reset the changed indicator.
4407     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4408     InstsToReanalyze.clear();
4409     Changed = false;
4410 
4411     while (!Worklist.empty()) {
4412       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4413 
4414       // We can't sink an instruction if it is a phi node, is not in the loop,
4415       // or may have side effects.
4416       if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) ||
4417           I->mayHaveSideEffects())
4418         continue;
4419 
4420       // If the instruction is already in PredBB, check if we can sink its
4421       // operands. In that case, VPlan's sinkScalarOperands() succeeded in
4422       // sinking the scalar instruction I, hence it appears in PredBB; but it
4423       // may have failed to sink I's operands (recursively), which we try
4424       // (again) here.
4425       if (I->getParent() == PredBB) {
4426         Worklist.insert(I->op_begin(), I->op_end());
4427         continue;
4428       }
4429 
4430       // It's legal to sink the instruction if all its uses occur in the
4431       // predicated block. Otherwise, there's nothing to do yet, and we may
4432       // need to reanalyze the instruction.
4433       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4434         InstsToReanalyze.push_back(I);
4435         continue;
4436       }
4437 
4438       // Move the instruction to the beginning of the predicated block, and add
      // its operands to the worklist.
4440       I->moveBefore(&*PredBB->getFirstInsertionPt());
4441       Worklist.insert(I->op_begin(), I->op_end());
4442 
4443       // The sinking may have enabled other instructions to be sunk, so we will
4444       // need to iterate.
4445       Changed = true;
4446     }
4447   } while (Changed);
4448 }
4449 
4450 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
4451   for (PHINode *OrigPhi : OrigPHIsToFix) {
4452     VPWidenPHIRecipe *VPPhi =
4453         cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi));
4454     PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
4455     // Make sure the builder has a valid insert point.
4456     Builder.SetInsertPoint(NewPhi);
4457     for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) {
4458       VPValue *Inc = VPPhi->getIncomingValue(i);
4459       VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i);
4460       NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]);
4461     }
4462   }
4463 }
4464 
4465 bool InnerLoopVectorizer::useOrderedReductions(
4466     const RecurrenceDescriptor &RdxDesc) {
4467   return Cost->useOrderedReductions(RdxDesc);
4468 }
4469 
4470 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
4471                                               VPWidenPHIRecipe *PhiR,
4472                                               VPTransformState &State) {
4473   PHINode *P = cast<PHINode>(PN);
4474   if (EnableVPlanNativePath) {
4475     // Currently we enter here in the VPlan-native path for non-induction
4476     // PHIs where all control flow is uniform. We simply widen these PHIs.
4477     // Create a vector phi with no operands - the vector phi operands will be
4478     // set at the end of vector code generation.
4479     Type *VecTy = (State.VF.isScalar())
4480                       ? PN->getType()
4481                       : VectorType::get(PN->getType(), State.VF);
4482     Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4483     State.set(PhiR, VecPhi, 0);
4484     OrigPHIsToFix.push_back(P);
4485 
4486     return;
4487   }
4488 
4489   assert(PN->getParent() == OrigLoop->getHeader() &&
4490          "Non-header phis should have been handled elsewhere");
4491 
4492   // In order to support recurrences we need to be able to vectorize Phi nodes.
4493   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4494   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4495   // this value when we vectorize all of the instructions that use the PHI.
4496 
4497   assert(!Legal->isReductionVariable(P) &&
4498          "reductions should be handled elsewhere");
4499 
4500   setDebugLocFromInst(P);
4501 
4502   // This PHINode must be an induction variable.
4503   // Make sure that we know about it.
4504   assert(Legal->getInductionVars().count(P) && "Not an induction variable");
4505 
4506   InductionDescriptor II = Legal->getInductionVars().lookup(P);
4507   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4508 
4509   auto *IVR = PhiR->getParent()->getPlan()->getCanonicalIV();
4510   PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, 0));
4511 
4512   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4513   // which can be found from the original scalar operations.
4514   switch (II.getKind()) {
4515   case InductionDescriptor::IK_NoInduction:
4516     llvm_unreachable("Unknown induction");
4517   case InductionDescriptor::IK_IntInduction:
4518   case InductionDescriptor::IK_FpInduction:
4519     llvm_unreachable("Integer/fp induction is handled elsewhere.");
4520   case InductionDescriptor::IK_PtrInduction: {
4521     // Handle the pointer induction variable case.
4522     assert(P->getType()->isPointerTy() && "Unexpected type.");
4523 
4524     if (Cost->isScalarAfterVectorization(P, State.VF)) {
4525       // This is the normalized GEP that starts counting at zero.
4526       Value *PtrInd =
4527           Builder.CreateSExtOrTrunc(CanonicalIV, II.getStep()->getType());
4528       // Determine the number of scalars we need to generate for each unroll
4529       // iteration. If the instruction is uniform, we only need to generate the
4530       // first lane. Otherwise, we generate all VF values.
4531       bool IsUniform = Cost->isUniformAfterVectorization(P, State.VF);
4532       assert((IsUniform || !State.VF.isScalable()) &&
4533              "Cannot scalarize a scalable VF");
4534       unsigned Lanes = IsUniform ? 1 : State.VF.getFixedValue();
4535 
4536       for (unsigned Part = 0; Part < UF; ++Part) {
4537         Value *PartStart =
4538             createStepForVF(Builder, PtrInd->getType(), VF, Part);
4539 
4540         for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4541           Value *Idx = Builder.CreateAdd(
4542               PartStart, ConstantInt::get(PtrInd->getType(), Lane));
4543           Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4544           Value *SclrGep = emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(),
4545                                                 DL, II, State.CFG.PrevBB);
4546           SclrGep->setName("next.gep");
4547           State.set(PhiR, SclrGep, VPIteration(Part, Lane));
4548         }
4549       }
4550       return;
4551     }
4552     assert(isa<SCEVConstant>(II.getStep()) &&
4553            "Induction step not a SCEV constant!");
4554     Type *PhiType = II.getStep()->getType();
4555 
4556     // Build a pointer phi
4557     Value *ScalarStartValue = PhiR->getStartValue()->getLiveInIRValue();
4558     Type *ScStValueType = ScalarStartValue->getType();
4559     PHINode *NewPointerPhi =
4560         PHINode::Create(ScStValueType, 2, "pointer.phi", CanonicalIV);
4561     NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader);
4562 
4563     // A pointer induction, performed by using a gep
4564     BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
4565     Instruction *InductionLoc = LoopLatch->getTerminator();
4566     const SCEV *ScalarStep = II.getStep();
4567     SCEVExpander Exp(*PSE.getSE(), DL, "induction");
4568     Value *ScalarStepValue =
4569         Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc);
4570     Value *RuntimeVF = getRuntimeVF(Builder, PhiType, VF);
4571     Value *NumUnrolledElems =
4572         Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF));
4573     Value *InductionGEP = GetElementPtrInst::Create(
4574         II.getElementType(), NewPointerPhi,
4575         Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind",
4576         InductionLoc);
4577     NewPointerPhi->addIncoming(InductionGEP, LoopLatch);
4578 
4579     // Create UF many actual address geps that use the pointer
4580     // phi as base and a vectorized version of the step value
4581     // (<step*0, ..., step*N>) as offset.
4582     for (unsigned Part = 0; Part < State.UF; ++Part) {
4583       Type *VecPhiType = VectorType::get(PhiType, State.VF);
4584       Value *StartOffsetScalar =
4585           Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part));
4586       Value *StartOffset =
4587           Builder.CreateVectorSplat(State.VF, StartOffsetScalar);
4588       // Create a vector of consecutive numbers from zero to VF.
4589       StartOffset =
4590           Builder.CreateAdd(StartOffset, Builder.CreateStepVector(VecPhiType));
4591 
4592       Value *GEP = Builder.CreateGEP(
4593           II.getElementType(), NewPointerPhi,
4594           Builder.CreateMul(
4595               StartOffset, Builder.CreateVectorSplat(State.VF, ScalarStepValue),
4596               "vector.gep"));
4597       State.set(PhiR, GEP, Part);
4598     }
4599   }
4600   }
4601 }
4602 
4603 /// A helper function for checking whether an integer division-related
4604 /// instruction may divide by zero (in which case it must be predicated if
4605 /// executed conditionally in the scalar code).
4606 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
/// Non-zero divisors that are not compile-time constants will not be
4608 /// converted into multiplication, so we will still end up scalarizing
4609 /// the division, but can do so w/o predication.
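/// For example (purely illustrative), 'udiv %x, %y' with a non-constant %y
/// may divide by zero, whereas 'udiv %x, 7' cannot.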
4610 static bool mayDivideByZero(Instruction &I) {
4611   assert((I.getOpcode() == Instruction::UDiv ||
4612           I.getOpcode() == Instruction::SDiv ||
4613           I.getOpcode() == Instruction::URem ||
4614           I.getOpcode() == Instruction::SRem) &&
4615          "Unexpected instruction");
4616   Value *Divisor = I.getOperand(1);
4617   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4618   return !CInt || CInt->isZero();
4619 }
4620 
4621 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def,
4622                                                VPUser &ArgOperands,
4623                                                VPTransformState &State) {
4624   assert(!isa<DbgInfoIntrinsic>(I) &&
4625          "DbgInfoIntrinsic should have been dropped during VPlan construction");
4626   setDebugLocFromInst(&I);
4627 
4628   Module *M = I.getParent()->getParent()->getParent();
4629   auto *CI = cast<CallInst>(&I);
4630 
4631   SmallVector<Type *, 4> Tys;
4632   for (Value *ArgOperand : CI->args())
4633     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue()));
4634 
4635   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4636 
  // The flag shows whether we use an Intrinsic or a usual Call for the
  // vectorized version of the instruction.
  // Is it beneficial to perform the intrinsic call compared to the lib call?
4640   bool NeedToScalarize = false;
4641   InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
4642   InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0;
4643   bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
4644   assert((UseVectorIntrinsic || !NeedToScalarize) &&
4645          "Instruction should be scalarized elsewhere.");
4646   assert((IntrinsicCost.isValid() || CallCost.isValid()) &&
4647          "Either the intrinsic cost or vector call cost must be valid");
4648 
4649   for (unsigned Part = 0; Part < UF; ++Part) {
4650     SmallVector<Type *, 2> TysForDecl = {CI->getType()};
4651     SmallVector<Value *, 4> Args;
4652     for (auto &I : enumerate(ArgOperands.operands())) {
4653       // Some intrinsics have a scalar argument - don't replace it with a
4654       // vector.
4655       Value *Arg;
4656       if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
4657         Arg = State.get(I.value(), Part);
4658       else {
4659         Arg = State.get(I.value(), VPIteration(0, 0));
4660         if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index()))
4661           TysForDecl.push_back(Arg->getType());
4662       }
4663       Args.push_back(Arg);
4664     }
4665 
4666     Function *VectorF;
4667     if (UseVectorIntrinsic) {
4668       // Use vector version of the intrinsic.
4669       if (VF.isVector())
4670         TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
4671       VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4672       assert(VectorF && "Can't retrieve vector intrinsic.");
4673     } else {
4674       // Use vector version of the function call.
4675       const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
4676 #ifndef NDEBUG
4677       assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
4678              "Can't create vector function.");
4679 #endif
      VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
    }
    SmallVector<OperandBundleDef, 1> OpBundles;
    CI->getOperandBundlesAsDefs(OpBundles);
    CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);

    if (isa<FPMathOperator>(V))
      V->copyFastMathFlags(CI);

    State.set(Def, V, Part);
    addMetadata(V, &I);
4691   }
4692 }
4693 
4694 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
4695   // We should not collect Scalars more than once per VF. Right now, this
4696   // function is called from collectUniformsAndScalars(), which already does
4697   // this check. Collecting Scalars for VF=1 does not make any sense.
4698   assert(VF.isVector() && Scalars.find(VF) == Scalars.end() &&
4699          "This function should not be visited twice for the same VF");
4700 
4701   SmallSetVector<Instruction *, 8> Worklist;
4702 
4703   // These sets are used to seed the analysis with pointers used by memory
4704   // accesses that will remain scalar.
4705   SmallSetVector<Instruction *, 8> ScalarPtrs;
4706   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
4707   auto *Latch = TheLoop->getLoopLatch();
4708 
4709   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
4710   // The pointer operands of loads and stores will be scalar as long as the
4711   // memory access is not a gather or scatter operation. The value operand of a
4712   // store will remain scalar if the store is scalarized.
4713   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
4714     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
4715     assert(WideningDecision != CM_Unknown &&
4716            "Widening decision should be ready at this moment");
4717     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
4718       if (Ptr == Store->getValueOperand())
4719         return WideningDecision == CM_Scalarize;
4720     assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
4721            "Ptr is neither a value or pointer operand");
4722     return WideningDecision != CM_GatherScatter;
4723   };
4724 
4725   // A helper that returns true if the given value is a bitcast or
4726   // getelementptr instruction contained in the loop.
4727   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
4728     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
4729             isa<GetElementPtrInst>(V)) &&
4730            !TheLoop->isLoopInvariant(V);
4731   };
4732 
4733   // A helper that evaluates a memory access's use of a pointer. If the use will
4734   // be a scalar use and the pointer is only used by memory accesses, we place
4735   // the pointer in ScalarPtrs. Otherwise, the pointer is placed in
4736   // PossibleNonScalarPtrs.
4737   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
4738     // We only care about bitcast and getelementptr instructions contained in
4739     // the loop.
4740     if (!isLoopVaryingBitCastOrGEP(Ptr))
4741       return;
4742 
4743     // If the pointer has already been identified as scalar (e.g., if it was
4744     // also identified as uniform), there's nothing to do.
4745     auto *I = cast<Instruction>(Ptr);
4746     if (Worklist.count(I))
4747       return;
4748 
4749     // If the use of the pointer will be a scalar use, and all users of the
4750     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
4751     // place the pointer in PossibleNonScalarPtrs.
4752     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
4753           return isa<LoadInst>(U) || isa<StoreInst>(U);
4754         }))
4755       ScalarPtrs.insert(I);
4756     else
4757       PossibleNonScalarPtrs.insert(I);
4758   };
4759 
4760   // We seed the scalars analysis with two classes of instructions: (1)
4761   // instructions marked uniform-after-vectorization and (2) bitcast,
4762   // getelementptr and (pointer) phi instructions used by memory accesses
4763   // requiring a scalar use.
4764   //
4765   // (1) Add to the worklist all instructions that have been identified as
4766   // uniform-after-vectorization.
4767   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
4768 
4769   // (2) Add to the worklist all bitcast and getelementptr instructions used by
4770   // memory accesses requiring a scalar use. The pointer operands of loads and
4771   // stores will be scalar as long as the memory access is not a gather or
4772   // scatter operation. The value operand of a store will remain scalar if the
4773   // store is scalarized.
4774   for (auto *BB : TheLoop->blocks())
4775     for (auto &I : *BB) {
4776       if (auto *Load = dyn_cast<LoadInst>(&I)) {
4777         evaluatePtrUse(Load, Load->getPointerOperand());
4778       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
4779         evaluatePtrUse(Store, Store->getPointerOperand());
4780         evaluatePtrUse(Store, Store->getValueOperand());
4781       }
4782     }
4783   for (auto *I : ScalarPtrs)
4784     if (!PossibleNonScalarPtrs.count(I)) {
4785       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
4786       Worklist.insert(I);
4787     }
4788 
4789   // Insert the forced scalars.
4790   // FIXME: Currently widenPHIInstruction() often creates a dead vector
4791   // induction variable when the PHI user is scalarized.
4792   auto ForcedScalar = ForcedScalars.find(VF);
4793   if (ForcedScalar != ForcedScalars.end())
4794     for (auto *I : ForcedScalar->second)
4795       Worklist.insert(I);
4796 
4797   // Expand the worklist by looking through any bitcasts and getelementptr
4798   // instructions we've already identified as scalar. This is similar to the
4799   // expansion step in collectLoopUniforms(); however, here we're only
4800   // expanding to include additional bitcasts and getelementptr instructions.
4801   unsigned Idx = 0;
4802   while (Idx != Worklist.size()) {
4803     Instruction *Dst = Worklist[Idx++];
4804     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
4805       continue;
4806     auto *Src = cast<Instruction>(Dst->getOperand(0));
4807     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
4808           auto *J = cast<Instruction>(U);
4809           return !TheLoop->contains(J) || Worklist.count(J) ||
4810                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
4811                   isScalarUse(J, Src));
4812         })) {
4813       Worklist.insert(Src);
4814       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
4815     }
4816   }
4817 
4818   // An induction variable will remain scalar if all users of the induction
4819   // variable and induction variable update remain scalar.
4820   for (auto &Induction : Legal->getInductionVars()) {
4821     auto *Ind = Induction.first;
4822     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4823 
4824     // If tail-folding is applied, the primary induction variable will be used
4825     // to feed a vector compare.
4826     if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
4827       continue;
4828 
4829     // Returns true if \p Indvar is a pointer induction that is used directly by
4830     // load/store instruction \p I.
4831     auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar,
4832                                               Instruction *I) {
4833       return Induction.second.getKind() ==
4834                  InductionDescriptor::IK_PtrInduction &&
4835              (isa<LoadInst>(I) || isa<StoreInst>(I)) &&
4836              Indvar == getLoadStorePointerOperand(I) && isScalarUse(I, Indvar);
4837     };
4838 
4839     // Determine if all users of the induction variable are scalar after
4840     // vectorization.
4841     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4842       auto *I = cast<Instruction>(U);
4843       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
4844              IsDirectLoadStoreFromPtrIndvar(Ind, I);
4845     });
4846     if (!ScalarInd)
4847       continue;
4848 
4849     // Determine if all users of the induction variable update instruction are
4850     // scalar after vectorization.
4851     auto ScalarIndUpdate =
4852         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4853           auto *I = cast<Instruction>(U);
4854           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
4855                  IsDirectLoadStoreFromPtrIndvar(IndUpdate, I);
4856         });
4857     if (!ScalarIndUpdate)
4858       continue;
4859 
4860     // The induction variable and its update instruction will remain scalar.
4861     Worklist.insert(Ind);
4862     Worklist.insert(IndUpdate);
4863     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
4864     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
4865                       << "\n");
4866   }
4867 
4868   Scalars[VF].insert(Worklist.begin(), Worklist.end());
4869 }
4870 
4871 bool LoopVectorizationCostModel::isScalarWithPredication(
4872     Instruction *I, ElementCount VF) const {
4873   if (!blockNeedsPredicationForAnyReason(I->getParent()))
4874     return false;
4875   switch(I->getOpcode()) {
4876   default:
4877     break;
4878   case Instruction::Load:
4879   case Instruction::Store: {
4880     if (!Legal->isMaskRequired(I))
4881       return false;
4882     auto *Ptr = getLoadStorePointerOperand(I);
4883     auto *Ty = getLoadStoreType(I);
4884     Type *VTy = Ty;
4885     if (VF.isVector())
4886       VTy = VectorType::get(Ty, VF);
4887     const Align Alignment = getLoadStoreAlignment(I);
4888     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
4889                                 TTI.isLegalMaskedGather(VTy, Alignment))
4890                             : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
4891                                 TTI.isLegalMaskedScatter(VTy, Alignment));
4892   }
4893   case Instruction::UDiv:
4894   case Instruction::SDiv:
4895   case Instruction::SRem:
4896   case Instruction::URem:
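    // A division or remainder in a predicated block cannot simply be widened:
    // executing it unconditionally may divide by zero on lanes whose predicate
    // is false, so it has to be scalarized and predicated per lane.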
4897     return mayDivideByZero(*I);
4898   }
4899   return false;
4900 }
4901 
4902 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
4903     Instruction *I, ElementCount VF) {
4904   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
4905   assert(getWideningDecision(I, VF) == CM_Unknown &&
4906          "Decision should not be set yet.");
4907   auto *Group = getInterleavedAccessGroup(I);
4908   assert(Group && "Must have a group.");
4909 
4910   // If the instruction's allocated size doesn't equal its type size, it
4911   // requires padding and will be scalarized.
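  // (For example, x86_fp80 is 80 bits wide but is typically allocated with
  // padding to 96 or 128 bits, so consecutive in-memory elements cannot be
  // reinterpreted as a packed vector.)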
4912   auto &DL = I->getModule()->getDataLayout();
4913   auto *ScalarTy = getLoadStoreType(I);
4914   if (hasIrregularType(ScalarTy, DL))
4915     return false;
4916 
4917   // Check if masking is required.
4918   // A Group may need masking for one of two reasons: it resides in a block that
4919   // needs predication, or it was decided to use masking to deal with gaps
4920   // (either a gap at the end of a load-access that may result in a speculative
4921   // load, or any gaps in a store-access).
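  // As an illustration, a stride-3 store group that writes only members 0 and
  // 2 (e.g. A[3*i] and A[3*i+2]) has getNumMembers() < getFactor(), so the
  // wide store must be masked to avoid clobbering the lanes in the gap.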
4922   bool PredicatedAccessRequiresMasking =
4923       blockNeedsPredicationForAnyReason(I->getParent()) &&
4924       Legal->isMaskRequired(I);
4925   bool LoadAccessWithGapsRequiresEpilogMasking =
4926       isa<LoadInst>(I) && Group->requiresScalarEpilogue() &&
4927       !isScalarEpilogueAllowed();
4928   bool StoreAccessWithGapsRequiresMasking =
4929       isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor());
4930   if (!PredicatedAccessRequiresMasking &&
4931       !LoadAccessWithGapsRequiresEpilogMasking &&
4932       !StoreAccessWithGapsRequiresMasking)
4933     return true;
4934 
4935   // If masked interleaving is required, we expect that the user/target had
4936   // enabled it, because otherwise it either wouldn't have been created or
4937   // it should have been invalidated by the CostModel.
4938   assert(useMaskedInterleavedAccesses(TTI) &&
4939          "Masked interleave-groups for predicated accesses are not enabled.");
4940 
4941   if (Group->isReverse())
4942     return false;
4943 
4944   auto *Ty = getLoadStoreType(I);
4945   const Align Alignment = getLoadStoreAlignment(I);
4946   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
4947                           : TTI.isLegalMaskedStore(Ty, Alignment);
4948 }
4949 
4950 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
4951     Instruction *I, ElementCount VF) {
4952   // Get and ensure we have a valid memory instruction.
4953   assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction");
4954 
4955   auto *Ptr = getLoadStorePointerOperand(I);
4956   auto *ScalarTy = getLoadStoreType(I);
4957 
4958   // To be widened, the pointer must first of all be consecutive.
4959   if (!Legal->isConsecutivePtr(ScalarTy, Ptr))
4960     return false;
4961 
4962   // If the instruction is a store located in a predicated block, it will be
4963   // scalarized.
4964   if (isScalarWithPredication(I, VF))
4965     return false;
4966 
4967   // If the instruction's allocated size doesn't equal its type size, it
4968   // requires padding and will be scalarized.
4969   auto &DL = I->getModule()->getDataLayout();
4970   if (hasIrregularType(ScalarTy, DL))
4971     return false;
4972 
4973   return true;
4974 }
4975 
4976 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
4977   // We should not collect Uniforms more than once per VF. Right now,
4978   // this function is called from collectUniformsAndScalars(), which
4979   // already does this check. Collecting Uniforms for VF=1 does not make any
4980   // sense.
4981 
4982   assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
4983          "This function should not be visited twice for the same VF");
4984 
4985   // Create an entry for this VF so that, even if no uniform value is found,
4986   // we will not analyze the loop again: Uniforms.count(VF) will return 1.
4987   Uniforms[VF].clear();
4988 
4989   // We now know that the loop is vectorizable!
4990   // Collect instructions inside the loop that will remain uniform after
4991   // vectorization.
4992 
4993   // Global values, params and instructions outside of the current loop are
4994   // out of scope.
4995   auto isOutOfScope = [&](Value *V) -> bool {
4996     Instruction *I = dyn_cast<Instruction>(V);
4997     return (!I || !TheLoop->contains(I));
4998   };
4999 
5000   // Worklist containing uniform instructions demanding lane 0.
5001   SetVector<Instruction *> Worklist;
5002   BasicBlock *Latch = TheLoop->getLoopLatch();
5003 
5004   // Add uniform instructions demanding lane 0 to the worklist. Instructions
5005   // that are scalar with predication must not be considered uniform after
5006   // vectorization, because that would create an erroneous replicating region
5007   // where only a single instance out of VF should be formed.
5008   // TODO: optimize such rare cases if found to be important, see PR40816.
5009   auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
5010     if (isOutOfScope(I)) {
5011       LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
5012                         << *I << "\n");
5013       return;
5014     }
5015     if (isScalarWithPredication(I, VF)) {
5016       LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
5017                         << *I << "\n");
5018       return;
5019     }
5020     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
5021     Worklist.insert(I);
5022   };
5023 
5024   // Start with the conditional branch. If the branch condition is an
5025   // instruction contained in the loop that is only used by the branch, it is
5026   // uniform.
5027   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
5028   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
5029     addToWorklistIfAllowed(Cmp);
5030 
5031   auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
5032     InstWidening WideningDecision = getWideningDecision(I, VF);
5033     assert(WideningDecision != CM_Unknown &&
5034            "Widening decision should be ready at this moment");
5035 
5036     // A uniform memory op is itself uniform.  We exclude uniform stores
5037     // here as they demand the last lane, not the first one.
5038     if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) {
5039       assert(WideningDecision == CM_Scalarize);
5040       return true;
5041     }
5042 
5043     return (WideningDecision == CM_Widen ||
5044             WideningDecision == CM_Widen_Reverse ||
5045             WideningDecision == CM_Interleave);
5046   };
5047 
5048 
5049   // Returns true if Ptr is the pointer operand of a memory access instruction
5050   // I, and I is known to not require scalarization.
5051   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
5052     return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
5053   };
5054 
5055   // Holds a list of values which are known to have at least one uniform use.
5056   // Note that there may be other uses which aren't uniform.  A "uniform use"
5057   // here is something which only demands lane 0 of the unrolled iterations;
5058   // it does not imply that all lanes produce the same value (e.g. this is not
5059   // the usual meaning of uniform)
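  // For instance, the address of a consecutive widened load only needs its
  // lane-0 value (the remaining lanes follow from the stride), even though the
  // address takes a different value on every lane.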
5060   SetVector<Value *> HasUniformUse;
5061 
5062   // Scan the loop for instructions which are either a) known to have only
5063   // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
5064   for (auto *BB : TheLoop->blocks())
5065     for (auto &I : *BB) {
5066       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) {
5067         switch (II->getIntrinsicID()) {
5068         case Intrinsic::sideeffect:
5069         case Intrinsic::experimental_noalias_scope_decl:
5070         case Intrinsic::assume:
5071         case Intrinsic::lifetime_start:
5072         case Intrinsic::lifetime_end:
5073           if (TheLoop->hasLoopInvariantOperands(&I))
5074             addToWorklistIfAllowed(&I);
5075           break;
5076         default:
5077           break;
5078         }
5079       }
5080 
5081       // ExtractValue instructions must be uniform, because the operands are
5082       // known to be loop-invariant.
5083       if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) {
5084         assert(isOutOfScope(EVI->getAggregateOperand()) &&
5085                "Expected aggregate value to be loop invariant");
5086         addToWorklistIfAllowed(EVI);
5087         continue;
5088       }
5089 
5090       // If there's no pointer operand, there's nothing to do.
5091       auto *Ptr = getLoadStorePointerOperand(&I);
5092       if (!Ptr)
5093         continue;
5094 
5095       // A uniform memory op is itself uniform.  We exclude uniform stores
5096       // here as they demand the last lane, not the first one.
5097       if (isa<LoadInst>(I) && Legal->isUniformMemOp(I))
5098         addToWorklistIfAllowed(&I);
5099 
5100       if (isUniformDecision(&I, VF)) {
5101         assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check");
5102         HasUniformUse.insert(Ptr);
5103       }
5104     }
5105 
5106   // Add to the worklist any operands which have *only* uniform (e.g. lane 0
5107   // demanding) users.  Since loops are assumed to be in LCSSA form, this
5108   // disallows uses outside the loop as well.
5109   for (auto *V : HasUniformUse) {
5110     if (isOutOfScope(V))
5111       continue;
5112     auto *I = cast<Instruction>(V);
5113     auto UsersAreMemAccesses =
5114       llvm::all_of(I->users(), [&](User *U) -> bool {
5115         return isVectorizedMemAccessUse(cast<Instruction>(U), V);
5116       });
5117     if (UsersAreMemAccesses)
5118       addToWorklistIfAllowed(I);
5119   }
5120 
5121   // Expand Worklist in topological order: whenever a new instruction
5122   // is added, its users should already be inside Worklist. This ensures
5123   // that a uniform instruction will only be used by uniform instructions.
5124   unsigned idx = 0;
5125   while (idx != Worklist.size()) {
5126     Instruction *I = Worklist[idx++];
5127 
5128     for (auto OV : I->operand_values()) {
5129       // isOutOfScope operands cannot be uniform instructions.
5130       if (isOutOfScope(OV))
5131         continue;
5132       // First-order recurrence phis should typically be considered
5133       // non-uniform.
5134       auto *OP = dyn_cast<PHINode>(OV);
5135       if (OP && Legal->isFirstOrderRecurrence(OP))
5136         continue;
5137       // If all the users of the operand are uniform, then add the
5138       // operand into the uniform worklist.
5139       auto *OI = cast<Instruction>(OV);
5140       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
5141             auto *J = cast<Instruction>(U);
5142             return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
5143           }))
5144         addToWorklistIfAllowed(OI);
5145     }
5146   }
5147 
5148   // For an instruction to be added into Worklist above, all its users inside
5149   // the loop should also be in Worklist. However, this condition cannot be
5150   // true for phi nodes that form a cyclic dependence. We must process phi
5151   // nodes separately. An induction variable will remain uniform if all users
5152   // of the induction variable and induction variable update remain uniform.
5153   // The code below handles both pointer and non-pointer induction variables.
5154   for (auto &Induction : Legal->getInductionVars()) {
5155     auto *Ind = Induction.first;
5156     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5157 
5158     // Determine if all users of the induction variable are uniform after
5159     // vectorization.
5160     auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5161       auto *I = cast<Instruction>(U);
5162       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
5163              isVectorizedMemAccessUse(I, Ind);
5164     });
5165     if (!UniformInd)
5166       continue;
5167 
5168     // Determine if all users of the induction variable update instruction are
5169     // uniform after vectorization.
5170     auto UniformIndUpdate =
5171         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5172           auto *I = cast<Instruction>(U);
5173           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
5174                  isVectorizedMemAccessUse(I, IndUpdate);
5175         });
5176     if (!UniformIndUpdate)
5177       continue;
5178 
5179     // The induction variable and its update instruction will remain uniform.
5180     addToWorklistIfAllowed(Ind);
5181     addToWorklistIfAllowed(IndUpdate);
5182   }
5183 
5184   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
5185 }
5186 
5187 bool LoopVectorizationCostModel::runtimeChecksRequired() {
5188   LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
5189 
5190   if (Legal->getRuntimePointerChecking()->Need) {
5191     reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
5192         "runtime pointer checks needed. Enable vectorization of this "
5193         "loop with '#pragma clang loop vectorize(enable)' when "
5194         "compiling with -Os/-Oz",
5195         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5196     return true;
5197   }
5198 
5199   if (!PSE.getUnionPredicate().getPredicates().empty()) {
5200     reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
5201         "runtime SCEV checks needed. Enable vectorization of this "
5202         "loop with '#pragma clang loop vectorize(enable)' when "
5203         "compiling with -Os/-Oz",
5204         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5205     return true;
5206   }
5207 
5208   // FIXME: Avoid specializing for stride==1 instead of bailing out.
5209   if (!Legal->getLAI()->getSymbolicStrides().empty()) {
5210     reportVectorizationFailure("Runtime stride check for small trip count",
5211         "runtime stride == 1 checks needed. Enable vectorization of "
5212         "this loop without such check by compiling with -Os/-Oz",
5213         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5214     return true;
5215   }
5216 
5217   return false;
5218 }
5219 
5220 ElementCount
5221 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
5222   if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors)
5223     return ElementCount::getScalable(0);
5224 
5225   if (Hints->isScalableVectorizationDisabled()) {
5226     reportVectorizationInfo("Scalable vectorization is explicitly disabled",
5227                             "ScalableVectorizationDisabled", ORE, TheLoop);
5228     return ElementCount::getScalable(0);
5229   }
5230 
5231   LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n");
5232 
5233   auto MaxScalableVF = ElementCount::getScalable(
5234       std::numeric_limits<ElementCount::ScalarTy>::max());
5235 
5236   // Test that the loop-vectorizer can legalize all operations for this MaxVF.
5237   // FIXME: While for scalable vectors this is currently sufficient, this should
5238   // be replaced by a more detailed mechanism that filters out specific VFs,
5239   // instead of invalidating vectorization for a whole set of VFs based on the
5240   // MaxVF.
5241 
5242   // Disable scalable vectorization if the loop contains unsupported reductions.
5243   if (!canVectorizeReductions(MaxScalableVF)) {
5244     reportVectorizationInfo(
5245         "Scalable vectorization not supported for the reduction "
5246         "operations found in this loop.",
5247         "ScalableVFUnfeasible", ORE, TheLoop);
5248     return ElementCount::getScalable(0);
5249   }
5250 
5251   // Disable scalable vectorization if the loop contains any instructions
5252   // with element types not supported for scalable vectors.
5253   if (any_of(ElementTypesInLoop, [&](Type *Ty) {
5254         return !Ty->isVoidTy() &&
5255                !this->TTI.isElementTypeLegalForScalableVector(Ty);
5256       })) {
5257     reportVectorizationInfo("Scalable vectorization is not supported "
5258                             "for all element types found in this loop.",
5259                             "ScalableVFUnfeasible", ORE, TheLoop);
5260     return ElementCount::getScalable(0);
5261   }
5262 
5263   if (Legal->isSafeForAnyVectorWidth())
5264     return MaxScalableVF;
5265 
5266   // Limit MaxScalableVF by the maximum safe dependence distance.
5267   Optional<unsigned> MaxVScale = TTI.getMaxVScale();
5268   if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange))
5269     MaxVScale =
5270         TheFunction->getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax();
5271   MaxScalableVF = ElementCount::getScalable(
5272       MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);
5273   if (!MaxScalableVF)
5274     reportVectorizationInfo(
5275         "Max legal vector width too small, scalable vectorization "
5276         "unfeasible.",
5277         "ScalableVFUnfeasible", ORE, TheLoop);
5278 
5279   return MaxScalableVF;
5280 }
5281 
5282 FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF(
5283     unsigned ConstTripCount, ElementCount UserVF, bool FoldTailByMasking) {
5284   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
5285   unsigned SmallestType, WidestType;
5286   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
5287 
5288   // Get the maximum safe dependence distance in bits computed by LAA.
5289   // It is computed as MaxVF * sizeOf(type) * 8, where type is taken from
5290   // the memory access that is most restrictive (involved in the smallest
5291   // dependence distance).
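  // For example, with a maximum safe vector width of 256 bits and a widest
  // element type of 64 bits, at most PowerOf2Floor(256 / 64) = 4 elements can
  // be processed per vector iteration without violating the dependence.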
5292   unsigned MaxSafeElements =
5293       PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType);
5294 
5295   auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements);
5296   auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements);
5297 
5298   LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF
5299                     << ".\n");
5300   LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF
5301                     << ".\n");
5302 
5303   // First analyze the UserVF, fall back if the UserVF should be ignored.
5304   if (UserVF) {
5305     auto MaxSafeUserVF =
5306         UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;
5307 
5308     if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) {
5309       // If `VF=vscale x N` is safe, then so is `VF=N`
5310       if (UserVF.isScalable())
5311         return FixedScalableVFPair(
5312             ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF);
5313       else
5314         return UserVF;
5315     }
5316 
5317     assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));
5318 
5319     // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it
5320     // is better to ignore the hint and let the compiler choose a suitable VF.
5321     if (!UserVF.isScalable()) {
5322       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5323                         << " is unsafe, clamping to max safe VF="
5324                         << MaxSafeFixedVF << ".\n");
5325       ORE->emit([&]() {
5326         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5327                                           TheLoop->getStartLoc(),
5328                                           TheLoop->getHeader())
5329                << "User-specified vectorization factor "
5330                << ore::NV("UserVectorizationFactor", UserVF)
5331                << " is unsafe, clamping to maximum safe vectorization factor "
5332                << ore::NV("VectorizationFactor", MaxSafeFixedVF);
5333       });
5334       return MaxSafeFixedVF;
5335     }
5336 
5337     if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) {
5338       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5339                         << " is ignored because scalable vectors are not "
5340                            "available.\n");
5341       ORE->emit([&]() {
5342         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5343                                           TheLoop->getStartLoc(),
5344                                           TheLoop->getHeader())
5345                << "User-specified vectorization factor "
5346                << ore::NV("UserVectorizationFactor", UserVF)
5347                << " is ignored because the target does not support scalable "
5348                   "vectors. The compiler will pick a more suitable value.";
5349       });
5350     } else {
5351       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5352                         << " is unsafe. Ignoring scalable UserVF.\n");
5353       ORE->emit([&]() {
5354         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5355                                           TheLoop->getStartLoc(),
5356                                           TheLoop->getHeader())
5357                << "User-specified vectorization factor "
5358                << ore::NV("UserVectorizationFactor", UserVF)
5359                << " is unsafe. Ignoring the hint to let the compiler pick a "
5360                   "more suitable value.";
5361       });
5362     }
5363   }
5364 
5365   LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
5366                     << " / " << WidestType << " bits.\n");
5367 
5368   FixedScalableVFPair Result(ElementCount::getFixed(1),
5369                              ElementCount::getScalable(0));
5370   if (auto MaxVF =
5371           getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
5372                                   MaxSafeFixedVF, FoldTailByMasking))
5373     Result.FixedVF = MaxVF;
5374 
5375   if (auto MaxVF =
5376           getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
5377                                   MaxSafeScalableVF, FoldTailByMasking))
5378     if (MaxVF.isScalable()) {
5379       Result.ScalableVF = MaxVF;
5380       LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
5381                         << "\n");
5382     }
5383 
5384   return Result;
5385 }
5386 
5387 FixedScalableVFPair
5388 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
5389   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
5390     // TODO: It may be useful to do this, since the check is still likely
5391     // to be dynamically uniform if the target can skip it.
5392     reportVectorizationFailure(
5393         "Not inserting runtime ptr check for divergent target",
5394         "runtime pointer checks needed. Not enabled for divergent target",
5395         "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
5396     return FixedScalableVFPair::getNone();
5397   }
5398 
5399   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5400   LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
5401   if (TC == 1) {
5402     reportVectorizationFailure("Single iteration (non) loop",
5403         "loop trip count is one, irrelevant for vectorization",
5404         "SingleIterationLoop", ORE, TheLoop);
5405     return FixedScalableVFPair::getNone();
5406   }
5407 
5408   switch (ScalarEpilogueStatus) {
5409   case CM_ScalarEpilogueAllowed:
5410     return computeFeasibleMaxVF(TC, UserVF, false);
5411   case CM_ScalarEpilogueNotAllowedUsePredicate:
5412     LLVM_FALLTHROUGH;
5413   case CM_ScalarEpilogueNotNeededUsePredicate:
5414     LLVM_DEBUG(
5415         dbgs() << "LV: vector predicate hint/switch found.\n"
5416                << "LV: Not allowing scalar epilogue, creating predicated "
5417                << "vector loop.\n");
5418     break;
5419   case CM_ScalarEpilogueNotAllowedLowTripLoop:
5420     // fallthrough as a special case of OptForSize
5421   case CM_ScalarEpilogueNotAllowedOptSize:
5422     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
5423       LLVM_DEBUG(
5424           dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5425     else
5426       LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5427                         << "count.\n");
5428 
5429     // Bail if runtime checks are required, which are not good when optimising
5430     // for size.
5431     if (runtimeChecksRequired())
5432       return FixedScalableVFPair::getNone();
5433 
5434     break;
5435   }
5436 
5437   // The only loops we can vectorize without a scalar epilogue are loops with
5438   // a bottom-test and a single exiting block. We'd have to handle the fact
5439   // that not every instruction executes on the last iteration.  This will
5440   // require a lane mask which varies through the vector loop body.  (TODO)
5441   if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5442     // If there was a tail-folding hint/switch, but we can't fold the tail by
5443     // masking, fallback to a vectorization with a scalar epilogue.
5444     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5445       LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5446                            "scalar epilogue instead.\n");
5447       ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5448       return computeFeasibleMaxVF(TC, UserVF, false);
5449     }
5450     return FixedScalableVFPair::getNone();
5451   }
5452 
5453   // Now try the tail folding
5454 
5455   // Invalidate interleave groups that require an epilogue if we can't mask
5456   // the interleave-group.
5457   if (!useMaskedInterleavedAccesses(TTI)) {
5458     assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5459            "No decisions should have been taken at this point");
5460     // Note: There is no need to invalidate any cost modeling decisions here, as
5461     // none were taken so far.
5462     InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5463   }
5464 
5465   FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF, true);
5466   // Avoid tail folding if the trip count is known to be a multiple of any VF
5467   // we chose.
5468   // FIXME: The condition below pessimises the case for fixed-width vectors,
5469   // when scalable VFs are also candidates for vectorization.
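  // E.g. with a trip count of 128, MaxFixedVF = 8 and UserIC = 2, the exit
  // count is divisible by 8 * 2 = 16, so no scalar tail remains and tail
  // folding is unnecessary.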
5470   if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) {
5471     ElementCount MaxFixedVF = MaxFactors.FixedVF;
5472     assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) &&
5473            "MaxFixedVF must be a power of 2");
5474     unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC
5475                                    : MaxFixedVF.getFixedValue();
5476     ScalarEvolution *SE = PSE.getSE();
5477     const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
5478     const SCEV *ExitCount = SE->getAddExpr(
5479         BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
5480     const SCEV *Rem = SE->getURemExpr(
5481         SE->applyLoopGuards(ExitCount, TheLoop),
5482         SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
5483     if (Rem->isZero()) {
5484       // Accept MaxFixedVF if we do not have a tail.
5485       LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
5486       return MaxFactors;
5487     }
5488   }
5489 
5490   // For scalable vectors, don't fold the tail merely because of a low trip
5491   // count or because we are optimizing for code size; only permit it when
5492   // the user has explicitly requested tail predication.
5493   if (ScalarEpilogueStatus != CM_ScalarEpilogueNotNeededUsePredicate &&
5494       ScalarEpilogueStatus != CM_ScalarEpilogueNotAllowedUsePredicate &&
5495       MaxFactors.ScalableVF.isVector())
5496     MaxFactors.ScalableVF = ElementCount::getScalable(0);
5497 
5498   // If we don't know the precise trip count, or if the trip count that we
5499   // found modulo the vectorization factor is not zero, try to fold the tail
5500   // by masking.
5501   // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
5502   if (Legal->prepareToFoldTailByMasking()) {
5503     FoldTailByMasking = true;
5504     return MaxFactors;
5505   }
5506 
5507   // If there was a tail-folding hint/switch, but we can't fold the tail by
5508   // masking, fallback to a vectorization with a scalar epilogue.
5509   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5510     LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5511                          "scalar epilogue instead.\n");
5512     ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5513     return MaxFactors;
5514   }
5515 
5516   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
5517     LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
5518     return FixedScalableVFPair::getNone();
5519   }
5520 
5521   if (TC == 0) {
5522     reportVectorizationFailure(
5523         "Unable to calculate the loop count due to complex control flow",
5524         "unable to calculate the loop count due to complex control flow",
5525         "UnknownLoopCountComplexCFG", ORE, TheLoop);
5526     return FixedScalableVFPair::getNone();
5527   }
5528 
5529   reportVectorizationFailure(
5530       "Cannot optimize for size and vectorize at the same time.",
5531       "cannot optimize for size and vectorize at the same time. "
5532       "Enable vectorization of this loop with '#pragma clang loop "
5533       "vectorize(enable)' when compiling with -Os/-Oz",
5534       "NoTailLoopWithOptForSize", ORE, TheLoop);
5535   return FixedScalableVFPair::getNone();
5536 }
5537 
5538 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
5539     unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType,
5540     const ElementCount &MaxSafeVF, bool FoldTailByMasking) {
5541   bool ComputeScalableMaxVF = MaxSafeVF.isScalable();
5542   TypeSize WidestRegister = TTI.getRegisterBitWidth(
5543       ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
5544                            : TargetTransformInfo::RGK_FixedWidthVector);
5545 
5546   // Convenience function to return the minimum of two ElementCounts.
5547   auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) {
5548     assert((LHS.isScalable() == RHS.isScalable()) &&
5549            "Scalable flags must match");
5550     return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS;
5551   };
5552 
5553   // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
5554   // Note that both WidestRegister and WidestType may not be powers of 2.
5555   auto MaxVectorElementCount = ElementCount::get(
5556       PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType),
5557       ComputeScalableMaxVF);
5558   MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
5559   LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
5560                     << (MaxVectorElementCount * WidestType) << " bits.\n");
5561 
5562   if (!MaxVectorElementCount) {
5563     LLVM_DEBUG(dbgs() << "LV: The target has no "
5564                       << (ComputeScalableMaxVF ? "scalable" : "fixed")
5565                       << " vector registers.\n");
5566     return ElementCount::getFixed(1);
5567   }
5568 
5569   const auto TripCountEC = ElementCount::getFixed(ConstTripCount);
5570   if (ConstTripCount &&
5571       ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) &&
5572       (!FoldTailByMasking || isPowerOf2_32(ConstTripCount))) {
5573     // If loop trip count (TC) is known at compile time there is no point in
5574     // choosing VF greater than TC (as done in the loop below). Select maximum
5575     // power of two which doesn't exceed TC.
5576     // If MaxVectorElementCount is scalable, we only fall back on a fixed VF
5577     // when the TC is less than or equal to the known number of lanes.
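    // For instance, a constant trip count of 12 with a MaxVectorElementCount
    // of 16 is clamped to PowerOf2Floor(12) = 8 lanes.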
5578     auto ClampedConstTripCount = PowerOf2Floor(ConstTripCount);
5579     LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to maximum power of two not "
5580                          "exceeding the constant trip count: "
5581                       << ClampedConstTripCount << "\n");
5582     return ElementCount::getFixed(ClampedConstTripCount);
5583   }
5584 
5585   ElementCount MaxVF = MaxVectorElementCount;
5586   if (TTI.shouldMaximizeVectorBandwidth() ||
5587       (MaximizeBandwidth && isScalarEpilogueAllowed())) {
5588     auto MaxVectorElementCountMaxBW = ElementCount::get(
5589         PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType),
5590         ComputeScalableMaxVF);
5591     MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
5592 
5593     // Collect all viable vectorization factors larger than the default MaxVF
5594     // (i.e. MaxVectorElementCount).
5595     SmallVector<ElementCount, 8> VFs;
5596     for (ElementCount VS = MaxVectorElementCount * 2;
5597          ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2)
5598       VFs.push_back(VS);
5599 
5600     // For each VF calculate its register usage.
5601     auto RUs = calculateRegisterUsage(VFs);
5602 
5603     // Select the largest VF which doesn't require more registers than existing
5604     // ones.
5605     for (int i = RUs.size() - 1; i >= 0; --i) {
5606       bool Selected = true;
5607       for (auto &pair : RUs[i].MaxLocalUsers) {
5608         unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5609         if (pair.second > TargetNumRegisters)
5610           Selected = false;
5611       }
5612       if (Selected) {
5613         MaxVF = VFs[i];
5614         break;
5615       }
5616     }
5617     if (ElementCount MinVF =
5618             TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) {
5619       if (ElementCount::isKnownLT(MaxVF, MinVF)) {
5620         LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
5621                           << ") with target's minimum: " << MinVF << '\n');
5622         MaxVF = MinVF;
5623       }
5624     }
5625   }
5626   return MaxVF;
5627 }
5628 
5629 bool LoopVectorizationCostModel::isMoreProfitable(
5630     const VectorizationFactor &A, const VectorizationFactor &B) const {
5631   InstructionCost CostA = A.Cost;
5632   InstructionCost CostB = B.Cost;
5633 
5634   unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop);
5635 
5636   if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking &&
5637       MaxTripCount) {
5638     // If we are folding the tail and the trip count is a known (possibly small)
5639     // constant, the trip count will be rounded up to an integer number of
5640     // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF),
5641     // which we compare directly. When not folding the tail, the total cost will
5642     // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is
5643     // approximated with the per-lane cost below instead of using the tripcount
5644     // as here.
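    // For example, with MaxTripCount = 10, VF = 4 executes ceil(10/4) = 3
    // vector iterations whereas VF = 8 executes ceil(10/8) = 2; the factor
    // with the smaller total cost over those iterations wins.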
5645     auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue());
5646     auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue());
5647     return RTCostA < RTCostB;
5648   }
5649 
5650   // Improve estimate for the vector width if it is scalable.
5651   unsigned EstimatedWidthA = A.Width.getKnownMinValue();
5652   unsigned EstimatedWidthB = B.Width.getKnownMinValue();
5653   if (Optional<unsigned> VScale = TTI.getVScaleForTuning()) {
5654     if (A.Width.isScalable())
5655       EstimatedWidthA *= VScale.getValue();
5656     if (B.Width.isScalable())
5657       EstimatedWidthB *= VScale.getValue();
5658   }
5659 
5660   // Assume vscale may be larger than 1 (or the value being tuned for),
5661   // so that scalable vectorization is slightly favorable over fixed-width
5662   // vectorization.
5663   if (A.Width.isScalable() && !B.Width.isScalable())
5664     return (CostA * B.Width.getFixedValue()) <= (CostB * EstimatedWidthA);
5665 
5666   // To avoid the need for FP division:
5667   //      (CostA / A.Width) < (CostB / B.Width)
5668   // <=>  (CostA * B.Width) < (CostB * A.Width)
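  // E.g. CostA = 8 at width 4 versus CostB = 20 at width 8:
  // 8 * 8 = 64 < 20 * 4 = 80, so A's per-lane cost (2) beats B's (2.5).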
5669   return (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA);
5670 }
5671 
5672 VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor(
5673     const ElementCountSet &VFCandidates) {
5674   InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first;
5675   LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
5676   assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
5677   assert(VFCandidates.count(ElementCount::getFixed(1)) &&
5678          "Expected Scalar VF to be a candidate");
5679 
5680   const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost);
5681   VectorizationFactor ChosenFactor = ScalarCost;
5682 
5683   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
5684   if (ForceVectorization && VFCandidates.size() > 1) {
5685     // Ignore scalar width, because the user explicitly wants vectorization.
5686     // Initialize cost to max so that VF = 2 is, at least, chosen during cost
5687     // evaluation.
5688     ChosenFactor.Cost = InstructionCost::getMax();
5689   }
5690 
5691   SmallVector<InstructionVFPair> InvalidCosts;
5692   for (const auto &i : VFCandidates) {
5693     // The cost for scalar VF=1 is already calculated, so ignore it.
5694     if (i.isScalar())
5695       continue;
5696 
5697     VectorizationCostTy C = expectedCost(i, &InvalidCosts);
5698     VectorizationFactor Candidate(i, C.first);
5699 
5700 #ifndef NDEBUG
5701     unsigned AssumedMinimumVscale = 1;
5702     if (Optional<unsigned> VScale = TTI.getVScaleForTuning())
5703       AssumedMinimumVscale = VScale.getValue();
5704     unsigned Width =
5705         Candidate.Width.isScalable()
5706             ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale
5707             : Candidate.Width.getFixedValue();
5708     LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
5709                       << " costs: " << (Candidate.Cost / Width));
5710     if (i.isScalable())
5711       LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of "
5712                         << AssumedMinimumVscale << ")");
5713     LLVM_DEBUG(dbgs() << ".\n");
5714 #endif
5715 
5716     if (!C.second && !ForceVectorization) {
5717       LLVM_DEBUG(
5718           dbgs() << "LV: Not considering vector loop of width " << i
5719                  << " because it will not generate any vector instructions.\n");
5720       continue;
5721     }
5722 
5723     // If profitable, add it to the ProfitableVFs list.
5724     if (isMoreProfitable(Candidate, ScalarCost))
5725       ProfitableVFs.push_back(Candidate);
5726 
5727     if (isMoreProfitable(Candidate, ChosenFactor))
5728       ChosenFactor = Candidate;
5729   }
5730 
5731   // Emit a report of VFs with invalid costs in the loop.
5732   if (!InvalidCosts.empty()) {
5733     // Group the remarks per instruction, keeping the instruction order from
5734     // InvalidCosts.
5735     std::map<Instruction *, unsigned> Numbering;
5736     unsigned I = 0;
5737     for (auto &Pair : InvalidCosts)
5738       if (!Numbering.count(Pair.first))
5739         Numbering[Pair.first] = I++;
5740 
5741     // Sort the list, first on instruction(number) then on VF.
5742     llvm::sort(InvalidCosts,
5743                [&Numbering](InstructionVFPair &A, InstructionVFPair &B) {
5744                  if (Numbering[A.first] != Numbering[B.first])
5745                    return Numbering[A.first] < Numbering[B.first];
5746                  ElementCountComparator ECC;
5747                  return ECC(A.second, B.second);
5748                });
5749 
5750     // For a list of ordered instruction-vf pairs:
5751     //   [(load, vf1), (load, vf2), (store, vf1)]
5752     // Group the instructions together to emit separate remarks for:
5753     //   load  (vf1, vf2)
5754     //   store (vf1)
5755     auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts);
5756     auto Subset = ArrayRef<InstructionVFPair>();
5757     do {
5758       if (Subset.empty())
5759         Subset = Tail.take_front(1);
5760 
5761       Instruction *I = Subset.front().first;
5762 
5763       // If the next instruction is different, or if there are no other pairs,
5764       // emit a remark for the collated subset. e.g.
5765       //   [(load, vf1), (load, vf2))]
5766       // to emit:
5767       //  remark: invalid costs for 'load' at VF=(vf1, vf2)
5768       if (Subset == Tail || Tail[Subset.size()].first != I) {
5769         std::string OutString;
5770         raw_string_ostream OS(OutString);
5771         assert(!Subset.empty() && "Unexpected empty range");
5772         OS << "Instruction with invalid costs prevented vectorization at VF=(";
5773         for (auto &Pair : Subset)
5774           OS << (Pair.second == Subset.front().second ? "" : ", ")
5775              << Pair.second;
5776         OS << "):";
5777         if (auto *CI = dyn_cast<CallInst>(I))
5778           OS << " call to " << CI->getCalledFunction()->getName();
5779         else
5780           OS << " " << I->getOpcodeName();
5781         OS.flush();
5782         reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I);
5783         Tail = Tail.drop_front(Subset.size());
5784         Subset = {};
5785       } else
5786         // Grow the subset by one element
5787         Subset = Tail.take_front(Subset.size() + 1);
5788     } while (!Tail.empty());
5789   }
5790 
5791   if (!EnableCondStoresVectorization && NumPredStores) {
5792     reportVectorizationFailure("There are conditional stores.",
5793         "store that is conditionally executed prevents vectorization",
5794         "ConditionalStore", ORE, TheLoop);
5795     ChosenFactor = ScalarCost;
5796   }
5797 
5798   LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() &&
5799                  ChosenFactor.Cost >= ScalarCost.Cost) dbgs()
5800              << "LV: Vectorization seems to be not beneficial, "
5801              << "but was forced by a user.\n");
5802   LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n");
5803   return ChosenFactor;
5804 }
5805 
5806 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization(
5807     const Loop &L, ElementCount VF) const {
5808   // Cross iteration phis such as reductions need special handling and are
5809   // currently unsupported.
5810   if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) {
5811         return Legal->isFirstOrderRecurrence(&Phi) ||
5812                Legal->isReductionVariable(&Phi);
5813       }))
5814     return false;
5815 
5816   // Phis with uses outside of the loop require special handling and are
5817   // currently unsupported.
5818   for (auto &Entry : Legal->getInductionVars()) {
5819     // Look for uses of the value of the induction at the last iteration.
5820     Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch());
5821     for (User *U : PostInc->users())
5822       if (!L.contains(cast<Instruction>(U)))
5823         return false;
5824     // Look for uses of penultimate value of the induction.
5825     for (User *U : Entry.first->users())
5826       if (!L.contains(cast<Instruction>(U)))
5827         return false;
5828   }
5829 
5830   // Induction variables that are widened require special handling that is
5831   // currently not supported.
5832   if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
5833         return !(this->isScalarAfterVectorization(Entry.first, VF) ||
5834                  this->isProfitableToScalarize(Entry.first, VF));
5835       }))
5836     return false;
5837 
5838   // Epilogue vectorization code has not been audited to ensure it handles
5839   // non-latch exits properly.  It may be fine, but it needs to be audited
5840   // and tested.
5841   if (L.getExitingBlock() != L.getLoopLatch())
5842     return false;
5843 
5844   return true;
5845 }
5846 
5847 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
5848     const ElementCount VF) const {
5849   // FIXME: We need a much better cost-model to take different parameters such
5850   // as register pressure, code size increase and cost of extra branches into
5851   // account. For now we apply a very crude heuristic and only consider loops
5852   // with vectorization factors larger than a certain value.
5853   // We also consider epilogue vectorization unprofitable for targets that don't
5854   // consider interleaving beneficial (e.g. MVE).
5855   if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
5856     return false;
5857   if (VF.getFixedValue() >= EpilogueVectorizationMinVF)
5858     return true;
5859   return false;
5860 }
5861 
5862 VectorizationFactor
5863 LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
5864     const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
5865   VectorizationFactor Result = VectorizationFactor::Disabled();
5866   if (!EnableEpilogueVectorization) {
5867     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
5868     return Result;
5869   }
5870 
5871   if (!isScalarEpilogueAllowed()) {
5872     LLVM_DEBUG(
5873         dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
5874                   "allowed.\n";);
5875     return Result;
5876   }
5877 
5878   // Not really a cost consideration, but check for unsupported cases here to
5879   // simplify the logic.
5880   if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
5881     LLVM_DEBUG(
5882         dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
5883                   "not a supported candidate.\n";);
5884     return Result;
5885   }
5886 
5887   if (EpilogueVectorizationForceVF > 1) {
5888     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
5889     ElementCount ForcedEC = ElementCount::getFixed(EpilogueVectorizationForceVF);
5890     if (LVP.hasPlanWithVF(ForcedEC))
5891       return {ForcedEC, 0};
5892     else {
5893       LLVM_DEBUG(
5894           dbgs()
5895               << "LEV: Epilogue vectorization forced factor is not viable.\n";);
5896       return Result;
5897     }
5898   }
5899 
5900   if (TheLoop->getHeader()->getParent()->hasOptSize() ||
5901       TheLoop->getHeader()->getParent()->hasMinSize()) {
5902     LLVM_DEBUG(
5903         dbgs()
5904             << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
5905     return Result;
5906   }
5907 
5908   auto FixedMainLoopVF = ElementCount::getFixed(MainLoopVF.getKnownMinValue());
5909   if (MainLoopVF.isScalable())
5910     LLVM_DEBUG(
5911         dbgs() << "LEV: Epilogue vectorization using scalable vectors not "
5912                   "yet supported. Converting to fixed-width (VF="
5913                << FixedMainLoopVF << ") instead\n");
5914 
5915   if (!isEpilogueVectorizationProfitable(FixedMainLoopVF)) {
5916     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for "
5917                          "this loop\n");
5918     return Result;
5919   }
5920 
5921   for (auto &NextVF : ProfitableVFs)
5922     if (ElementCount::isKnownLT(NextVF.Width, FixedMainLoopVF) &&
5923         (Result.Width.getFixedValue() == 1 ||
5924          isMoreProfitable(NextVF, Result)) &&
5925         LVP.hasPlanWithVF(NextVF.Width))
5926       Result = NextVF;
5927 
5928   if (Result != VectorizationFactor::Disabled())
5929     LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
5930                       << Result.Width.getFixedValue() << "\n";);
5931   return Result;
5932 }
5933 
5934 std::pair<unsigned, unsigned>
5935 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
5936   unsigned MinWidth = -1U;
5937   unsigned MaxWidth = 8;
5938   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5939   // For in-loop reductions, no element types are added to ElementTypesInLoop
5940   // if there are no loads/stores in the loop. In this case, check through the
5941   // reduction variables to determine the maximum width.
5942   if (ElementTypesInLoop.empty() && !Legal->getReductionVars().empty()) {
5943     // Reset MaxWidth so that we can find the smallest type used by recurrences
5944     // in the loop.
5945     MaxWidth = -1U;
5946     for (auto &PhiDescriptorPair : Legal->getReductionVars()) {
5947       const RecurrenceDescriptor &RdxDesc = PhiDescriptorPair.second;
5948       // When finding the min width used by the recurrence we need to account
5949       // for casts on the input operands of the recurrence.
5950       MaxWidth = std::min<unsigned>(
5951           MaxWidth, std::min<unsigned>(
5952                         RdxDesc.getMinWidthCastToRecurrenceTypeInBits(),
5953                         RdxDesc.getRecurrenceType()->getScalarSizeInBits()));
5954     }
5955   } else {
5956     for (Type *T : ElementTypesInLoop) {
5957       MinWidth = std::min<unsigned>(
5958           MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
5959       MaxWidth = std::max<unsigned>(
5960           MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
5961     }
5962   }
5963   return {MinWidth, MaxWidth};
5964 }
5965 
5966 void LoopVectorizationCostModel::collectElementTypesForWidening() {
5967   ElementTypesInLoop.clear();
5968   // For each block.
5969   for (BasicBlock *BB : TheLoop->blocks()) {
5970     // For each instruction in the loop.
5971     for (Instruction &I : BB->instructionsWithoutDebug()) {
5972       Type *T = I.getType();
5973 
5974       // Skip ignored values.
5975       if (ValuesToIgnore.count(&I))
5976         continue;
5977 
5978       // Only examine Loads, Stores and PHINodes.
5979       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
5980         continue;
5981 
5982       // Examine PHI nodes that are reduction variables. Update the type to
5983       // account for the recurrence type.
5984       if (auto *PN = dyn_cast<PHINode>(&I)) {
5985         if (!Legal->isReductionVariable(PN))
5986           continue;
5987         const RecurrenceDescriptor &RdxDesc =
5988             Legal->getReductionVars().find(PN)->second;
5989         if (PreferInLoopReductions || useOrderedReductions(RdxDesc) ||
5990             TTI.preferInLoopReduction(RdxDesc.getOpcode(),
5991                                       RdxDesc.getRecurrenceType(),
5992                                       TargetTransformInfo::ReductionFlags()))
5993           continue;
5994         T = RdxDesc.getRecurrenceType();
5995       }
5996 
5997       // Examine the stored values.
5998       if (auto *ST = dyn_cast<StoreInst>(&I))
5999         T = ST->getValueOperand()->getType();
6000 
6001       assert(T->isSized() &&
6002              "Expected the load/store/recurrence type to be sized");
6003 
6004       ElementTypesInLoop.insert(T);
6005     }
6006   }
6007 }
6008 
6009 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
6010                                                            unsigned LoopCost) {
6011   // -- The interleave heuristics --
6012   // We interleave the loop in order to expose ILP and reduce the loop overhead.
6013   // There are many micro-architectural considerations that we can't predict
6014   // at this level. For example, frontend pressure (on decode or fetch) due to
6015   // code size, or the number and capabilities of the execution ports.
6016   //
6017   // We use the following heuristics to select the interleave count:
6018   // 1. If the code has reductions, then we interleave to break the cross
6019   // iteration dependency.
6020   // 2. If the loop is really small, then we interleave to reduce the loop
6021   // overhead.
6022   // 3. We don't interleave if we think that we will spill registers to memory
6023   // due to the increased register pressure.
6024 
6025   if (!isScalarEpilogueAllowed())
6026     return 1;
6027 
  // A finite maximum safe dependence distance has already been used to cap the
  // vectorization factor; interleaving further could exceed that distance, so
  // do not interleave.
6029   if (Legal->getMaxSafeDepDistBytes() != -1U)
6030     return 1;
6031 
6032   auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
6033   const bool HasReductions = !Legal->getReductionVars().empty();
  // Do not interleave loops with a relatively small known or estimated trip
  // count. But we will interleave when InterleaveSmallLoopScalarReduction is
  // enabled, and the code has scalar reductions (HasReductions && VF == 1),
  // because under those conditions interleaving can expose ILP and break
  // cross-iteration dependences for reductions.
6039   if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
6040       !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
6041     return 1;
6042 
6043   RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these values below, so make sure each register class records
  // at least one user to avoid dividing by zero.
6046   for (auto& pair : R.MaxLocalUsers) {
6047     pair.second = std::max(pair.second, 1U);
6048   }
6049 
  // We calculate the interleave count using the following formula.
  // Subtract the number of loop invariants from the number of available
  // registers. These registers are used by all of the interleaved instances.
  // Next, divide the remaining registers by the number of registers that are
  // required by the loop, in order to estimate how many parallel instances
  // fit without causing spills. All of this is rounded down if necessary to be
  // a power of two. We want a power-of-two interleave count to simplify any
  // addressing operations and alignment considerations.
  // We also want power-of-two interleave counts to ensure that the induction
  // variable of the vector loop wraps to zero when the tail is folded by
  // masking; this currently happens when optimizing for size, in which case IC
  // is set to 1 above.
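  // Illustrative (hypothetical) numbers: with 32 registers in a class, 2 of
  // them holding loop-invariant values and 5 values live at once inside the
  // loop, the basic estimate is PowerOf2Floor((32 - 2) / 5) = 4; with the
  // induction-variable heuristic it is PowerOf2Floor((32 - 2 - 1) / (5 - 1)),
  // which is also 4.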
6061   unsigned IC = UINT_MAX;
6062 
6063   for (auto& pair : R.MaxLocalUsers) {
6064     unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
6065     LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
6066                       << " registers of "
6067                       << TTI.getRegisterClassName(pair.first) << " register class\n");
6068     if (VF.isScalar()) {
6069       if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
6070         TargetNumRegisters = ForceTargetNumScalarRegs;
6071     } else {
6072       if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
6073         TargetNumRegisters = ForceTargetNumVectorRegs;
6074     }
6075     unsigned MaxLocalUsers = pair.second;
6076     unsigned LoopInvariantRegs = 0;
6077     if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
6078       LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
6079 
    unsigned TmpIC =
        PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers);
6081     // Don't count the induction variable as interleaved.
6082     if (EnableIndVarRegisterHeur) {
6083       TmpIC =
6084           PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
6085                         std::max(1U, (MaxLocalUsers - 1)));
6086     }
6087 
6088     IC = std::min(IC, TmpIC);
6089   }
6090 
6091   // Clamp the interleave ranges to reasonable counts.
6092   unsigned MaxInterleaveCount =
6093       TTI.getMaxInterleaveFactor(VF.getKnownMinValue());
6094 
6095   // Check if the user has overridden the max.
6096   if (VF.isScalar()) {
6097     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
6098       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
6099   } else {
6100     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
6101       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
6102   }
6103 
  // If the trip count is a known or estimated compile-time constant, limit the
  // interleave count to at most the trip count divided by VF, and make sure it
  // is at least 1.
  //
  // For scalable vectors we can't know if interleaving is beneficial. It may
  // not be beneficial for small loops if none of the lanes in the second
  // vector iteration is enabled. However, for larger loops, there is likely to
  // be a similar benefit as for fixed-width vectors. For now, we choose to
  // leave the interleave count as if vscale were '1', although if some
  // information about the vector is known (e.g. the minimum vector size), we
  // can make a better decision.
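  // Illustrative (hypothetical) numbers: with an estimated trip count of 17,
  // VF = 8 and a target maximum of 4, the interleave count is capped at
  // min(17 / 8, 4) = 2.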
6114   if (BestKnownTC) {
6115     MaxInterleaveCount =
6116         std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
6117     // Make sure MaxInterleaveCount is greater than 0.
6118     MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
6119   }
6120 
6121   assert(MaxInterleaveCount > 0 &&
6122          "Maximum interleave count must be greater than 0");
6123 
  // Clamp the calculated IC to be between 1 and the maximum interleave count
  // that the target and trip count allow.
6126   if (IC > MaxInterleaveCount)
6127     IC = MaxInterleaveCount;
6128   else
6129     // Make sure IC is greater than 0.
6130     IC = std::max(1u, IC);
6131 
6132   assert(IC > 0 && "Interleave count must be greater than 0.");
6133 
6134   // If we did not calculate the cost for VF (because the user selected the VF)
6135   // then we calculate the cost of VF here.
6136   if (LoopCost == 0) {
6137     InstructionCost C = expectedCost(VF).first;
6138     assert(C.isValid() && "Expected to have chosen a VF with valid cost");
6139     LoopCost = *C.getValue();
6140   }
6141 
6142   assert(LoopCost && "Non-zero loop cost expected");
6143 
6144   // Interleave if we vectorized this loop and there is a reduction that could
6145   // benefit from interleaving.
6146   if (VF.isVector() && HasReductions) {
6147     LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
6148     return IC;
6149   }
6150 
6151   // Note that if we've already vectorized the loop we will have done the
6152   // runtime check and so interleaving won't require further checks.
6153   bool InterleavingRequiresRuntimePointerCheck =
6154       (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
6155 
6156   // We want to interleave small loops in order to reduce the loop overhead and
6157   // potentially expose ILP opportunities.
6158   LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
6159                     << "LV: IC is " << IC << '\n'
6160                     << "LV: VF is " << VF << '\n');
6161   const bool AggressivelyInterleaveReductions =
6162       TTI.enableAggressiveInterleaving(HasReductions);
6163   if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
    // We assume that the per-iteration loop overhead costs roughly 1 and use
    // the cost model's estimate of the loop body cost, interleaving until that
    // overhead is only about 5% of the cost of the interleaved loop.
6167     unsigned SmallIC =
6168         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
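    // Illustrative (hypothetical) numbers: assuming the default SmallLoopCost
    // of 20 and a LoopCost of 6, SmallIC = min(IC, PowerOf2Floor(20 / 6)) =
    // min(IC, 2).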
6169 
6170     // Interleave until store/load ports (estimated by max interleave count) are
6171     // saturated.
6172     unsigned NumStores = Legal->getNumStores();
6173     unsigned NumLoads = Legal->getNumLoads();
6174     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
6175     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
6176 
6177     // There is little point in interleaving for reductions containing selects
6178     // and compares when VF=1 since it may just create more overhead than it's
6179     // worth for loops with small trip counts. This is because we still have to
6180     // do the final reduction after the loop.
6181     bool HasSelectCmpReductions =
6182         HasReductions &&
6183         any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
6184           const RecurrenceDescriptor &RdxDesc = Reduction.second;
6185           return RecurrenceDescriptor::isSelectCmpRecurrenceKind(
6186               RdxDesc.getRecurrenceKind());
6187         });
6188     if (HasSelectCmpReductions) {
6189       LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n");
6190       return 1;
6191     }
6192 
6193     // If we have a scalar reduction (vector reductions are already dealt with
6194     // by this point), we can increase the critical path length if the loop
6195     // we're interleaving is inside another loop. For tree-wise reductions
6196     // set the limit to 2, and for ordered reductions it's best to disable
6197     // interleaving entirely.
6198     if (HasReductions && TheLoop->getLoopDepth() > 1) {
6199       bool HasOrderedReductions =
6200           any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
6201             const RecurrenceDescriptor &RdxDesc = Reduction.second;
6202             return RdxDesc.isOrdered();
6203           });
6204       if (HasOrderedReductions) {
6205         LLVM_DEBUG(
6206             dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
6207         return 1;
6208       }
6209 
6210       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
6211       SmallIC = std::min(SmallIC, F);
6212       StoresIC = std::min(StoresIC, F);
6213       LoadsIC = std::min(LoadsIC, F);
6214     }
6215 
6216     if (EnableLoadStoreRuntimeInterleave &&
6217         std::max(StoresIC, LoadsIC) > SmallIC) {
6218       LLVM_DEBUG(
6219           dbgs() << "LV: Interleaving to saturate store or load ports.\n");
6220       return std::max(StoresIC, LoadsIC);
6221     }
6222 
6223     // If there are scalar reductions and TTI has enabled aggressive
6224     // interleaving for reductions, we will interleave to expose ILP.
6225     if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
6226         AggressivelyInterleaveReductions) {
6227       LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
      // Interleave no less than SmallIC, but not as aggressively as the normal
      // IC, to handle the rare case where resources are too limited.
6230       return std::max(IC / 2, SmallIC);
6231     } else {
6232       LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
6233       return SmallIC;
6234     }
6235   }
6236 
6237   // Interleave if this is a large loop (small loops are already dealt with by
6238   // this point) that could benefit from interleaving.
6239   if (AggressivelyInterleaveReductions) {
6240     LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6241     return IC;
6242   }
6243 
6244   LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
6245   return 1;
6246 }
6247 
6248 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
6249 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
6250   // This function calculates the register usage by measuring the highest number
6251   // of values that are alive at a single location. Obviously, this is a very
  // rough estimation. We scan the loop in topological order and assign a
  // number to each instruction. We use RPO to ensure that defs are
6254   // met before their users. We assume that each instruction that has in-loop
6255   // users starts an interval. We record every time that an in-loop value is
6256   // used, so we have a list of the first and last occurrences of each
6257   // instruction. Next, we transpose this data structure into a multi map that
6258   // holds the list of intervals that *end* at a specific location. This multi
6259   // map allows us to perform a linear search. We scan the instructions linearly
6260   // and record each time that a new interval starts, by placing it in a set.
6261   // If we find this value in the multi-map then we remove it from the set.
6262   // The max register usage is the maximum size of the set.
6263   // We also search for instructions that are defined outside the loop, but are
  // used inside the loop. We need this number separately from the max-interval
  // usage number because, when we unroll, loop-invariant values do not take
  // more registers.
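  // Illustrative (hypothetical) example: in a body such as
  //   %a = load ...
  //   %b = add %a, %x
  //   store %b, ...
  // the intervals of %a and %b overlap at the add, so at most two in-loop
  // values are live at once, while %x (defined outside the loop) is counted
  // separately as a loop invariant.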
6267   LoopBlocksDFS DFS(TheLoop);
6268   DFS.perform(LI);
6269 
6270   RegisterUsage RU;
6271 
6272   // Each 'key' in the map opens a new interval. The values
6273   // of the map are the index of the 'last seen' usage of the
6274   // instruction that is the key.
6275   using IntervalMap = DenseMap<Instruction *, unsigned>;
6276 
6277   // Maps instruction to its index.
6278   SmallVector<Instruction *, 64> IdxToInstr;
6279   // Marks the end of each interval.
6280   IntervalMap EndPoint;
6281   // Saves the list of instruction indices that are used in the loop.
6282   SmallPtrSet<Instruction *, 8> Ends;
6283   // Saves the list of values that are used in the loop but are
6284   // defined outside the loop, such as arguments and constants.
6285   SmallPtrSet<Value *, 8> LoopInvariants;
6286 
6287   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
6288     for (Instruction &I : BB->instructionsWithoutDebug()) {
6289       IdxToInstr.push_back(&I);
6290 
6291       // Save the end location of each USE.
6292       for (Value *U : I.operands()) {
6293         auto *Instr = dyn_cast<Instruction>(U);
6294 
6295         // Ignore non-instruction values such as arguments, constants, etc.
6296         if (!Instr)
6297           continue;
6298 
6299         // If this instruction is outside the loop then record it and continue.
6300         if (!TheLoop->contains(Instr)) {
6301           LoopInvariants.insert(Instr);
6302           continue;
6303         }
6304 
6305         // Overwrite previous end points.
6306         EndPoint[Instr] = IdxToInstr.size();
6307         Ends.insert(Instr);
6308       }
6309     }
6310   }
6311 
6312   // Saves the list of intervals that end with the index in 'key'.
6313   using InstrList = SmallVector<Instruction *, 2>;
6314   DenseMap<unsigned, InstrList> TransposeEnds;
6315 
6316   // Transpose the EndPoints to a list of values that end at each index.
6317   for (auto &Interval : EndPoint)
6318     TransposeEnds[Interval.second].push_back(Interval.first);
6319 
6320   SmallPtrSet<Instruction *, 8> OpenIntervals;
6321   SmallVector<RegisterUsage, 8> RUs(VFs.size());
6322   SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());
6323 
6324   LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
6325 
6326   // A lambda that gets the register usage for the given type and VF.
6327   const auto &TTICapture = TTI;
6328   auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned {
6329     if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty))
6330       return 0;
6331     InstructionCost::CostType RegUsage =
6332         *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue();
6333     assert(RegUsage >= 0 && RegUsage <= std::numeric_limits<unsigned>::max() &&
6334            "Nonsensical values for register usage.");
6335     return RegUsage;
6336   };
6337 
6338   for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
6339     Instruction *I = IdxToInstr[i];
6340 
6341     // Remove all of the instructions that end at this location.
6342     InstrList &List = TransposeEnds[i];
6343     for (Instruction *ToRemove : List)
6344       OpenIntervals.erase(ToRemove);
6345 
6346     // Ignore instructions that are never used within the loop.
6347     if (!Ends.count(I))
6348       continue;
6349 
6350     // Skip ignored values.
6351     if (ValuesToIgnore.count(I))
6352       continue;
6353 
6354     // For each VF find the maximum usage of registers.
6355     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
6356       // Count the number of live intervals.
6357       SmallMapVector<unsigned, unsigned, 4> RegUsage;
6358 
      if (VFs[j].isScalar()) {
        for (auto *Inst : OpenIntervals) {
          unsigned ClassID =
              TTI.getRegisterClassForType(false, Inst->getType());
          // Missing entries are value-initialized to zero by operator[].
          RegUsage[ClassID] += 1;
        }
      } else {
        collectUniformsAndScalars(VFs[j]);
        for (auto *Inst : OpenIntervals) {
          // Skip ignored values for VF > 1.
          if (VecValuesToIgnore.count(Inst))
            continue;
          if (isScalarAfterVectorization(Inst, VFs[j])) {
            unsigned ClassID =
                TTI.getRegisterClassForType(false, Inst->getType());
            RegUsage[ClassID] += 1;
          } else {
            unsigned ClassID =
                TTI.getRegisterClassForType(true, Inst->getType());
            RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
          }
        }
      }
6388 
      for (auto &pair : RegUsage)
        MaxUsages[j][pair.first] =
            std::max(MaxUsages[j][pair.first], pair.second);
6395     }
6396 
6397     LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
6398                       << OpenIntervals.size() << '\n');
6399 
6400     // Add the current instruction to the list of open intervals.
6401     OpenIntervals.insert(I);
6402   }
6403 
6404   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
6405     SmallMapVector<unsigned, unsigned, 4> Invariant;
6406 
    for (auto *Inst : LoopInvariants) {
      unsigned Usage =
          VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
      unsigned ClassID =
          TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
      Invariant[ClassID] += Usage;
    }
6417 
6418     LLVM_DEBUG({
6419       dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
6420       dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
6421              << " item\n";
6422       for (const auto &pair : MaxUsages[i]) {
6423         dbgs() << "LV(REG): RegisterClass: "
6424                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6425                << " registers\n";
6426       }
6427       dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
6428              << " item\n";
6429       for (const auto &pair : Invariant) {
6430         dbgs() << "LV(REG): RegisterClass: "
6431                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6432                << " registers\n";
6433       }
6434     });
6435 
6436     RU.LoopInvariantRegs = Invariant;
6437     RU.MaxLocalUsers = MaxUsages[i];
6438     RUs[i] = RU;
6439   }
6440 
6441   return RUs;
6442 }
6443 
6444 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I,
6445                                                            ElementCount VF) {
6446   // TODO: Cost model for emulated masked load/store is completely
6447   // broken. This hack guides the cost model to use an artificially
6448   // high enough value to practically disable vectorization with such
6449   // operations, except where previously deployed legality hack allowed
6450   // using very low cost values. This is to avoid regressions coming simply
6451   // from moving "masked load/store" check from legality to cost model.
  // Emulation of masked loads/gathers was previously never allowed.
  // Emulation of a limited number of masked stores/scatters was allowed.
6454   assert(isPredicatedInst(I, VF) && "Expecting a scalar emulated instruction");
6455   return isa<LoadInst>(I) ||
6456          (isa<StoreInst>(I) &&
6457           NumPredStores > NumberOfStoresToPredicate);
6458 }
6459 
6460 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
6461   // If we aren't vectorizing the loop, or if we've already collected the
6462   // instructions to scalarize, there's nothing to do. Collection may already
6463   // have occurred if we have a user-selected VF and are now computing the
6464   // expected cost for interleaving.
6465   if (VF.isScalar() || VF.isZero() ||
6466       InstsToScalarize.find(VF) != InstsToScalarize.end())
6467     return;
6468 
  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
6470   // not profitable to scalarize any instructions, the presence of VF in the
6471   // map will indicate that we've analyzed it already.
6472   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
6473 
6474   // Find all the instructions that are scalar with predication in the loop and
6475   // determine if it would be better to not if-convert the blocks they are in.
6476   // If so, we also record the instructions to scalarize.
6477   for (BasicBlock *BB : TheLoop->blocks()) {
6478     if (!blockNeedsPredicationForAnyReason(BB))
6479       continue;
6480     for (Instruction &I : *BB)
6481       if (isScalarWithPredication(&I, VF)) {
6482         ScalarCostsTy ScalarCosts;
6483         // Do not apply discount if scalable, because that would lead to
6484         // invalid scalarization costs.
6485         // Do not apply discount logic if hacked cost is needed
6486         // for emulated masked memrefs.
6487         if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I, VF) &&
6488             computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
6489           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
6490         // Remember that BB will remain after vectorization.
6491         PredicatedBBsAfterVectorization.insert(BB);
6492       }
6493   }
6494 }
6495 
6496 int LoopVectorizationCostModel::computePredInstDiscount(
6497     Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
6498   assert(!isUniformAfterVectorization(PredInst, VF) &&
6499          "Instruction marked uniform-after-vectorization will be predicated");
6500 
6501   // Initialize the discount to zero, meaning that the scalar version and the
6502   // vector version cost the same.
6503   InstructionCost Discount = 0;
6504 
6505   // Holds instructions to analyze. The instructions we visit are mapped in
6506   // ScalarCosts. Those instructions are the ones that would be scalarized if
6507   // we find that the scalar version costs less.
6508   SmallVector<Instruction *, 8> Worklist;
6509 
6510   // Returns true if the given instruction can be scalarized.
6511   auto canBeScalarized = [&](Instruction *I) -> bool {
6512     // We only attempt to scalarize instructions forming a single-use chain
6513     // from the original predicated block that would otherwise be vectorized.
6514     // Although not strictly necessary, we give up on instructions we know will
6515     // already be scalar to avoid traversing chains that are unlikely to be
6516     // beneficial.
6517     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
6518         isScalarAfterVectorization(I, VF))
6519       return false;
6520 
6521     // If the instruction is scalar with predication, it will be analyzed
6522     // separately. We ignore it within the context of PredInst.
6523     if (isScalarWithPredication(I, VF))
6524       return false;
6525 
6526     // If any of the instruction's operands are uniform after vectorization,
6527     // the instruction cannot be scalarized. This prevents, for example, a
6528     // masked load from being scalarized.
6529     //
6530     // We assume we will only emit a value for lane zero of an instruction
6531     // marked uniform after vectorization, rather than VF identical values.
6532     // Thus, if we scalarize an instruction that uses a uniform, we would
6533     // create uses of values corresponding to the lanes we aren't emitting code
6534     // for. This behavior can be changed by allowing getScalarValue to clone
6535     // the lane zero values for uniforms rather than asserting.
6536     for (Use &U : I->operands())
6537       if (auto *J = dyn_cast<Instruction>(U.get()))
6538         if (isUniformAfterVectorization(J, VF))
6539           return false;
6540 
6541     // Otherwise, we can scalarize the instruction.
6542     return true;
6543   };
6544 
6545   // Compute the expected cost discount from scalarizing the entire expression
6546   // feeding the predicated instruction. We currently only consider expressions
6547   // that are single-use instruction chains.
6548   Worklist.push_back(PredInst);
6549   while (!Worklist.empty()) {
6550     Instruction *I = Worklist.pop_back_val();
6551 
6552     // If we've already analyzed the instruction, there's nothing to do.
6553     if (ScalarCosts.find(I) != ScalarCosts.end())
6554       continue;
6555 
6556     // Compute the cost of the vector instruction. Note that this cost already
6557     // includes the scalarization overhead of the predicated instruction.
6558     InstructionCost VectorCost = getInstructionCost(I, VF).first;
6559 
6560     // Compute the cost of the scalarized instruction. This cost is the cost of
6561     // the instruction as if it wasn't if-converted and instead remained in the
6562     // predicated block. We will scale this cost by block probability after
6563     // computing the scalarization overhead.
6564     InstructionCost ScalarCost =
6565         VF.getFixedValue() *
6566         getInstructionCost(I, ElementCount::getFixed(1)).first;
6567 
6568     // Compute the scalarization overhead of needed insertelement instructions
6569     // and phi nodes.
6570     if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) {
6571       ScalarCost += TTI.getScalarizationOverhead(
6572           cast<VectorType>(ToVectorTy(I->getType(), VF)),
6573           APInt::getAllOnes(VF.getFixedValue()), true, false);
6574       ScalarCost +=
6575           VF.getFixedValue() *
6576           TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput);
6577     }
6578 
6579     // Compute the scalarization overhead of needed extractelement
6580     // instructions. For each of the instruction's operands, if the operand can
6581     // be scalarized, add it to the worklist; otherwise, account for the
6582     // overhead.
6583     for (Use &U : I->operands())
6584       if (auto *J = dyn_cast<Instruction>(U.get())) {
6585         assert(VectorType::isValidElementType(J->getType()) &&
6586                "Instruction has non-scalar type");
6587         if (canBeScalarized(J))
6588           Worklist.push_back(J);
6589         else if (needsExtract(J, VF)) {
6590           ScalarCost += TTI.getScalarizationOverhead(
6591               cast<VectorType>(ToVectorTy(J->getType(), VF)),
6592               APInt::getAllOnes(VF.getFixedValue()), false, true);
6593         }
6594       }
6595 
6596     // Scale the total scalar cost by block probability.
6597     ScalarCost /= getReciprocalPredBlockProb();
6598 
6599     // Compute the discount. A non-negative discount means the vector version
6600     // of the instruction costs more, and scalarizing would be beneficial.
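    // Illustrative (hypothetical) numbers: if the vector form of a chain
    // member costs 12 and its probability-scaled scalar form costs 8, the
    // running discount increases by 4 in favor of scalarization.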
6601     Discount += VectorCost - ScalarCost;
6602     ScalarCosts[I] = ScalarCost;
6603   }
6604 
6605   return *Discount.getValue();
6606 }
6607 
6608 LoopVectorizationCostModel::VectorizationCostTy
6609 LoopVectorizationCostModel::expectedCost(
6610     ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) {
6611   VectorizationCostTy Cost;
6612 
6613   // For each block.
6614   for (BasicBlock *BB : TheLoop->blocks()) {
6615     VectorizationCostTy BlockCost;
6616 
6617     // For each instruction in the old loop.
6618     for (Instruction &I : BB->instructionsWithoutDebug()) {
6619       // Skip ignored values.
6620       if (ValuesToIgnore.count(&I) ||
6621           (VF.isVector() && VecValuesToIgnore.count(&I)))
6622         continue;
6623 
6624       VectorizationCostTy C = getInstructionCost(&I, VF);
6625 
6626       // Check if we should override the cost.
6627       if (C.first.isValid() &&
6628           ForceTargetInstructionCost.getNumOccurrences() > 0)
6629         C.first = InstructionCost(ForceTargetInstructionCost);
6630 
6631       // Keep a list of instructions with invalid costs.
6632       if (Invalid && !C.first.isValid())
6633         Invalid->emplace_back(&I, VF);
6634 
6635       BlockCost.first += C.first;
6636       BlockCost.second |= C.second;
6637       LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
6638                         << " for VF " << VF << " For instruction: " << I
6639                         << '\n');
6640     }
6641 
6642     // If we are vectorizing a predicated block, it will have been
6643     // if-converted. This means that the block's instructions (aside from
6644     // stores and instructions that may divide by zero) will now be
6645     // unconditionally executed. For the scalar case, we may not always execute
6646     // the predicated block, if it is an if-else block. Thus, scale the block's
6647     // cost by the probability of executing it. blockNeedsPredication from
6648     // Legal is used so as to not include all blocks in tail folded loops.
6649     if (VF.isScalar() && Legal->blockNeedsPredication(BB))
6650       BlockCost.first /= getReciprocalPredBlockProb();
6651 
6652     Cost.first += BlockCost.first;
6653     Cost.second |= BlockCost.second;
6654   }
6655 
6656   return Cost;
6657 }
6658 
/// Gets the address access SCEV after verifying that the access pattern is
/// loop invariant except for the induction variable dependence.
6661 ///
6662 /// This SCEV can be sent to the Target in order to estimate the address
6663 /// calculation cost.
static const SCEV *getAddressAccessSCEV(Value *Ptr,
                                        LoopVectorizationLegality *Legal,
                                        PredicatedScalarEvolution &PSE,
                                        const Loop *TheLoop) {
6670   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
6671   if (!Gep)
6672     return nullptr;
6673 
6674   // We are looking for a gep with all loop invariant indices except for one
6675   // which should be an induction variable.
6676   auto SE = PSE.getSE();
6677   unsigned NumOperands = Gep->getNumOperands();
6678   for (unsigned i = 1; i < NumOperands; ++i) {
6679     Value *Opd = Gep->getOperand(i);
6680     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
6681         !Legal->isInductionVariable(Opd))
6682       return nullptr;
6683   }
6684 
  // Now we know we have a GEP of the form (ptr, %inv, %ind, %inv). Return the
  // pointer SCEV.
6686   return PSE.getSCEV(Ptr);
6687 }
6688 
6689 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
6690   return Legal->hasStride(I->getOperand(0)) ||
6691          Legal->hasStride(I->getOperand(1));
6692 }
6693 
6694 InstructionCost
6695 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
6696                                                         ElementCount VF) {
6697   assert(VF.isVector() &&
6698          "Scalarization cost of instruction implies vectorization.");
6699   if (VF.isScalable())
6700     return InstructionCost::getInvalid();
6701 
6702   Type *ValTy = getLoadStoreType(I);
6703   auto SE = PSE.getSE();
6704 
6705   unsigned AS = getLoadStoreAddressSpace(I);
6706   Value *Ptr = getLoadStorePointerOperand(I);
6707   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
6708   // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost`
6709   //       that it is being called from this specific place.
6710 
  // Figure out whether the access is strided and get the stride value
  // if it's known at compile time.
6713   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
6714 
6715   // Get the cost of the scalar memory instruction and address computation.
6716   InstructionCost Cost =
6717       VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
6718 
6719   // Don't pass *I here, since it is scalar but will actually be part of a
6720   // vectorized loop where the user of it is a vectorized instruction.
6721   const Align Alignment = getLoadStoreAlignment(I);
6722   Cost += VF.getKnownMinValue() *
6723           TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
6724                               AS, TTI::TCK_RecipThroughput);
6725 
6726   // Get the overhead of the extractelement and insertelement instructions
6727   // we might create due to scalarization.
6728   Cost += getScalarizationOverhead(I, VF);
6729 
6730   // If we have a predicated load/store, it will need extra i1 extracts and
6731   // conditional branches, but may not be executed for each vector lane. Scale
6732   // the cost by the probability of executing the predicated block.
6733   if (isPredicatedInst(I, VF)) {
6734     Cost /= getReciprocalPredBlockProb();
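    // Illustrative (hypothetical) example: with a reciprocal block probability
    // of 2 (the predicated block is assumed to execute on roughly half of the
    // iterations), the scalarization cost computed above is halved before the
    // per-lane i1 extract and branch costs are added.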
6735 
6736     // Add the cost of an i1 extract and a branch
6737     auto *Vec_i1Ty =
6738         VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF);
6739     Cost += TTI.getScalarizationOverhead(
6740         Vec_i1Ty, APInt::getAllOnes(VF.getKnownMinValue()),
6741         /*Insert=*/false, /*Extract=*/true);
6742     Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput);
6743 
6744     if (useEmulatedMaskMemRefHack(I, VF))
6745       // Artificially setting to a high enough value to practically disable
6746       // vectorization with such operations.
6747       Cost = 3000000;
6748   }
6749 
6750   return Cost;
6751 }
6752 
6753 InstructionCost
6754 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
6755                                                     ElementCount VF) {
6756   Type *ValTy = getLoadStoreType(I);
6757   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6758   Value *Ptr = getLoadStorePointerOperand(I);
6759   unsigned AS = getLoadStoreAddressSpace(I);
6760   int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr);
6761   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6762 
6763   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6764          "Stride should be 1 or -1 for consecutive memory access");
6765   const Align Alignment = getLoadStoreAlignment(I);
6766   InstructionCost Cost = 0;
6767   if (Legal->isMaskRequired(I))
6768     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6769                                       CostKind);
6770   else
6771     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6772                                 CostKind, I);
6773 
6774   bool Reverse = ConsecutiveStride < 0;
6775   if (Reverse)
6776     Cost +=
6777         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
6778   return Cost;
6779 }
6780 
6781 InstructionCost
6782 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
6783                                                 ElementCount VF) {
6784   assert(Legal->isUniformMemOp(*I));
6785 
6786   Type *ValTy = getLoadStoreType(I);
6787   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6788   const Align Alignment = getLoadStoreAlignment(I);
6789   unsigned AS = getLoadStoreAddressSpace(I);
6790   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6791   if (isa<LoadInst>(I)) {
6792     return TTI.getAddressComputationCost(ValTy) +
6793            TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
6794                                CostKind) +
6795            TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
6796   }
6797   StoreInst *SI = cast<StoreInst>(I);
6798 
6799   bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
6800   return TTI.getAddressComputationCost(ValTy) +
6801          TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
6802                              CostKind) +
6803          (isLoopInvariantStoreValue
6804               ? 0
6805               : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
6806                                        VF.getKnownMinValue() - 1));
6807 }
6808 
6809 InstructionCost
6810 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
6811                                                  ElementCount VF) {
6812   Type *ValTy = getLoadStoreType(I);
6813   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6814   const Align Alignment = getLoadStoreAlignment(I);
6815   const Value *Ptr = getLoadStorePointerOperand(I);
6816 
6817   return TTI.getAddressComputationCost(VectorTy) +
6818          TTI.getGatherScatterOpCost(
6819              I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
6820              TargetTransformInfo::TCK_RecipThroughput, I);
6821 }
6822 
6823 InstructionCost
6824 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
6825                                                    ElementCount VF) {
6826   // TODO: Once we have support for interleaving with scalable vectors
6827   // we can calculate the cost properly here.
6828   if (VF.isScalable())
6829     return InstructionCost::getInvalid();
6830 
6831   Type *ValTy = getLoadStoreType(I);
6832   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6833   unsigned AS = getLoadStoreAddressSpace(I);
6834 
6835   auto Group = getInterleavedAccessGroup(I);
6836   assert(Group && "Fail to get an interleaved access group.");
6837 
6838   unsigned InterleaveFactor = Group->getFactor();
6839   auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
6840 
6841   // Holds the indices of existing members in the interleaved group.
6842   SmallVector<unsigned, 4> Indices;
6843   for (unsigned IF = 0; IF < InterleaveFactor; IF++)
6844     if (Group->getMember(IF))
6845       Indices.push_back(IF);
6846 
6847   // Calculate the cost of the whole interleaved group.
6848   bool UseMaskForGaps =
6849       (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) ||
6850       (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()));
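  // Illustrative (hypothetical) example: a load group with factor 3 whose
  // members 0 and 2 are present yields Indices = {0, 2}; a store group with a
  // missing member, or a group that needs a scalar epilogue when none is
  // allowed, must additionally mask the gaps.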
6851   InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
6852       I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
6853       AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);
6854 
6855   if (Group->isReverse()) {
6856     // TODO: Add support for reversed masked interleaved access.
6857     assert(!Legal->isMaskRequired(I) &&
6858            "Reverse masked interleaved access not supported.");
6859     Cost +=
6860         Group->getNumMembers() *
6861         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
6862   }
6863   return Cost;
6864 }
6865 
6866 Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost(
6867     Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
6868   using namespace llvm::PatternMatch;
6869   // Early exit for no inloop reductions
6870   if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
6871     return None;
6872   auto *VectorTy = cast<VectorType>(Ty);
6873 
  // We are looking for one of the following patterns, choosing the one with
  // the minimal acceptable cost:
  //  reduce(mul(ext(A), ext(B))) or
  //  reduce(mul(A, B)) or
  //  reduce(ext(A)) or
  //  reduce(A).
  // The basic idea is that we walk down the tree, finding the root reduction
  // instruction in InLoopReductionImmediateChains. From there we find the
  // pattern of mul/ext and test the cost of the entire pattern vs the cost of
  // the components. If the reduction cost is lower, we return it for the
  // reduction instruction and 0 for the other instructions in the pattern. If
  // it is not, we return None so that the original cost modelling is used.
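  // A hypothetical scalar-loop shape matching reduce(mul(ext(A), ext(B))):
  //   %a.ext = sext i8 %a to i32
  //   %b.ext = sext i8 %b to i32
  //   %mul   = mul i32 %a.ext, %b.ext
  //   %sum   = add i32 %mul, %phi   ; the in-loop reduction chain
  // which, when vectorized with an in-loop reduction, may map to a single
  // extended multiply-add reduction on targets that support it.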
6886   Instruction *RetI = I;
6887   if (match(RetI, m_ZExtOrSExt(m_Value()))) {
6888     if (!RetI->hasOneUser())
6889       return None;
6890     RetI = RetI->user_back();
6891   }
6892   if (match(RetI, m_Mul(m_Value(), m_Value())) &&
6893       RetI->user_back()->getOpcode() == Instruction::Add) {
6894     if (!RetI->hasOneUser())
6895       return None;
6896     RetI = RetI->user_back();
6897   }
6898 
  // Test if the found instruction is a reduction, and if not return None to
  // tell the caller to use the original cost modelling.
6901   if (!InLoopReductionImmediateChains.count(RetI))
6902     return None;
6903 
6904   // Find the reduction this chain is a part of and calculate the basic cost of
6905   // the reduction on its own.
6906   Instruction *LastChain = InLoopReductionImmediateChains[RetI];
6907   Instruction *ReductionPhi = LastChain;
6908   while (!isa<PHINode>(ReductionPhi))
6909     ReductionPhi = InLoopReductionImmediateChains[ReductionPhi];
6910 
6911   const RecurrenceDescriptor &RdxDesc =
6912       Legal->getReductionVars().find(cast<PHINode>(ReductionPhi))->second;
6913 
6914   InstructionCost BaseCost = TTI.getArithmeticReductionCost(
6915       RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind);
6916 
6917   // For a call to the llvm.fmuladd intrinsic we need to add the cost of a
6918   // normal fmul instruction to the cost of the fadd reduction.
6919   if (RdxDesc.getRecurrenceKind() == RecurKind::FMulAdd)
6920     BaseCost +=
6921         TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind);
6922 
6923   // If we're using ordered reductions then we can just return the base cost
6924   // here, since getArithmeticReductionCost calculates the full ordered
6925   // reduction cost when FP reassociation is not allowed.
6926   if (useOrderedReductions(RdxDesc))
6927     return BaseCost;
6928 
6929   // Get the operand that was not the reduction chain and match it to one of the
6930   // patterns, returning the better cost if it is found.
6931   Instruction *RedOp = RetI->getOperand(1) == LastChain
6932                            ? dyn_cast<Instruction>(RetI->getOperand(0))
6933                            : dyn_cast<Instruction>(RetI->getOperand(1));
6934 
6935   VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
6936 
6937   Instruction *Op0, *Op1;
6938   if (RedOp &&
6939       match(RedOp,
6940             m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) &&
6941       match(Op0, m_ZExtOrSExt(m_Value())) &&
6942       Op0->getOpcode() == Op1->getOpcode() &&
6943       Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
6944       !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) &&
6945       (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) {
6946 
    // Matched reduce(ext(mul(ext(A), ext(B)))).
6948     // Note that the extend opcodes need to all match, or if A==B they will have
6949     // been converted to zext(mul(sext(A), sext(A))) as it is known positive,
6950     // which is equally fine.
6951     bool IsUnsigned = isa<ZExtInst>(Op0);
6952     auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
6953     auto *MulType = VectorType::get(Op0->getType(), VectorTy);
6954 
6955     InstructionCost ExtCost =
6956         TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType,
6957                              TTI::CastContextHint::None, CostKind, Op0);
6958     InstructionCost MulCost =
6959         TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind);
6960     InstructionCost Ext2Cost =
6961         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType,
6962                              TTI::CastContextHint::None, CostKind, RedOp);
6963 
6964     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6965         /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6966         CostKind);
6967 
6968     if (RedCost.isValid() &&
6969         RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost)
6970       return I == RetI ? RedCost : 0;
6971   } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) &&
6972              !TheLoop->isLoopInvariant(RedOp)) {
6973     // Matched reduce(ext(A))
6974     bool IsUnsigned = isa<ZExtInst>(RedOp);
6975     auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
6976     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6977         /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6978         CostKind);
6979 
6980     InstructionCost ExtCost =
6981         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
6982                              TTI::CastContextHint::None, CostKind, RedOp);
6983     if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
6984       return I == RetI ? RedCost : 0;
6985   } else if (RedOp &&
6986              match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) {
6987     if (match(Op0, m_ZExtOrSExt(m_Value())) &&
6988         Op0->getOpcode() == Op1->getOpcode() &&
6989         !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
6990       bool IsUnsigned = isa<ZExtInst>(Op0);
6991       Type *Op0Ty = Op0->getOperand(0)->getType();
6992       Type *Op1Ty = Op1->getOperand(0)->getType();
6993       Type *LargestOpTy =
6994           Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty
6995                                                                     : Op0Ty;
6996       auto *ExtType = VectorType::get(LargestOpTy, VectorTy);
6997 
6998       // Matched reduce(mul(ext(A), ext(B))), where the two ext may be of
6999       // different sizes. We take the largest type as the ext to reduce, and add
7000       // the remaining cost as, for example reduce(mul(ext(ext(A)), ext(B))).
7001       InstructionCost ExtCost0 = TTI.getCastInstrCost(
7002           Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy),
7003           TTI::CastContextHint::None, CostKind, Op0);
7004       InstructionCost ExtCost1 = TTI.getCastInstrCost(
7005           Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy),
7006           TTI::CastContextHint::None, CostKind, Op1);
7007       InstructionCost MulCost =
7008           TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
7009 
7010       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
7011           /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
7012           CostKind);
7013       InstructionCost ExtraExtCost = 0;
7014       if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) {
7015         Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1;
7016         ExtraExtCost = TTI.getCastInstrCost(
7017             ExtraExtOp->getOpcode(), ExtType,
7018             VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy),
7019             TTI::CastContextHint::None, CostKind, ExtraExtOp);
7020       }
7021 
7022       if (RedCost.isValid() &&
7023           (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost))
7024         return I == RetI ? RedCost : 0;
7025     } else if (!match(I, m_ZExtOrSExt(m_Value()))) {
7026       // Matched reduce(mul())
7027       InstructionCost MulCost =
7028           TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
7029 
7030       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
7031           /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy,
7032           CostKind);
7033 
7034       if (RedCost.isValid() && RedCost < MulCost + BaseCost)
7035         return I == RetI ? RedCost : 0;
7036     }
7037   }
7038 
7039   return I == RetI ? Optional<InstructionCost>(BaseCost) : None;
7040 }
7041 
7042 InstructionCost
7043 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
7044                                                      ElementCount VF) {
7045   // Calculate scalar cost only. Vectorization cost should be ready at this
7046   // moment.
7047   if (VF.isScalar()) {
7048     Type *ValTy = getLoadStoreType(I);
7049     const Align Alignment = getLoadStoreAlignment(I);
7050     unsigned AS = getLoadStoreAddressSpace(I);
7051 
7052     return TTI.getAddressComputationCost(ValTy) +
7053            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
7054                                TTI::TCK_RecipThroughput, I);
7055   }
7056   return getWideningCost(I, VF);
7057 }
7058 
7059 LoopVectorizationCostModel::VectorizationCostTy
7060 LoopVectorizationCostModel::getInstructionCost(Instruction *I,
7061                                                ElementCount VF) {
7062   // If we know that this instruction will remain uniform, check the cost of
7063   // the scalar version.
7064   if (isUniformAfterVectorization(I, VF))
7065     VF = ElementCount::getFixed(1);
7066 
7067   if (VF.isVector() && isProfitableToScalarize(I, VF))
7068     return VectorizationCostTy(InstsToScalarize[VF][I], false);
7069 
7070   // Forced scalars do not have any scalarization overhead.
7071   auto ForcedScalar = ForcedScalars.find(VF);
7072   if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
7073     auto InstSet = ForcedScalar->second;
7074     if (InstSet.count(I))
7075       return VectorizationCostTy(
7076           (getInstructionCost(I, ElementCount::getFixed(1)).first *
7077            VF.getKnownMinValue()),
7078           false);
7079   }
7080 
7081   Type *VectorTy;
7082   InstructionCost C = getInstructionCost(I, VF, VectorTy);
7083 
7084   bool TypeNotScalarized = false;
7085   if (VF.isVector() && VectorTy->isVectorTy()) {
7086     unsigned NumParts = TTI.getNumberOfParts(VectorTy);
7087     if (NumParts)
7088       TypeNotScalarized = NumParts < VF.getKnownMinValue();
7089     else
7090       C = InstructionCost::getInvalid();
7091   }
7092   return VectorizationCostTy(C, TypeNotScalarized);
7093 }
7094 
7095 InstructionCost
7096 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
7097                                                      ElementCount VF) const {
7098 
7099   // There is no mechanism yet to create a scalable scalarization loop,
7100   // so this is currently Invalid.
7101   if (VF.isScalable())
7102     return InstructionCost::getInvalid();
7103 
7104   if (VF.isScalar())
7105     return 0;
7106 
7107   InstructionCost Cost = 0;
7108   Type *RetTy = ToVectorTy(I->getType(), VF);
7109   if (!RetTy->isVoidTy() &&
7110       (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
7111     Cost += TTI.getScalarizationOverhead(
7112         cast<VectorType>(RetTy), APInt::getAllOnes(VF.getKnownMinValue()), true,
7113         false);
7114 
7115   // Some targets keep addresses scalar.
7116   if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
7117     return Cost;
7118 
7119   // Some targets support efficient element stores.
7120   if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
7121     return Cost;
7122 
7123   // Collect operands to consider.
7124   CallInst *CI = dyn_cast<CallInst>(I);
7125   Instruction::op_range Ops = CI ? CI->args() : I->operands();
7126 
7127   // Skip operands that do not require extraction/scalarization and do not incur
7128   // any overhead.
7129   SmallVector<Type *> Tys;
7130   for (auto *V : filterExtractingOperands(Ops, VF))
7131     Tys.push_back(MaybeVectorizeType(V->getType(), VF));
7132   return Cost + TTI.getOperandsScalarizationOverhead(
7133                     filterExtractingOperands(Ops, VF), Tys);
7134 }
7135 
7136 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
7137   if (VF.isScalar())
7138     return;
7139   NumPredStores = 0;
7140   for (BasicBlock *BB : TheLoop->blocks()) {
7141     // For each instruction in the old loop.
7142     for (Instruction &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
7144       if (!Ptr)
7145         continue;
7146 
7147       // TODO: We should generate better code and update the cost model for
7148       // predicated uniform stores. Today they are treated as any other
7149       // predicated store (see added test cases in
7150       // invariant-store-vectorization.ll).
7151       if (isa<StoreInst>(&I) && isScalarWithPredication(&I, VF))
7152         NumPredStores++;
7153 
7154       if (Legal->isUniformMemOp(I)) {
7155         // TODO: Avoid replicating loads and stores instead of
7156         // relying on instcombine to remove them.
7157         // Load: Scalar load + broadcast
7158         // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
7159         InstructionCost Cost;
7160         if (isa<StoreInst>(&I) && VF.isScalable() &&
7161             isLegalGatherOrScatter(&I, VF)) {
7162           Cost = getGatherScatterCost(&I, VF);
7163           setWideningDecision(&I, VF, CM_GatherScatter, Cost);
7164         } else {
7165           assert((isa<LoadInst>(&I) || !VF.isScalable()) &&
7166                  "Cannot yet scalarize uniform stores");
7167           Cost = getUniformMemOpCost(&I, VF);
7168           setWideningDecision(&I, VF, CM_Scalarize, Cost);
7169         }
7170         continue;
7171       }
7172 
7173       // We assume that widening is the best solution when possible.
7174       if (memoryInstructionCanBeWidened(&I, VF)) {
7175         InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
7176         int ConsecutiveStride = Legal->isConsecutivePtr(
7177             getLoadStoreType(&I), getLoadStorePointerOperand(&I));
7178         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
7179                "Expected consecutive stride.");
7180         InstWidening Decision =
7181             ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
7182         setWideningDecision(&I, VF, Decision, Cost);
7183         continue;
7184       }
7185 
7186       // Choose between Interleaving, Gather/Scatter or Scalarization.
7187       InstructionCost InterleaveCost = InstructionCost::getInvalid();
7188       unsigned NumAccesses = 1;
7189       if (isAccessInterleaved(&I)) {
7190         auto Group = getInterleavedAccessGroup(&I);
7191         assert(Group && "Fail to get an interleaved access group.");
7192 
7193         // Make one decision for the whole group.
7194         if (getWideningDecision(&I, VF) != CM_Unknown)
7195           continue;
7196 
7197         NumAccesses = Group->getNumMembers();
7198         if (interleavedAccessCanBeWidened(&I, VF))
7199           InterleaveCost = getInterleaveGroupCost(&I, VF);
7200       }
7201 
7202       InstructionCost GatherScatterCost =
7203           isLegalGatherOrScatter(&I, VF)
7204               ? getGatherScatterCost(&I, VF) * NumAccesses
7205               : InstructionCost::getInvalid();
7206 
7207       InstructionCost ScalarizationCost =
7208           getMemInstScalarizationCost(&I, VF) * NumAccesses;
7209 
7210       // Choose the better solution for the current VF, record this
7211       // decision and use it during vectorization.
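           // For example (illustrative numbers only): with InterleaveCost = 6,
           // GatherScatterCost = 8 and ScalarizationCost = 10 we pick
           // CM_Interleave; an invalid InterleaveCost falls through to the
           // gather/scatter vs. scalarization comparison below.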
7212       InstructionCost Cost;
7213       InstWidening Decision;
7214       if (InterleaveCost <= GatherScatterCost &&
7215           InterleaveCost < ScalarizationCost) {
7216         Decision = CM_Interleave;
7217         Cost = InterleaveCost;
7218       } else if (GatherScatterCost < ScalarizationCost) {
7219         Decision = CM_GatherScatter;
7220         Cost = GatherScatterCost;
7221       } else {
7222         Decision = CM_Scalarize;
7223         Cost = ScalarizationCost;
7224       }
7225       // If the instruction belongs to an interleave group, the whole group
7226       // receives the same decision. The cost is computed for the whole
7227       // group but will actually be assigned to one instruction.
7228       if (auto Group = getInterleavedAccessGroup(&I))
7229         setWideningDecision(Group, VF, Decision, Cost);
7230       else
7231         setWideningDecision(&I, VF, Decision, Cost);
7232     }
7233   }
7234 
7235   // Make sure that any load of address and any other address computation
7236   // remains scalar unless there is gather/scatter support. This avoids
7237   // inevitable extracts into address registers, and also has the benefit of
7238   // activating LSR more, since that pass can't optimize vectorized
7239   // addresses.
7240   if (TTI.prefersVectorizedAddressing())
7241     return;
7242 
7243   // Start with all scalar pointer uses.
7244   SmallPtrSet<Instruction *, 8> AddrDefs;
7245   for (BasicBlock *BB : TheLoop->blocks())
7246     for (Instruction &I : *BB) {
7247       Instruction *PtrDef =
7248         dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
7249       if (PtrDef && TheLoop->contains(PtrDef) &&
7250           getWideningDecision(&I, VF) != CM_GatherScatter)
7251         AddrDefs.insert(PtrDef);
7252     }
7253 
7254   // Add all instructions used to generate the addresses.
7255   SmallVector<Instruction *, 4> Worklist;
7256   append_range(Worklist, AddrDefs);
7257   while (!Worklist.empty()) {
7258     Instruction *I = Worklist.pop_back_val();
7259     for (auto &Op : I->operands())
7260       if (auto *InstOp = dyn_cast<Instruction>(Op))
7261         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
7262             AddrDefs.insert(InstOp).second)
7263           Worklist.push_back(InstOp);
7264   }
7265 
7266   for (auto *I : AddrDefs) {
7267     if (isa<LoadInst>(I)) {
7268       // Setting the desired widening decision should ideally be handled by
7269       // cost functions, but since this involves the task of finding out
7270       // if the loaded register is involved in an address computation, it is
7271       // instead changed here when we know this is the case.
7272       InstWidening Decision = getWideningDecision(I, VF);
7273       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
7274         // Scalarize a widened load of address.
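             // The cost below models the scalarized load as VF independent
             // scalar loads, i.e. VF times the cost of the same load at
             // VF = 1, with no extra scalarization overhead added.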
7275         setWideningDecision(
7276             I, VF, CM_Scalarize,
7277             (VF.getKnownMinValue() *
7278              getMemoryInstructionCost(I, ElementCount::getFixed(1))));
7279       else if (auto Group = getInterleavedAccessGroup(I)) {
7280         // Scalarize an interleave group of address loads.
7281         for (unsigned I = 0; I < Group->getFactor(); ++I) {
7282           if (Instruction *Member = Group->getMember(I))
7283             setWideningDecision(
7284                 Member, VF, CM_Scalarize,
7285                 (VF.getKnownMinValue() *
7286                  getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
7287         }
7288       }
7289     } else
7290       // Make sure I gets scalarized and receives a cost estimate without
7291       // scalarization overhead.
7292       ForcedScalars[VF].insert(I);
7293   }
7294 }
7295 
7296 InstructionCost
7297 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
7298                                                Type *&VectorTy) {
7299   Type *RetTy = I->getType();
7300   if (canTruncateToMinimalBitwidth(I, VF))
7301     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
7302   auto SE = PSE.getSE();
7303   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
7304 
7305   auto hasSingleCopyAfterVectorization = [this](Instruction *I,
7306                                                 ElementCount VF) -> bool {
7307     if (VF.isScalar())
7308       return true;
7309 
7310     auto Scalarized = InstsToScalarize.find(VF);
7311     assert(Scalarized != InstsToScalarize.end() &&
7312            "VF not yet analyzed for scalarization profitability");
7313     return !Scalarized->second.count(I) &&
7314            llvm::all_of(I->users(), [&](User *U) {
7315              auto *UI = cast<Instruction>(U);
7316              return !Scalarized->second.count(UI);
7317            });
7318   };
7319   (void) hasSingleCopyAfterVectorization;
7320 
7321   if (isScalarAfterVectorization(I, VF)) {
7322     // With the exception of GEPs and PHIs, after scalarization there should
7323     // only be one copy of the instruction generated in the loop. This is
7324     // because the VF is either 1, or any instructions that need scalarizing
7325     // have already been dealt with by the time we get here. As a result,
7326     // we don't have to multiply the instruction cost by VF.
7327     assert(I->getOpcode() == Instruction::GetElementPtr ||
7328            I->getOpcode() == Instruction::PHI ||
7329            (I->getOpcode() == Instruction::BitCast &&
7330             I->getType()->isPointerTy()) ||
7331            hasSingleCopyAfterVectorization(I, VF));
7332     VectorTy = RetTy;
7333   } else
7334     VectorTy = ToVectorTy(RetTy, VF);
7335 
7336   // TODO: We need to estimate the cost of intrinsic calls.
7337   switch (I->getOpcode()) {
7338   case Instruction::GetElementPtr:
7339     // We mark this instruction as zero-cost because the cost of GEPs in
7340     // vectorized code depends on whether the corresponding memory instruction
7341     // is scalarized or not. Therefore, we handle GEPs with the memory
7342     // instruction cost.
7343     return 0;
7344   case Instruction::Br: {
7345     // In cases of scalarized and predicated instructions, there will be VF
7346     // predicated blocks in the vectorized loop. Each branch around these
7347     // blocks also requires an extract of its vector compare i1 element.
7348     bool ScalarPredicatedBB = false;
7349     BranchInst *BI = cast<BranchInst>(I);
7350     if (VF.isVector() && BI->isConditional() &&
7351         (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
7352          PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
7353       ScalarPredicatedBB = true;
7354 
7355     if (ScalarPredicatedBB) {
7356       // Not possible to scalarize scalable vector with predicated instructions.
7357       if (VF.isScalable())
7358         return InstructionCost::getInvalid();
7359       // Return cost for branches around scalarized and predicated blocks.
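           // For example (illustrative), with VF = 4 this is the overhead of
           // extracting the 4 i1 lanes of the vector compare plus the cost of
           // 4 scalar branches.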
7360       auto *Vec_i1Ty =
7361           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
7362       return (
7363           TTI.getScalarizationOverhead(
7364               Vec_i1Ty, APInt::getAllOnes(VF.getFixedValue()), false, true) +
7365           (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue()));
7366     } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
7367       // The back-edge branch will remain, as will all scalar branches.
7368       return TTI.getCFInstrCost(Instruction::Br, CostKind);
7369     else
7370       // This branch will be eliminated by if-conversion.
7371       return 0;
7372     // Note: We currently assume zero cost for an unconditional branch inside
7373     // a predicated block since it will become a fall-through, although we
7374     // may decide in the future to call TTI for all branches.
7375   }
7376   case Instruction::PHI: {
7377     auto *Phi = cast<PHINode>(I);
7378 
7379     // First-order recurrences are replaced by vector shuffles inside the loop.
7380     // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
7381     if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi))
7382       return TTI.getShuffleCost(
7383           TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy),
7384           None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1));
7385 
7386     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
7387     // converted into select instructions. We require N - 1 selects per phi
7388     // node, where N is the number of incoming values.
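         // For example (illustrative), a phi with three incoming values
         // (a, b, c) becomes two selects, roughly:
         //   %s   = select %mask.b, b, a
         //   %phi = select %mask.c, c, %s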
7389     if (VF.isVector() && Phi->getParent() != TheLoop->getHeader())
7390       return (Phi->getNumIncomingValues() - 1) *
7391              TTI.getCmpSelInstrCost(
7392                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
7393                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
7394                  CmpInst::BAD_ICMP_PREDICATE, CostKind);
7395 
7396     return TTI.getCFInstrCost(Instruction::PHI, CostKind);
7397   }
7398   case Instruction::UDiv:
7399   case Instruction::SDiv:
7400   case Instruction::URem:
7401   case Instruction::SRem:
7402     // If we have a predicated instruction, it may not be executed for each
7403     // vector lane. Get the scalarization cost and scale this amount by the
7404     // probability of executing the predicated block. If the instruction is not
7405     // predicated, we fall through to the next case.
7406     if (VF.isVector() && isScalarWithPredication(I, VF)) {
7407       InstructionCost Cost = 0;
7408 
7409       // These instructions have a non-void type, so account for the phi nodes
7410       // that we will create. This cost is likely to be zero. The phi node
7411       // cost, if any, should be scaled by the block probability because it
7412       // models a copy at the end of each predicated block.
7413       Cost += VF.getKnownMinValue() *
7414               TTI.getCFInstrCost(Instruction::PHI, CostKind);
7415 
7416       // The cost of the non-predicated instruction.
7417       Cost += VF.getKnownMinValue() *
7418               TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);
7419 
7420       // The cost of insertelement and extractelement instructions needed for
7421       // scalarization.
7422       Cost += getScalarizationOverhead(I, VF);
7423 
7424       // Scale the cost by the probability of executing the predicated blocks.
7425       // This assumes the predicated block for each vector lane is equally
7426       // likely.
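           // For example (illustrative), with VF = 4, per-lane phi cost P and
           // per-lane divide cost D, the sum above is 4 * (P + D) plus the
           // insert/extract overhead, which is then discounted by the assumed
           // probability of the predicated block executing.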
7427       return Cost / getReciprocalPredBlockProb();
7428     }
7429     LLVM_FALLTHROUGH;
7430   case Instruction::Add:
7431   case Instruction::FAdd:
7432   case Instruction::Sub:
7433   case Instruction::FSub:
7434   case Instruction::Mul:
7435   case Instruction::FMul:
7436   case Instruction::FDiv:
7437   case Instruction::FRem:
7438   case Instruction::Shl:
7439   case Instruction::LShr:
7440   case Instruction::AShr:
7441   case Instruction::And:
7442   case Instruction::Or:
7443   case Instruction::Xor: {
7444     // Since we will replace the stride by 1, the multiplication should go away.
7445     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
7446       return 0;
7447 
7448     // Detect reduction patterns
7449     if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7450       return *RedCost;
7451 
7452     // Certain instructions can be cheaper to vectorize if they have a constant
7453     // second vector operand. One example of this is shifts on x86.
7454     Value *Op2 = I->getOperand(1);
7455     TargetTransformInfo::OperandValueProperties Op2VP;
7456     TargetTransformInfo::OperandValueKind Op2VK =
7457         TTI.getOperandInfo(Op2, Op2VP);
7458     if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
7459       Op2VK = TargetTransformInfo::OK_UniformValue;
7460 
7461     SmallVector<const Value *, 4> Operands(I->operand_values());
7462     return TTI.getArithmeticInstrCost(
7463         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7464         Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
7465   }
7466   case Instruction::FNeg: {
7467     return TTI.getArithmeticInstrCost(
7468         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7469         TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None,
7470         TargetTransformInfo::OP_None, I->getOperand(0), I);
7471   }
7472   case Instruction::Select: {
7473     SelectInst *SI = cast<SelectInst>(I);
7474     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
7475     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
7476 
7477     const Value *Op0, *Op1;
7478     using namespace llvm::PatternMatch;
7479     if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
7480                         match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
7481       // select x, y, false --> x & y
7482       // select x, true, y --> x | y
7483       TTI::OperandValueProperties Op1VP = TTI::OP_None;
7484       TTI::OperandValueProperties Op2VP = TTI::OP_None;
7485       TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP);
7486       TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP);
7487       assert(Op0->getType()->getScalarSizeInBits() == 1 &&
7488               Op1->getType()->getScalarSizeInBits() == 1);
7489 
7490       SmallVector<const Value *, 2> Operands{Op0, Op1};
7491       return TTI.getArithmeticInstrCost(
7492           match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And, VectorTy,
7493           CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I);
7494     }
7495 
7496     Type *CondTy = SI->getCondition()->getType();
7497     if (!ScalarCond)
7498       CondTy = VectorType::get(CondTy, VF);
7499 
7500     CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
7501     if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition()))
7502       Pred = Cmp->getPredicate();
7503     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, Pred,
7504                                   CostKind, I);
7505   }
7506   case Instruction::ICmp:
7507   case Instruction::FCmp: {
7508     Type *ValTy = I->getOperand(0)->getType();
7509     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
7510     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
7511       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
7512     VectorTy = ToVectorTy(ValTy, VF);
7513     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr,
7514                                   cast<CmpInst>(I)->getPredicate(), CostKind,
7515                                   I);
7516   }
7517   case Instruction::Store:
7518   case Instruction::Load: {
7519     ElementCount Width = VF;
7520     if (Width.isVector()) {
7521       InstWidening Decision = getWideningDecision(I, Width);
7522       assert(Decision != CM_Unknown &&
7523              "CM decision should be taken at this point");
7524       if (Decision == CM_Scalarize)
7525         Width = ElementCount::getFixed(1);
7526     }
7527     VectorTy = ToVectorTy(getLoadStoreType(I), Width);
7528     return getMemoryInstructionCost(I, VF);
7529   }
7530   case Instruction::BitCast:
7531     if (I->getType()->isPointerTy())
7532       return 0;
7533     LLVM_FALLTHROUGH;
7534   case Instruction::ZExt:
7535   case Instruction::SExt:
7536   case Instruction::FPToUI:
7537   case Instruction::FPToSI:
7538   case Instruction::FPExt:
7539   case Instruction::PtrToInt:
7540   case Instruction::IntToPtr:
7541   case Instruction::SIToFP:
7542   case Instruction::UIToFP:
7543   case Instruction::Trunc:
7544   case Instruction::FPTrunc: {
7545     // Computes the CastContextHint from a Load/Store instruction.
7546     auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
7547       assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
7548              "Expected a load or a store!");
7549 
7550       if (VF.isScalar() || !TheLoop->contains(I))
7551         return TTI::CastContextHint::Normal;
7552 
7553       switch (getWideningDecision(I, VF)) {
7554       case LoopVectorizationCostModel::CM_GatherScatter:
7555         return TTI::CastContextHint::GatherScatter;
7556       case LoopVectorizationCostModel::CM_Interleave:
7557         return TTI::CastContextHint::Interleave;
7558       case LoopVectorizationCostModel::CM_Scalarize:
7559       case LoopVectorizationCostModel::CM_Widen:
7560         return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked
7561                                         : TTI::CastContextHint::Normal;
7562       case LoopVectorizationCostModel::CM_Widen_Reverse:
7563         return TTI::CastContextHint::Reversed;
7564       case LoopVectorizationCostModel::CM_Unknown:
7565         llvm_unreachable("Instr did not go through cost modelling?");
7566       }
7567 
7568       llvm_unreachable("Unhandled case!");
7569     };
7570 
7571     unsigned Opcode = I->getOpcode();
7572     TTI::CastContextHint CCH = TTI::CastContextHint::None;
7573     // For Trunc, the context is the only user, which must be a StoreInst.
7574     if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
7575       if (I->hasOneUse())
7576         if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
7577           CCH = ComputeCCH(Store);
7578     }
7579     // For Z/Sext, the context is the operand, which must be a LoadInst.
7580     else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
7581              Opcode == Instruction::FPExt) {
7582       if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
7583         CCH = ComputeCCH(Load);
7584     }
7585 
7586     // We optimize the truncation of induction variables having constant
7587     // integer steps. The cost of these truncations is the same as the scalar
7588     // operation.
7589     if (isOptimizableIVTruncate(I, VF)) {
7590       auto *Trunc = cast<TruncInst>(I);
7591       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
7592                                   Trunc->getSrcTy(), CCH, CostKind, Trunc);
7593     }
7594 
7595     // Detect reduction patterns
7596     if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7597       return *RedCost;
7598 
7599     Type *SrcScalarTy = I->getOperand(0)->getType();
7600     Type *SrcVecTy =
7601         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
7602     if (canTruncateToMinimalBitwidth(I, VF)) {
7603       // This cast is going to be shrunk. This may remove the cast or it might
7604       // turn it into a slightly different cast. For example, if MinBW == 16,
7605       // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
7606       //
7607       // Calculate the modified src and dest types.
7608       Type *MinVecTy = VectorTy;
7609       if (Opcode == Instruction::Trunc) {
7610         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
7611         VectorTy =
7612             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7613       } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
7614         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
7615         VectorTy =
7616             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7617       }
7618     }
7619 
7620     return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
7621   }
7622   case Instruction::Call: {
7623     if (RecurrenceDescriptor::isFMulAddIntrinsic(I))
7624       if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7625         return *RedCost;
7626     bool NeedToScalarize;
7627     CallInst *CI = cast<CallInst>(I);
7628     InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
7629     if (getVectorIntrinsicIDForCall(CI, TLI)) {
7630       InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF);
7631       return std::min(CallCost, IntrinsicCost);
7632     }
7633     return CallCost;
7634   }
7635   case Instruction::ExtractValue:
7636     return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput);
7637   case Instruction::Alloca:
7638     // We cannot easily widen alloca to a scalable alloca, as
7639     // the result would need to be a vector of pointers.
7640     if (VF.isScalable())
7641       return InstructionCost::getInvalid();
7642     LLVM_FALLTHROUGH;
7643   default:
7644     // This opcode is unknown. Assume that it is the same as 'mul'.
7645     return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
7646   } // end of switch.
7647 }
7648 
7649 char LoopVectorize::ID = 0;
7650 
7651 static const char lv_name[] = "Loop Vectorization";
7652 
7653 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
7654 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
7655 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
7656 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
7657 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
7658 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
7659 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
7660 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
7661 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
7662 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
7663 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
7664 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
7665 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
7666 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
7667 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
7668 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
7669 
7670 namespace llvm {
7671 
7672 Pass *createLoopVectorizePass() { return new LoopVectorize(); }
7673 
7674 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
7675                               bool VectorizeOnlyWhenForced) {
7676   return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
7677 }
7678 
7679 } // end namespace llvm
7680 
7681 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
7682   // Check if the pointer operand of a load or store instruction is
7683   // consecutive.
7684   if (auto *Ptr = getLoadStorePointerOperand(Inst))
7685     return Legal->isConsecutivePtr(getLoadStoreType(Inst), Ptr);
7686   return false;
7687 }
7688 
7689 void LoopVectorizationCostModel::collectValuesToIgnore() {
7690   // Ignore ephemeral values.
7691   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
7692 
7693   // Ignore type-promoting instructions we identified during reduction
7694   // detection.
7695   for (auto &Reduction : Legal->getReductionVars()) {
7696     const RecurrenceDescriptor &RedDes = Reduction.second;
7697     const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
7698     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7699   }
7700   // Ignore type-casting instructions we identified during induction
7701   // detection.
7702   for (auto &Induction : Legal->getInductionVars()) {
7703     const InductionDescriptor &IndDes = Induction.second;
7704     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
7705     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7706   }
7707 }
7708 
7709 void LoopVectorizationCostModel::collectInLoopReductions() {
7710   for (auto &Reduction : Legal->getReductionVars()) {
7711     PHINode *Phi = Reduction.first;
7712     const RecurrenceDescriptor &RdxDesc = Reduction.second;
7713 
7714     // We don't collect reductions that are type promoted (yet).
7715     if (RdxDesc.getRecurrenceType() != Phi->getType())
7716       continue;
7717 
7718     // If the target would prefer this reduction to happen "in-loop", then we
7719     // want to record it as such.
7720     unsigned Opcode = RdxDesc.getOpcode();
7721     if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) &&
7722         !TTI.preferInLoopReduction(Opcode, Phi->getType(),
7723                                    TargetTransformInfo::ReductionFlags()))
7724       continue;
7725 
7726     // Check that we can correctly put the reductions into the loop, by
7727     // finding the chain of operations that leads from the phi to the loop
7728     // exit value.
7729     SmallVector<Instruction *, 4> ReductionOperations =
7730         RdxDesc.getReductionOpChain(Phi, TheLoop);
7731     bool InLoop = !ReductionOperations.empty();
7732     if (InLoop) {
7733       InLoopReductionChains[Phi] = ReductionOperations;
7734       // Add the elements to InLoopReductionImmediateChains for cost modelling.
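           // For example (illustrative), for a chain Phi -> %add1 -> %add2
           // this records %add1 -> Phi and %add2 -> %add1.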
7735       Instruction *LastChain = Phi;
7736       for (auto *I : ReductionOperations) {
7737         InLoopReductionImmediateChains[I] = LastChain;
7738         LastChain = I;
7739       }
7740     }
7741     LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
7742                       << " reduction for phi: " << *Phi << "\n");
7743   }
7744 }
7745 
7746 // TODO: we could return a pair of values that specify the max VF and
7747 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
7748 // `buildVPlans(VF, VF)`. We cannot do this yet because VPlan currently
7749 // doesn't have a cost model that can choose which plan to execute if
7750 // more than one is generated.
7751 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
7752                                  LoopVectorizationCostModel &CM) {
7753   unsigned WidestType;
7754   std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
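       // For example (illustrative), 256-bit wide vector registers and a
       // widest element type of 32 bits yield a VF of 8.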
7755   return WidestVectorRegBits / WidestType;
7756 }
7757 
7758 VectorizationFactor
7759 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
7760   assert(!UserVF.isScalable() && "scalable vectors not yet supported");
7761   ElementCount VF = UserVF;
7762   // Outer loop handling: outer loops may require CFG and instruction-level
7763   // transformations before even evaluating whether vectorization is profitable.
7764   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
7765   // the vectorization pipeline.
7766   if (!OrigLoop->isInnermost()) {
7767     // If the user doesn't provide a vectorization factor, determine a
7768     // reasonable one.
7769     if (UserVF.isZero()) {
7770       VF = ElementCount::getFixed(determineVPlanVF(
7771           TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
7772               .getFixedSize(),
7773           CM));
7774       LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
7775 
7776       // Make sure we have a VF > 1 for stress testing.
7777       if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
7778         LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
7779                           << "overriding computed VF.\n");
7780         VF = ElementCount::getFixed(4);
7781       }
7782     }
7783     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7784     assert(isPowerOf2_32(VF.getKnownMinValue()) &&
7785            "VF needs to be a power of two");
7786     LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
7787                       << "VF " << VF << " to build VPlans.\n");
7788     buildVPlans(VF, VF);
7789 
7790     // For VPlan build stress testing, we bail out after VPlan construction.
7791     if (VPlanBuildStressTest)
7792       return VectorizationFactor::Disabled();
7793 
7794     return {VF, 0 /*Cost*/};
7795   }
7796 
7797   LLVM_DEBUG(
7798       dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
7799                 "VPlan-native path.\n");
7800   return VectorizationFactor::Disabled();
7801 }
7802 
7803 Optional<VectorizationFactor>
7804 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
7805   assert(OrigLoop->isInnermost() && "Inner loop expected.");
7806   FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
7807   if (!MaxFactors) // Cases that should not be vectorized or interleaved.
7808     return None;
7809 
7810   // Invalidate interleave groups if all blocks of the loop will be predicated.
7811   if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) &&
7812       !useMaskedInterleavedAccesses(*TTI)) {
7813     LLVM_DEBUG(
7814         dbgs()
7815         << "LV: Invalidate all interleaved groups due to fold-tail by masking "
7816            "which requires masked-interleaved support.\n");
7817     if (CM.InterleaveInfo.invalidateGroups())
7818       // Invalidating interleave groups also requires invalidating all decisions
7819       // based on them, which includes widening decisions and uniform and scalar
7820       // values.
7821       CM.invalidateCostModelingDecisions();
7822   }
7823 
7824   ElementCount MaxUserVF =
7825       UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF;
7826   bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF);
7827   if (!UserVF.isZero() && UserVFIsLegal) {
7828     assert(isPowerOf2_32(UserVF.getKnownMinValue()) &&
7829            "VF needs to be a power of two");
7830     // Collect the instructions (and their associated costs) that will be more
7831     // profitable to scalarize.
7832     if (CM.selectUserVectorizationFactor(UserVF)) {
7833       LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
7834       CM.collectInLoopReductions();
7835       buildVPlansWithVPRecipes(UserVF, UserVF);
7836       LLVM_DEBUG(printPlans(dbgs()));
7837       return {{UserVF, 0}};
7838     } else
7839       reportVectorizationInfo("UserVF ignored because of invalid costs.",
7840                               "InvalidCost", ORE, OrigLoop);
7841   }
7842 
7843   // Populate the set of Vectorization Factor Candidates.
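       // For example (illustrative), with MaxFactors.FixedVF = 16 and
       // MaxFactors.ScalableVF = vscale x 4, the candidates become
       // {1, 2, 4, 8, 16} and {vscale x 1, vscale x 2, vscale x 4}.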
7844   ElementCountSet VFCandidates;
7845   for (auto VF = ElementCount::getFixed(1);
7846        ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2)
7847     VFCandidates.insert(VF);
7848   for (auto VF = ElementCount::getScalable(1);
7849        ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2)
7850     VFCandidates.insert(VF);
7851 
7852   for (const auto &VF : VFCandidates) {
7853     // Collect Uniform and Scalar instructions after vectorization with VF.
7854     CM.collectUniformsAndScalars(VF);
7855 
7856     // Collect the instructions (and their associated costs) that will be more
7857     // profitable to scalarize.
7858     if (VF.isVector())
7859       CM.collectInstsToScalarize(VF);
7860   }
7861 
7862   CM.collectInLoopReductions();
7863   buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF);
7864   buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF);
7865 
7866   LLVM_DEBUG(printPlans(dbgs()));
7867   if (!MaxFactors.hasVector())
7868     return VectorizationFactor::Disabled();
7869 
7870   // Select the optimal vectorization factor.
7871   auto SelectedVF = CM.selectVectorizationFactor(VFCandidates);
7872 
7873   // Check if it is profitable to vectorize with runtime checks.
7874   unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks();
7875   if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) {
7876     bool PragmaThresholdReached =
7877         NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
7878     bool ThresholdReached =
7879         NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
7880     if ((ThresholdReached && !Hints.allowReordering()) ||
7881         PragmaThresholdReached) {
7882       ORE->emit([&]() {
7883         return OptimizationRemarkAnalysisAliasing(
7884                    DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(),
7885                    OrigLoop->getHeader())
7886                << "loop not vectorized: cannot prove it is safe to reorder "
7887                   "memory operations";
7888       });
7889       LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
7890       Hints.emitRemarkWithHints();
7891       return VectorizationFactor::Disabled();
7892     }
7893   }
7894   return SelectedVF;
7895 }
7896 
7897 VPlan &LoopVectorizationPlanner::getBestPlanFor(ElementCount VF) const {
7898   assert(count_if(VPlans,
7899                   [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) ==
7900              1 &&
7901          "Best VF does not have a single VPlan.");
7902 
7903   for (const VPlanPtr &Plan : VPlans) {
7904     if (Plan->hasVF(VF))
7905       return *Plan.get();
7906   }
7907   llvm_unreachable("No plan found!");
7908 }
7909 
7910 void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF,
7911                                            VPlan &BestVPlan,
7912                                            InnerLoopVectorizer &ILV,
7913                                            DominatorTree *DT) {
7914   LLVM_DEBUG(dbgs() << "Executing best plan with VF=" << BestVF
7915                     << ", UF=" << BestUF << '\n');
7916 
7917   // Perform the actual loop transformation.
7918 
7919   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
7920   VPTransformState State{BestVF, BestUF, LI, DT, ILV.Builder, &ILV, &BestVPlan};
7921   Value *CanonicalIVStartValue;
7922   std::tie(State.CFG.PrevBB, CanonicalIVStartValue) =
7923       ILV.createVectorizedLoopSkeleton();
7924   ILV.collectPoisonGeneratingRecipes(State);
7925 
7926   ILV.printDebugTracesAtStart();
7927 
7928   //===------------------------------------------------===//
7929   //
7930   // Notice: any optimization or new instruction that goes
7931   // into the code below should also be implemented in
7932   // the cost-model.
7933   //
7934   //===------------------------------------------------===//
7935 
7936   // 2. Copy and widen instructions from the old loop into the new loop.
7937   BestVPlan.prepareToExecute(ILV.getOrCreateTripCount(nullptr),
7938                              ILV.getOrCreateVectorTripCount(nullptr),
7939                              CanonicalIVStartValue, State);
7940   BestVPlan.execute(&State);
7941 
7942   // Keep all loop hints from the original loop on the vector loop (we'll
7943   // replace the vectorizer-specific hints below).
7944   MDNode *OrigLoopID = OrigLoop->getLoopID();
7945 
7946   Optional<MDNode *> VectorizedLoopID =
7947       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
7948                                       LLVMLoopVectorizeFollowupVectorized});
7949 
7950   Loop *L = LI->getLoopFor(State.CFG.PrevBB);
7951   if (VectorizedLoopID.hasValue())
7952     L->setLoopID(VectorizedLoopID.getValue());
7953   else {
7954     // Keep all loop hints from the original loop on the vector loop (we'll
7955     // replace the vectorizer-specific hints below).
7956     if (MDNode *LID = OrigLoop->getLoopID())
7957       L->setLoopID(LID);
7958 
7959     LoopVectorizeHints Hints(L, true, *ORE);
7960     Hints.setAlreadyVectorized();
7961   }
7962 
7963   // 3. Fix the vectorized code: take care of header phi's, live-outs,
7964   //    predication, updating analyses.
7965   ILV.fixVectorizedLoop(State);
7966 
7967   ILV.printDebugTracesAtEnd();
7968 }
7969 
7970 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
7971 void LoopVectorizationPlanner::printPlans(raw_ostream &O) {
7972   for (const auto &Plan : VPlans)
7973     if (PrintVPlansInDotFormat)
7974       Plan->printDOT(O);
7975     else
7976       Plan->print(O);
7977 }
7978 #endif
7979 
7980 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
7981     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
7982 
7983   // We create new control-flow for the vectorized loop, so the original exit
7984   // conditions will be dead after vectorization if they are only used by the
7985   // terminator.
7986   SmallVector<BasicBlock*> ExitingBlocks;
7987   OrigLoop->getExitingBlocks(ExitingBlocks);
7988   for (auto *BB : ExitingBlocks) {
7989     auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
7990     if (!Cmp || !Cmp->hasOneUse())
7991       continue;
7992 
7993     // TODO: we should introduce a getUniqueExitingBlocks on Loop
7994     if (!DeadInstructions.insert(Cmp).second)
7995       continue;
7996 
7997     // An operand of the icmp is often a dead trunc, used by IndUpdate.
7998     // TODO: can recurse through operands in general
7999     for (Value *Op : Cmp->operands()) {
8000       if (isa<TruncInst>(Op) && Op->hasOneUse())
8001         DeadInstructions.insert(cast<Instruction>(Op));
8002     }
8003   }
8004 
8005   // We create new "steps" for induction variable updates to which the original
8006   // induction variables map. An original update instruction will be dead if
8007   // all its users except the induction variable are dead.
8008   auto *Latch = OrigLoop->getLoopLatch();
8009   for (auto &Induction : Legal->getInductionVars()) {
8010     PHINode *Ind = Induction.first;
8011     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
8012 
8013     // If the tail is to be folded by masking, the primary induction variable,
8014     // if it exists, isn't dead: it will be used for masking. Don't kill it.
8015     if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction())
8016       continue;
8017 
8018     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
8019           return U == Ind || DeadInstructions.count(cast<Instruction>(U));
8020         }))
8021       DeadInstructions.insert(IndUpdate);
8022   }
8023 }
8024 
8025 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
8026 
8027 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
8028   SmallVector<Metadata *, 4> MDs;
8029   // Reserve first location for self reference to the LoopID metadata node.
8030   MDs.push_back(nullptr);
8031   bool IsUnrollMetadata = false;
8032   MDNode *LoopID = L->getLoopID();
8033   if (LoopID) {
8034     // First find existing loop unrolling disable metadata.
8035     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
8036       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
8037       if (MD) {
8038         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
8039         IsUnrollMetadata =
8040             S && S->getString().startswith("llvm.loop.unroll.disable");
8041       }
8042       MDs.push_back(LoopID->getOperand(i));
8043     }
8044   }
8045 
8046   if (!IsUnrollMetadata) {
8047     // Add runtime unroll disable metadata.
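         // The loop ends up with metadata roughly of the form (illustrative):
         //   !0 = distinct !{!0, ..., !1}
         //   !1 = !{!"llvm.loop.unroll.runtime.disable"}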
8048     LLVMContext &Context = L->getHeader()->getContext();
8049     SmallVector<Metadata *, 1> DisableOperands;
8050     DisableOperands.push_back(
8051         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
8052     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
8053     MDs.push_back(DisableNode);
8054     MDNode *NewLoopID = MDNode::get(Context, MDs);
8055     // Set operand 0 to refer to the loop id itself.
8056     NewLoopID->replaceOperandWith(0, NewLoopID);
8057     L->setLoopID(NewLoopID);
8058   }
8059 }
8060 
8061 //===--------------------------------------------------------------------===//
8062 // EpilogueVectorizerMainLoop
8063 //===--------------------------------------------------------------------===//
8064 
8065 /// This function is partially responsible for generating the control flow
8066 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
8067 std::pair<BasicBlock *, Value *>
8068 EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
8069   MDNode *OrigLoopID = OrigLoop->getLoopID();
8070   Loop *Lp = createVectorLoopSkeleton("");
8071 
8072   // Generate the code to check the minimum iteration count of the vector
8073   // epilogue (see below).
8074   EPI.EpilogueIterationCountCheck =
8075       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true);
8076   EPI.EpilogueIterationCountCheck->setName("iter.check");
8077 
8078   // Generate the code to check any assumptions that we've made for SCEV
8079   // expressions.
8080   EPI.SCEVSafetyCheck = emitSCEVChecks(Lp, LoopScalarPreHeader);
8081 
8082   // Generate the code that checks at runtime if arrays overlap. We put the
8083   // checks into a separate block to make the more common case of few elements
8084   // faster.
8085   EPI.MemSafetyCheck = emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
8086 
8087   // Generate the iteration count check for the main loop, *after* the check
8088   // for the epilogue loop, so that the path-length is shorter for the case
8089   // that goes directly through the vector epilogue. The longer-path length for
8090   // the main loop is compensated for, by the gain from vectorizing the larger
8091   // trip count. Note: the branch will get updated later on when we vectorize
8092   // the epilogue.
8093   EPI.MainLoopIterationCountCheck =
8094       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false);
8095 
8096   // Generate the induction variable.
8097   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
8098   EPI.VectorTripCount = CountRoundDown;
8099   createHeaderBranch(Lp);
8100 
8101   // Skip induction resume value creation here because they will be created in
8102   // the second pass. If we created them here, they wouldn't be used anyway,
8103   // because the VPlan in the second pass still contains the inductions from the
8104   // original loop.
8105 
8106   return {completeLoopSkeleton(Lp, OrigLoopID), nullptr};
8107 }
8108 
8109 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
8110   LLVM_DEBUG({
8111     dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
8112            << "Main Loop VF:" << EPI.MainLoopVF
8113            << ", Main Loop UF:" << EPI.MainLoopUF
8114            << ", Epilogue Loop VF:" << EPI.EpilogueVF
8115            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8116   });
8117 }
8118 
8119 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
8120   DEBUG_WITH_TYPE(VerboseDebug, {
8121     dbgs() << "intermediate fn:\n"
8122            << *OrigLoop->getHeader()->getParent() << "\n";
8123   });
8124 }
8125 
8126 BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(
8127     Loop *L, BasicBlock *Bypass, bool ForEpilogue) {
8128   assert(L && "Expected valid Loop.");
8129   assert(Bypass && "Expected valid bypass basic block.");
8130   ElementCount VFactor = ForEpilogue ? EPI.EpilogueVF : VF;
8131   unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF;
8132   Value *Count = getOrCreateTripCount(L);
8133   // Reuse existing vector loop preheader for TC checks.
8134   // Note that new preheader block is generated for vector loop.
8135   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
8136   IRBuilder<> Builder(TCCheckBlock->getTerminator());
8137 
8138   // Generate code to check if the loop's trip count is less than VF * UF of the
8139   // main vector loop.
8140   auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF) ?
8141       ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
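       // For example (illustrative), with VF = 8 and UF = 2 the trip count is
       // compared against 16; ICMP_ULE is used when a scalar epilogue must
       // execute at least one iteration, so the vector loop is also bypassed
       // when the trip count is exactly 16.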
8142 
8143   Value *CheckMinIters = Builder.CreateICmp(
8144       P, Count, createStepForVF(Builder, Count->getType(), VFactor, UFactor),
8145       "min.iters.check");
8146 
8147   if (!ForEpilogue)
8148     TCCheckBlock->setName("vector.main.loop.iter.check");
8149 
8150   // Create new preheader for vector loop.
8151   LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
8152                                    DT, LI, nullptr, "vector.ph");
8153 
8154   if (ForEpilogue) {
8155     assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
8156                                  DT->getNode(Bypass)->getIDom()) &&
8157            "TC check is expected to dominate Bypass");
8158 
8159     // Update dominator for Bypass & LoopExit.
8160     DT->changeImmediateDominator(Bypass, TCCheckBlock);
8161     if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF))
8162       // For loops with multiple exits, there's no edge from the middle block
8163       // to exit blocks (as the epilogue must run) and thus no need to update
8164       // the immediate dominator of the exit blocks.
8165       DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
8166 
8167     LoopBypassBlocks.push_back(TCCheckBlock);
8168 
8169     // Save the trip count so we don't have to regenerate it in the
8170     // vec.epilog.iter.check. This is safe to do because the trip count
8171     // generated here dominates the vector epilog iter check.
8172     EPI.TripCount = Count;
8173   }
8174 
8175   ReplaceInstWithInst(
8176       TCCheckBlock->getTerminator(),
8177       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8178 
8179   return TCCheckBlock;
8180 }
8181 
8182 //===--------------------------------------------------------------------===//
8183 // EpilogueVectorizerEpilogueLoop
8184 //===--------------------------------------------------------------------===//
8185 
8186 /// This function is partially responsible for generating the control flow
8187 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
8188 std::pair<BasicBlock *, Value *>
8189 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
8190   MDNode *OrigLoopID = OrigLoop->getLoopID();
8191   Loop *Lp = createVectorLoopSkeleton("vec.epilog.");
8192 
8193   // Now, compare the remaining count and if there aren't enough iterations to
8194   // execute the vectorized epilogue, skip to the scalar part.
8195   BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader;
8196   VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check");
8197   LoopVectorPreHeader =
8198       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
8199                  LI, nullptr, "vec.epilog.ph");
8200   emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader,
8201                                           VecEpilogueIterationCountCheck);
8202 
8203   // Adjust the control flow taking the state info from the main loop
8204   // vectorization into account.
8205   assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck &&
8206          "expected this to be saved from the previous pass.");
8207   EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith(
8208       VecEpilogueIterationCountCheck, LoopVectorPreHeader);
8209 
8210   DT->changeImmediateDominator(LoopVectorPreHeader,
8211                                EPI.MainLoopIterationCountCheck);
8212 
8213   EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
8214       VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8215 
8216   if (EPI.SCEVSafetyCheck)
8217     EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith(
8218         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8219   if (EPI.MemSafetyCheck)
8220     EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith(
8221         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8222 
8223   DT->changeImmediateDominator(
8224       VecEpilogueIterationCountCheck,
8225       VecEpilogueIterationCountCheck->getSinglePredecessor());
8226 
8227   DT->changeImmediateDominator(LoopScalarPreHeader,
8228                                EPI.EpilogueIterationCountCheck);
8229   if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF))
8230     // If there is an epilogue which must run, there's no edge from the
8231     // middle block to exit blocks and thus no need to update the immediate
8232     // dominator of the exit blocks.
8233     DT->changeImmediateDominator(LoopExitBlock,
8234                                  EPI.EpilogueIterationCountCheck);
8235 
8236   // Keep track of bypass blocks, as they feed start values to the induction
8237   // phis in the scalar loop preheader.
8238   if (EPI.SCEVSafetyCheck)
8239     LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck);
8240   if (EPI.MemSafetyCheck)
8241     LoopBypassBlocks.push_back(EPI.MemSafetyCheck);
8242   LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck);
8243 
8244   // Generate a resume induction for the vector epilogue and put it in the
8245   // vector epilogue preheader
8246   Type *IdxTy = Legal->getWidestInductionType();
8247   PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
8248                                          LoopVectorPreHeader->getFirstNonPHI());
8249   EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
8250   EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
8251                            EPI.MainLoopIterationCountCheck);
8252 
8253   // Generate the induction variable.
8254   createHeaderBranch(Lp);
8255 
8256   // Generate induction resume values. These variables save the new starting
8257   // indexes for the scalar loop. They are used to test if there are any tail
8258   // iterations left once the vector loop has completed.
8259   // Note that when the vectorized epilogue is skipped due to iteration count
8260   // check, then the resume value for the induction variable comes from
8261   // the trip count of the main vector loop, hence passing the AdditionalBypass
8262   // argument.
8263   createInductionResumeValues(Lp, {VecEpilogueIterationCountCheck,
8264                                    EPI.VectorTripCount} /* AdditionalBypass */);
8265 
8266   AddRuntimeUnrollDisableMetaData(Lp);
8267   return {completeLoopSkeleton(Lp, OrigLoopID), EPResumeVal};
8268 }
8269 
8270 BasicBlock *
8271 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
8272     Loop *L, BasicBlock *Bypass, BasicBlock *Insert) {
8273 
8274   assert(EPI.TripCount &&
8275          "Expected trip count to have been saved in the first pass.");
8276   assert(
8277       (!isa<Instruction>(EPI.TripCount) ||
8278        DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
8279       "saved trip count does not dominate insertion point.");
8280   Value *TC = EPI.TripCount;
8281   IRBuilder<> Builder(Insert->getTerminator());
8282   Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");
8283 
8284   // Generate code to check if the loop's trip count is less than VF * UF of the
8285   // vector epilogue loop.
8286   auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ?
8287       ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
8288 
8289   Value *CheckMinIters =
8290       Builder.CreateICmp(P, Count,
8291                          createStepForVF(Builder, Count->getType(),
8292                                          EPI.EpilogueVF, EPI.EpilogueUF),
8293                          "min.epilog.iters.check");
8294 
8295   ReplaceInstWithInst(
8296       Insert->getTerminator(),
8297       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8298 
8299   LoopBypassBlocks.push_back(Insert);
8300   return Insert;
8301 }
8302 
8303 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
8304   LLVM_DEBUG({
8305     dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
8306            << "Epilogue Loop VF:" << EPI.EpilogueVF
8307            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8308   });
8309 }
8310 
8311 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
8312   DEBUG_WITH_TYPE(VerboseDebug, {
8313     dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n";
8314   });
8315 }
8316 
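     /// Evaluate \p Predicate on Range.Start and clamp Range.End down to the
     /// first VF for which the predicate's answer changes. For example
     /// (illustrative), for Range = [4, 32) with the predicate flipping at
     /// VF = 16, Range.End is clamped to 16 and the answer for VF = 4 is
     /// returned.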
8317 bool LoopVectorizationPlanner::getDecisionAndClampRange(
8318     const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
8319   assert(!Range.isEmpty() && "Trying to test an empty VF range.");
8320   bool PredicateAtRangeStart = Predicate(Range.Start);
8321 
8322   for (ElementCount TmpVF = Range.Start * 2;
8323        ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
8324     if (Predicate(TmpVF) != PredicateAtRangeStart) {
8325       Range.End = TmpVF;
8326       break;
8327     }
8328 
8329   return PredicateAtRangeStart;
8330 }
8331 
8332 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
8333 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
8334 /// of VF's starting at a given VF and extending it as much as possible. Each
8335 /// vectorization decision can potentially shorten this sub-range during
8336 /// buildVPlan().
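     /// For example (illustrative), with MinVF = 2 and MaxVF = 8 this might
     /// build one VPlan covering {2, 4} and another covering {8}, depending
     /// on where the per-VF decisions change.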
8337 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
8338                                            ElementCount MaxVF) {
8339   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8340   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8341     VFRange SubRange = {VF, MaxVFPlusOne};
8342     VPlans.push_back(buildVPlan(SubRange));
8343     VF = SubRange.End;
8344   }
8345 }
8346 
8347 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
8348                                          VPlanPtr &Plan) {
8349   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
8350 
8351   // Look for cached value.
8352   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
8353   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
8354   if (ECEntryIt != EdgeMaskCache.end())
8355     return ECEntryIt->second;
8356 
8357   VPValue *SrcMask = createBlockInMask(Src, Plan);
8358 
8359   // The terminator has to be a branch inst!
8360   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
8361   assert(BI && "Unexpected terminator found");
8362 
8363   if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
8364     return EdgeMaskCache[Edge] = SrcMask;
8365 
8366   // If source is an exiting block, we know the exit edge is dynamically dead
8367   // in the vector loop, and thus we don't need to restrict the mask.  Avoid
8368   // adding uses of an otherwise potentially dead instruction.
8369   if (OrigLoop->isLoopExiting(Src))
8370     return EdgeMaskCache[Edge] = SrcMask;
8371 
8372   VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition());
8373   assert(EdgeMask && "No Edge Mask found for condition");
8374 
8375   if (BI->getSuccessor(0) != Dst)
8376     EdgeMask = Builder.createNot(EdgeMask, BI->getDebugLoc());
8377 
8378   if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND.
8379     // The condition is 'SrcMask && EdgeMask', which is equivalent to
8380     // 'select i1 SrcMask, i1 EdgeMask, i1 false'.
8381     // The select version does not introduce new UB if SrcMask is false and
8382     // EdgeMask is poison. Using 'and' here introduces undefined behavior.
8383     VPValue *False = Plan->getOrAddVPValue(
8384         ConstantInt::getFalse(BI->getCondition()->getType()));
8385     EdgeMask =
8386         Builder.createSelect(SrcMask, EdgeMask, False, BI->getDebugLoc());
8387   }
8388 
8389   return EdgeMaskCache[Edge] = EdgeMask;
8390 }
8391 
8392 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
8393   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
8394 
8395   // Look for cached value.
8396   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
8397   if (BCEntryIt != BlockMaskCache.end())
8398     return BCEntryIt->second;
8399 
8400   // All-one mask is modelled as no-mask following the convention for masked
8401   // load/store/gather/scatter. Initialize BlockMask to no-mask.
8402   VPValue *BlockMask = nullptr;
8403 
8404   if (OrigLoop->getHeader() == BB) {
8405     if (!CM.blockNeedsPredicationForAnyReason(BB))
8406       return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
8407 
8408     // Introduce the early-exit compare IV <= BTC to form header block mask.
8409     // This is used instead of IV < TC because TC may wrap, unlike BTC. Start by
8410     // constructing the desired canonical IV in the header block as its first
8411     // non-phi instructions.
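    // For example, a loop with an i8 induction and a trip count of 256 has a
    // trip count that wraps to 0, while BTC = 255 is still representable; lane
    // L is then active iff IV + L <= BTC.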
8412     assert(CM.foldTailByMasking() && "must fold the tail");
8413     VPBasicBlock *HeaderVPBB = Plan->getEntry()->getEntryBasicBlock();
8414     auto NewInsertionPoint = HeaderVPBB->getFirstNonPhi();
8415 
8416     VPValue *IV = nullptr;
8417     if (Legal->getPrimaryInduction())
8418       IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction());
8419     else {
8420       auto *IVRecipe = new VPWidenCanonicalIVRecipe();
8421       HeaderVPBB->insert(IVRecipe, NewInsertionPoint);
8422       IV = IVRecipe;
8423     }
8424 
8425     VPBuilder::InsertPointGuard Guard(Builder);
8426     Builder.setInsertPoint(HeaderVPBB, NewInsertionPoint);
8427     if (CM.TTI.emitGetActiveLaneMask()) {
8428       VPValue *TC = Plan->getOrCreateTripCount();
8429       BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV, TC});
8430     } else {
8431       VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
8432       BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
8433     }
8434     return BlockMaskCache[BB] = BlockMask;
8435   }
8436 
8437   // This is the block mask. We OR all incoming edges.
8438   for (auto *Predecessor : predecessors(BB)) {
8439     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
8440     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
8441       return BlockMaskCache[BB] = EdgeMask;
8442 
8443     if (!BlockMask) { // BlockMask has its initialized nullptr value.
8444       BlockMask = EdgeMask;
8445       continue;
8446     }
8447 
8448     BlockMask = Builder.createOr(BlockMask, EdgeMask, {});
8449   }
8450 
8451   return BlockMaskCache[BB] = BlockMask;
8452 }
8453 
8454 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I,
8455                                                 ArrayRef<VPValue *> Operands,
8456                                                 VFRange &Range,
8457                                                 VPlanPtr &Plan) {
8458   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
8459          "Must be called with either a load or store");
8460 
8461   auto willWiden = [&](ElementCount VF) -> bool {
8462     if (VF.isScalar())
8463       return false;
8464     LoopVectorizationCostModel::InstWidening Decision =
8465         CM.getWideningDecision(I, VF);
8466     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
8467            "CM decision should be taken at this point.");
8468     if (Decision == LoopVectorizationCostModel::CM_Interleave)
8469       return true;
8470     if (CM.isScalarAfterVectorization(I, VF) ||
8471         CM.isProfitableToScalarize(I, VF))
8472       return false;
8473     return Decision != LoopVectorizationCostModel::CM_Scalarize;
8474   };
8475 
8476   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8477     return nullptr;
8478 
8479   VPValue *Mask = nullptr;
8480   if (Legal->isMaskRequired(I))
8481     Mask = createBlockInMask(I->getParent(), Plan);
8482 
8483   // Determine if the pointer operand of the access is either consecutive or
8484   // reverse consecutive.
8485   LoopVectorizationCostModel::InstWidening Decision =
8486       CM.getWideningDecision(I, Range.Start);
8487   bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse;
8488   bool Consecutive =
8489       Reverse || Decision == LoopVectorizationCostModel::CM_Widen;
8490 
8491   if (LoadInst *Load = dyn_cast<LoadInst>(I))
8492     return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask,
8493                                               Consecutive, Reverse);
8494 
8495   StoreInst *Store = cast<StoreInst>(I);
8496   return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0],
8497                                             Mask, Consecutive, Reverse);
8498 }
8499 
8500 VPWidenIntOrFpInductionRecipe *
8501 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi,
8502                                            ArrayRef<VPValue *> Operands) const {
8503   // Check if this is an integer or fp induction. If so, build the recipe that
8504   // produces its scalar and vector values.
8505   if (auto *II = Legal->getIntOrFpInductionDescriptor(Phi)) {
8506     assert(II->getStartValue() ==
8507            Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8508     return new VPWidenIntOrFpInductionRecipe(Phi, Operands[0], *II);
8509   }
8510 
8511   return nullptr;
8512 }
8513 
8514 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate(
8515     TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range,
8516     VPlan &Plan) const {
8517   // Optimize the special case where the source is a constant integer
8518   // induction variable. Notice that we can only optimize the 'trunc' case
8519   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
8520   // (c) other casts depend on pointer size.
8521 
8522   // Determine whether \p K is a truncation based on an induction variable that
8523   // can be optimized.
8524   auto isOptimizableIVTruncate =
8525       [&](Instruction *K) -> std::function<bool(ElementCount)> {
8526     return [=](ElementCount VF) -> bool {
8527       return CM.isOptimizableIVTruncate(K, VF);
8528     };
8529   };
8530 
8531   if (LoopVectorizationPlanner::getDecisionAndClampRange(
8532           isOptimizableIVTruncate(I), Range)) {
8533 
8534     auto *Phi = cast<PHINode>(I->getOperand(0));
8535     const InductionDescriptor &II = *Legal->getIntOrFpInductionDescriptor(Phi);
8536     VPValue *Start = Plan.getOrAddVPValue(II.getStartValue());
8537     return new VPWidenIntOrFpInductionRecipe(Phi, Start, II, I);
8538   }
8539   return nullptr;
8540 }
8541 
8542 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi,
8543                                                 ArrayRef<VPValue *> Operands,
8544                                                 VPlanPtr &Plan) {
8545   // If all incoming values are equal, the incoming VPValue can be used directly
8546   // instead of creating a new VPBlendRecipe.
8547   VPValue *FirstIncoming = Operands[0];
8548   if (all_of(Operands, [FirstIncoming](const VPValue *Inc) {
8549         return FirstIncoming == Inc;
8550       })) {
8551     return Operands[0];
8552   }
8553 
8554   // We know that all PHIs in non-header blocks are converted into selects, so
8555   // we don't have to worry about the insertion order and we can just use the
8556   // builder. At this point we generate the predication tree. There may be
8557   // duplications since this is a simple recursive scan, but future
8558   // optimizations will clean it up.
8559   SmallVector<VPValue *, 2> OperandsWithMask;
8560   unsigned NumIncoming = Phi->getNumIncomingValues();
8561 
8562   for (unsigned In = 0; In < NumIncoming; In++) {
8563     VPValue *EdgeMask =
8564       createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
8565     assert((EdgeMask || NumIncoming == 1) &&
8566            "Multiple predecessors with one having a full mask");
8567     OperandsWithMask.push_back(Operands[In]);
8568     if (EdgeMask)
8569       OperandsWithMask.push_back(EdgeMask);
8570   }
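  // The operand list interleaves each incoming value with its edge mask, e.g.
  // {V0, M0, V1, M1, ...}; with a single predecessor the (all-one) edge mask
  // may be omitted, leaving just {V0}.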
8571   return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask));
8572 }
8573 
8574 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
8575                                                    ArrayRef<VPValue *> Operands,
8576                                                    VFRange &Range) const {
8577 
8578   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8579       [this, CI](ElementCount VF) {
8580         return CM.isScalarWithPredication(CI, VF);
8581       },
8582       Range);
8583 
8584   if (IsPredicated)
8585     return nullptr;
8586 
8587   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8588   if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
8589              ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
8590              ID == Intrinsic::pseudoprobe ||
8591              ID == Intrinsic::experimental_noalias_scope_decl))
8592     return nullptr;
8593 
8594   auto willWiden = [&](ElementCount VF) -> bool {
8595     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8596     // The following case may be scalarized depending on the VF.
8597     // The flag shows whether we use an intrinsic or a usual call for the
8598     // vectorized version of the instruction: is it beneficial to perform the
8599     // intrinsic call compared to the lib call?
8600     bool NeedToScalarize = false;
8601     InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
8602     InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0;
8603     bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
8604     return UseVectorIntrinsic || !NeedToScalarize;
8605   };
8606 
8607   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8608     return nullptr;
8609 
8610   ArrayRef<VPValue *> Ops = Operands.take_front(CI->arg_size());
8611   return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end()));
8612 }
8613 
8614 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
8615   assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
8616          !isa<StoreInst>(I) && "Instruction should have been handled earlier");
8617   // The instruction should be widened, unless it is scalar after
8618   // vectorization, scalarization is profitable, or it is predicated.
8619   auto WillScalarize = [this, I](ElementCount VF) -> bool {
8620     return CM.isScalarAfterVectorization(I, VF) ||
8621            CM.isProfitableToScalarize(I, VF) ||
8622            CM.isScalarWithPredication(I, VF);
8623   };
8624   return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
8625                                                              Range);
8626 }
8627 
8628 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
8629                                            ArrayRef<VPValue *> Operands) const {
8630   auto IsVectorizableOpcode = [](unsigned Opcode) {
8631     switch (Opcode) {
8632     case Instruction::Add:
8633     case Instruction::And:
8634     case Instruction::AShr:
8635     case Instruction::BitCast:
8636     case Instruction::FAdd:
8637     case Instruction::FCmp:
8638     case Instruction::FDiv:
8639     case Instruction::FMul:
8640     case Instruction::FNeg:
8641     case Instruction::FPExt:
8642     case Instruction::FPToSI:
8643     case Instruction::FPToUI:
8644     case Instruction::FPTrunc:
8645     case Instruction::FRem:
8646     case Instruction::FSub:
8647     case Instruction::ICmp:
8648     case Instruction::IntToPtr:
8649     case Instruction::LShr:
8650     case Instruction::Mul:
8651     case Instruction::Or:
8652     case Instruction::PtrToInt:
8653     case Instruction::SDiv:
8654     case Instruction::Select:
8655     case Instruction::SExt:
8656     case Instruction::Shl:
8657     case Instruction::SIToFP:
8658     case Instruction::SRem:
8659     case Instruction::Sub:
8660     case Instruction::Trunc:
8661     case Instruction::UDiv:
8662     case Instruction::UIToFP:
8663     case Instruction::URem:
8664     case Instruction::Xor:
8665     case Instruction::ZExt:
8666       return true;
8667     }
8668     return false;
8669   };
8670 
8671   if (!IsVectorizableOpcode(I->getOpcode()))
8672     return nullptr;
8673 
8674   // Success: widen this instruction.
8675   return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end()));
8676 }
8677 
8678 void VPRecipeBuilder::fixHeaderPhis() {
8679   BasicBlock *OrigLatch = OrigLoop->getLoopLatch();
8680   for (VPHeaderPHIRecipe *R : PhisToFix) {
8681     auto *PN = cast<PHINode>(R->getUnderlyingValue());
8682     VPRecipeBase *IncR =
8683         getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch)));
8684     R->addOperand(IncR->getVPSingleValue());
8685   }
8686 }
8687 
8688 VPBasicBlock *VPRecipeBuilder::handleReplication(
8689     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
8690     VPlanPtr &Plan) {
8691   bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
8692       [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
8693       Range);
8694 
8695   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8696       [&](ElementCount VF) { return CM.isPredicatedInst(I, VF, IsUniform); },
8697       Range);
8698 
8699   // Even if the instruction is not marked as uniform, there are certain
8700   // intrinsic calls that can be effectively treated as such, so we check for
8701   // them here. Conservatively, we only do this for scalable vectors, since
8702   // for fixed-width VFs we can always fall back on full scalarization.
8703   if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) {
8704     switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
8705     case Intrinsic::assume:
8706     case Intrinsic::lifetime_start:
8707     case Intrinsic::lifetime_end:
8708       // For scalable vectors if one of the operands is variant then we still
8709       // want to mark as uniform, which will generate one instruction for just
8710       // the first lane of the vector. We can't scalarize the call in the same
8711       // way as for fixed-width vectors because we don't know how many lanes
8712       // there are.
8713       //
8714       // The reasons for doing it this way for scalable vectors are:
8715       //   1. For the assume intrinsic generating the instruction for the first
8716       //      lane is still better than not generating any at all. For
8717       //      example, the input may be a splat across all lanes.
8718       //   2. For the lifetime start/end intrinsics the pointer operand only
8719       //      does anything useful when the input comes from a stack object,
8720       //      which suggests it should always be uniform. For non-stack objects
8721       //      the effect is to poison the object, which still allows us to
8722       //      remove the call.
8723       IsUniform = true;
8724       break;
8725     default:
8726       break;
8727     }
8728   }
8729 
8730   auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
8731                                        IsUniform, IsPredicated);
8732   setRecipe(I, Recipe);
8733   Plan->addVPValue(I, Recipe);
8734 
8735   // Find if I uses a predicated instruction. If so, it will use its scalar
8736   // value. Avoid hoisting the insert-element which packs the scalar value into
8737   // a vector value, as that happens iff all users use the vector value.
8738   for (VPValue *Op : Recipe->operands()) {
8739     auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef());
8740     if (!PredR)
8741       continue;
8742     auto *RepR =
8743         cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef());
8744     assert(RepR->isPredicated() &&
8745            "expected Replicate recipe to be predicated");
8746     RepR->setAlsoPack(false);
8747   }
8748 
8749   // Finalize the recipe for Instr, first if it is not predicated.
8750   if (!IsPredicated) {
8751     LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
8752     VPBB->appendRecipe(Recipe);
8753     return VPBB;
8754   }
8755   LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
8756 
8757   VPBlockBase *SingleSucc = VPBB->getSingleSuccessor();
8758   assert(SingleSucc && "VPBB must have a single successor when handling "
8759                        "predicated replication.");
8760   VPBlockUtils::disconnectBlocks(VPBB, SingleSucc);
8761   // Record predicated instructions for above packing optimizations.
8762   VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
8763   VPBlockUtils::insertBlockAfter(Region, VPBB);
8764   auto *RegSucc = new VPBasicBlock();
8765   VPBlockUtils::insertBlockAfter(RegSucc, Region);
8766   VPBlockUtils::connectBlocks(RegSucc, SingleSucc);
8767   return RegSucc;
8768 }
8769 
8770 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
8771                                                       VPRecipeBase *PredRecipe,
8772                                                       VPlanPtr &Plan) {
8773   // Instructions marked for predication are replicated and placed under an
8774   // if-then construct to prevent side-effects.
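  // Roughly, the resulting region has the triangular shape:
  //
  //     pred.<opcode>.entry  (BranchOnMask BlockInMask)
  //        |          \
  //        |     pred.<opcode>.if  (PredRecipe)
  //        |          /
  //     pred.<opcode>.continue  (VPPredInstPHIRecipe, if Instr has a result)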
8775 
8776   // Generate recipes to compute the block mask for this region.
8777   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
8778 
8779   // Build the triangular if-then region.
8780   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
8781   assert(Instr->getParent() && "Predicated instruction not in any basic block");
8782   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
8783   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
8784   auto *PHIRecipe = Instr->getType()->isVoidTy()
8785                         ? nullptr
8786                         : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr));
8787   if (PHIRecipe) {
8788     Plan->removeVPValueFor(Instr);
8789     Plan->addVPValue(Instr, PHIRecipe);
8790   }
8791   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
8792   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
8793   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
8794 
8795   // Note: first set Entry as region entry and then connect successors starting
8796   // from it in order, to propagate the "parent" of each VPBasicBlock.
8797   VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
8798   VPBlockUtils::connectBlocks(Pred, Exit);
8799 
8800   return Region;
8801 }
8802 
8803 VPRecipeOrVPValueTy
8804 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
8805                                         ArrayRef<VPValue *> Operands,
8806                                         VFRange &Range, VPlanPtr &Plan) {
8807   // First, check for specific widening recipes that deal with calls, memory
8808   // operations, inductions and Phi nodes.
8809   if (auto *CI = dyn_cast<CallInst>(Instr))
8810     return toVPRecipeResult(tryToWidenCall(CI, Operands, Range));
8811 
8812   if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8813     return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan));
8814 
8815   VPRecipeBase *Recipe;
8816   if (auto Phi = dyn_cast<PHINode>(Instr)) {
8817     if (Phi->getParent() != OrigLoop->getHeader())
8818       return tryToBlend(Phi, Operands, Plan);
8819     if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands)))
8820       return toVPRecipeResult(Recipe);
8821 
8822     VPHeaderPHIRecipe *PhiRecipe = nullptr;
8823     if (Legal->isReductionVariable(Phi) || Legal->isFirstOrderRecurrence(Phi)) {
8824       VPValue *StartV = Operands[0];
8825       if (Legal->isReductionVariable(Phi)) {
8826         const RecurrenceDescriptor &RdxDesc =
8827             Legal->getReductionVars().find(Phi)->second;
8828         assert(RdxDesc.getRecurrenceStartValue() ==
8829                Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8830         PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV,
8831                                              CM.isInLoopReduction(Phi),
8832                                              CM.useOrderedReductions(RdxDesc));
8833       } else {
8834         PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV);
8835       }
8836 
8837       // Record the incoming value from the backedge, so we can add the incoming
8838       // value from the backedge after all recipes have been created.
8839       recordRecipeOf(cast<Instruction>(
8840           Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch())));
8841       PhisToFix.push_back(PhiRecipe);
8842     } else {
8843       // TODO: record backedge value for remaining pointer induction phis.
8844       assert(Phi->getType()->isPointerTy() &&
8845              "only pointer phis should be handled here");
8846       assert(Legal->getInductionVars().count(Phi) &&
8847              "Not an induction variable");
8848       InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
8849       VPValue *Start = Plan->getOrAddVPValue(II.getStartValue());
8850       PhiRecipe = new VPWidenPHIRecipe(Phi, Start);
8851     }
8852 
8853     return toVPRecipeResult(PhiRecipe);
8854   }
8855 
8856   if (isa<TruncInst>(Instr) &&
8857       (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands,
8858                                                Range, *Plan)))
8859     return toVPRecipeResult(Recipe);
8860 
8861   if (!shouldWiden(Instr, Range))
8862     return nullptr;
8863 
8864   if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
8865     return toVPRecipeResult(new VPWidenGEPRecipe(
8866         GEP, make_range(Operands.begin(), Operands.end()), OrigLoop));
8867 
8868   if (auto *SI = dyn_cast<SelectInst>(Instr)) {
8869     bool InvariantCond =
8870         PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
8871     return toVPRecipeResult(new VPWidenSelectRecipe(
8872         *SI, make_range(Operands.begin(), Operands.end()), InvariantCond));
8873   }
8874 
8875   return toVPRecipeResult(tryToWiden(Instr, Operands));
8876 }
8877 
8878 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8879                                                         ElementCount MaxVF) {
8880   assert(OrigLoop->isInnermost() && "Inner loop expected.");
8881 
8882   // Collect instructions from the original loop that will become trivially dead
8883   // in the vectorized loop. We don't need to vectorize these instructions. For
8884   // example, original induction update instructions can become dead because we
8885   // separately emit induction "steps" when generating code for the new loop.
8886   // Similarly, we create a new latch condition when setting up the structure
8887   // of the new loop, so the old one can become dead.
8888   SmallPtrSet<Instruction *, 4> DeadInstructions;
8889   collectTriviallyDeadInstructions(DeadInstructions);
8890 
8891   // Add assume instructions we need to drop to DeadInstructions, to prevent
8892   // them from being added to the VPlan.
8893   // TODO: We only need to drop assumes in blocks that get flattened. If the
8894   // control flow is preserved, we should keep them.
8895   auto &ConditionalAssumes = Legal->getConditionalAssumes();
8896   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
8897 
8898   MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
8899   // Dead instructions do not need sinking. Remove them from SinkAfter.
8900   for (Instruction *I : DeadInstructions)
8901     SinkAfter.erase(I);
8902 
8903   // Cannot sink instructions after dead instructions (there won't be any
8904   // recipes for them). Instead, find the first non-dead previous instruction.
8905   for (auto &P : Legal->getSinkAfter()) {
8906     Instruction *SinkTarget = P.second;
8907     Instruction *FirstInst = &*SinkTarget->getParent()->begin();
8908     (void)FirstInst;
8909     while (DeadInstructions.contains(SinkTarget)) {
8910       assert(
8911           SinkTarget != FirstInst &&
8912           "Must find a live instruction (at least the one feeding the "
8913           "first-order recurrence PHI) before reaching beginning of the block");
8914       SinkTarget = SinkTarget->getPrevNode();
8915       assert(SinkTarget != P.first &&
8916              "sink source equals target, no sinking required");
8917     }
8918     P.second = SinkTarget;
8919   }
8920 
8921   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8922   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8923     VFRange SubRange = {VF, MaxVFPlusOne};
8924     VPlans.push_back(
8925         buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
8926     VF = SubRange.End;
8927   }
8928 }
8929 
8930 // Add a VPCanonicalIVPHIRecipe starting at 0 to the header, a
8931 // CanonicalIVIncrement{NUW} VPInstruction to increment it by VF * UF and a
8932 // BranchOnCount VPInstruction to the latch.
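// Conceptually this models the vector loop's scalar control, roughly (names
// illustrative; the actual IR is only emitted when the recipes execute):
//   %can.iv      = phi [ 0, preheader ], [ %can.iv.next, latch ]
//   %can.iv.next = add (nuw?) %can.iv, VF * UF
//   branch-on-count %can.iv.next, vector-trip-count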
8933 static void addCanonicalIVRecipes(VPlan &Plan, Type *IdxTy, DebugLoc DL,
8934                                   bool HasNUW, bool IsVPlanNative) {
8935   Value *StartIdx = ConstantInt::get(IdxTy, 0);
8936   auto *StartV = Plan.getOrAddVPValue(StartIdx);
8937 
8938   auto *CanonicalIVPHI = new VPCanonicalIVPHIRecipe(StartV, DL);
8939   VPRegionBlock *TopRegion = Plan.getVectorLoopRegion();
8940   VPBasicBlock *Header = TopRegion->getEntryBasicBlock();
8941   if (IsVPlanNative)
8942     Header = cast<VPBasicBlock>(Header->getSingleSuccessor());
8943   Header->insert(CanonicalIVPHI, Header->begin());
8944 
8945   auto *CanonicalIVIncrement =
8946       new VPInstruction(HasNUW ? VPInstruction::CanonicalIVIncrementNUW
8947                                : VPInstruction::CanonicalIVIncrement,
8948                         {CanonicalIVPHI}, DL);
8949   CanonicalIVPHI->addOperand(CanonicalIVIncrement);
8950 
8951   VPBasicBlock *EB = TopRegion->getExitBasicBlock();
8952   if (IsVPlanNative) {
8953     EB = cast<VPBasicBlock>(EB->getSinglePredecessor());
8954     EB->setCondBit(nullptr);
8955   }
8956   EB->appendRecipe(CanonicalIVIncrement);
8957 
8958   auto *BranchOnCount =
8959       new VPInstruction(VPInstruction::BranchOnCount,
8960                         {CanonicalIVIncrement, &Plan.getVectorTripCount()}, DL);
8961   EB->appendRecipe(BranchOnCount);
8962 }
8963 
8964 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
8965     VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
8966     const MapVector<Instruction *, Instruction *> &SinkAfter) {
8967 
8968   SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
8969 
8970   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
8971 
8972   // ---------------------------------------------------------------------------
8973   // Pre-construction: record ingredients whose recipes we'll need to further
8974   // process after constructing the initial VPlan.
8975   // ---------------------------------------------------------------------------
8976 
8977   // Mark instructions we'll need to sink later and their targets as
8978   // ingredients whose recipe we'll need to record.
8979   for (auto &Entry : SinkAfter) {
8980     RecipeBuilder.recordRecipeOf(Entry.first);
8981     RecipeBuilder.recordRecipeOf(Entry.second);
8982   }
8983   for (auto &Reduction : CM.getInLoopReductionChains()) {
8984     PHINode *Phi = Reduction.first;
8985     RecurKind Kind =
8986         Legal->getReductionVars().find(Phi)->second.getRecurrenceKind();
8987     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
8988 
8989     RecipeBuilder.recordRecipeOf(Phi);
8990     for (auto &R : ReductionOperations) {
8991       RecipeBuilder.recordRecipeOf(R);
8992       // For min/max reductions, where we have a pair of icmp/select, we also
8993       // need to record the ICmp recipe, so it can be removed later.
8994       assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
8995              "Only min/max recurrences allowed for inloop reductions");
8996       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
8997         RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
8998     }
8999   }
9000 
9001   // For each interleave group which is relevant for this (possibly trimmed)
9002   // Range, add it to the set of groups to be later applied to the VPlan and add
9003   // placeholders for its members' Recipes which we'll be replacing with a
9004   // single VPInterleaveRecipe.
9005   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
9006     auto applyIG = [IG, this](ElementCount VF) -> bool {
9007       return (VF.isVector() && // Query is illegal for VF == 1
9008               CM.getWideningDecision(IG->getInsertPos(), VF) ==
9009                   LoopVectorizationCostModel::CM_Interleave);
9010     };
9011     if (!getDecisionAndClampRange(applyIG, Range))
9012       continue;
9013     InterleaveGroups.insert(IG);
9014     for (unsigned i = 0; i < IG->getFactor(); i++)
9015       if (Instruction *Member = IG->getMember(i))
9016         RecipeBuilder.recordRecipeOf(Member);
9017   }
9018 
9019   // ---------------------------------------------------------------------------
9020   // Build initial VPlan: Scan the body of the loop in a topological order to
9021   // visit each basic block after having visited its predecessor basic blocks.
9022   // ---------------------------------------------------------------------------
9023 
9024   // Create initial VPlan skeleton, with separate header and latch blocks.
9025   VPBasicBlock *HeaderVPBB = new VPBasicBlock();
9026   VPBasicBlock *LatchVPBB = new VPBasicBlock("vector.latch");
9027   VPBlockUtils::insertBlockAfter(LatchVPBB, HeaderVPBB);
9028   auto *TopRegion = new VPRegionBlock(HeaderVPBB, LatchVPBB, "vector loop");
9029   auto Plan = std::make_unique<VPlan>(TopRegion);
9030 
9031   Instruction *DLInst =
9032       getDebugLocFromInstOrOperands(Legal->getPrimaryInduction());
9033   addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(),
9034                         DLInst ? DLInst->getDebugLoc() : DebugLoc(),
9035                         !CM.foldTailByMasking(), false);
9036 
9037   // Scan the body of the loop in a topological order to visit each basic block
9038   // after having visited its predecessor basic blocks.
9039   LoopBlocksDFS DFS(OrigLoop);
9040   DFS.perform(LI);
9041 
9042   VPBasicBlock *VPBB = HeaderVPBB;
9043   SmallVector<VPWidenIntOrFpInductionRecipe *> InductionsToMove;
9044   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
9045     // Relevant instructions from basic block BB will be grouped into VPRecipe
9046     // ingredients and fill a new VPBasicBlock.
9047     unsigned VPBBsForBB = 0;
9048     VPBB->setName(BB->getName());
9049     Builder.setInsertPoint(VPBB);
9050 
9051     // Introduce each ingredient into VPlan.
9052     // TODO: Model and preserve debug intrinsics in VPlan.
9053     for (Instruction &I : BB->instructionsWithoutDebug()) {
9054       Instruction *Instr = &I;
9055 
9056       // First filter out irrelevant instructions, to ensure no recipes are
9057       // built for them.
9058       if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
9059         continue;
9060 
9061       SmallVector<VPValue *, 4> Operands;
9062       auto *Phi = dyn_cast<PHINode>(Instr);
9063       if (Phi && Phi->getParent() == OrigLoop->getHeader()) {
9064         Operands.push_back(Plan->getOrAddVPValue(
9065             Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())));
9066       } else {
9067         auto OpRange = Plan->mapToVPValues(Instr->operands());
9068         Operands = {OpRange.begin(), OpRange.end()};
9069       }
9070       if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe(
9071               Instr, Operands, Range, Plan)) {
9072         // If Instr can be simplified to an existing VPValue, use it.
9073         if (RecipeOrValue.is<VPValue *>()) {
9074           auto *VPV = RecipeOrValue.get<VPValue *>();
9075           Plan->addVPValue(Instr, VPV);
9076           // If the re-used value is a recipe, register the recipe for the
9077           // instruction, in case the recipe for Instr needs to be recorded.
9078           if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef()))
9079             RecipeBuilder.setRecipe(Instr, R);
9080           continue;
9081         }
9082         // Otherwise, add the new recipe.
9083         VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>();
9084         for (auto *Def : Recipe->definedValues()) {
9085           auto *UV = Def->getUnderlyingValue();
9086           Plan->addVPValue(UV, Def);
9087         }
9088 
9089         if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) &&
9090             HeaderVPBB->getFirstNonPhi() != VPBB->end()) {
9091           // Keep track of VPWidenIntOrFpInductionRecipes not in the phi section
9092           // of the header block. That can happen for truncates of induction
9093           // variables. Those recipes are moved to the phi section of the header
9094           // block after applying SinkAfter, which relies on the original
9095           // position of the trunc.
9096           assert(isa<TruncInst>(Instr));
9097           InductionsToMove.push_back(
9098               cast<VPWidenIntOrFpInductionRecipe>(Recipe));
9099         }
9100         RecipeBuilder.setRecipe(Instr, Recipe);
9101         VPBB->appendRecipe(Recipe);
9102         continue;
9103       }
9104 
9105       // Otherwise, if all widening options failed, the instruction is to be
9106       // replicated. This may create a successor for VPBB.
9107       VPBasicBlock *NextVPBB =
9108           RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan);
9109       if (NextVPBB != VPBB) {
9110         VPBB = NextVPBB;
9111         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
9112                                     : "");
9113       }
9114     }
9115 
9116     VPBlockUtils::insertBlockAfter(new VPBasicBlock(), VPBB);
9117     VPBB = cast<VPBasicBlock>(VPBB->getSingleSuccessor());
9118   }
9119 
9120   // Fold the last, empty block into its predecessor.
9121   VPBB = VPBlockUtils::tryToMergeBlockIntoPredecessor(VPBB);
9122   assert(VPBB && "expected to fold last (empty) block");
9123   // After here, VPBB should not be used.
9124   VPBB = nullptr;
9125 
9126   assert(isa<VPRegionBlock>(Plan->getEntry()) &&
9127          !Plan->getEntry()->getEntryBasicBlock()->empty() &&
9128          "entry block must be set to a VPRegionBlock having a non-empty entry "
9129          "VPBasicBlock");
9130   RecipeBuilder.fixHeaderPhis();
9131 
9132   // ---------------------------------------------------------------------------
9133   // Transform initial VPlan: Apply previously taken decisions, in order, to
9134   // bring the VPlan to its final state.
9135   // ---------------------------------------------------------------------------
9136 
9137   // Apply Sink-After legal constraints.
9138   auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * {
9139     auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent());
9140     if (Region && Region->isReplicator()) {
9141       assert(Region->getNumSuccessors() == 1 &&
9142              Region->getNumPredecessors() == 1 && "Expected SESE region!");
9143       assert(R->getParent()->size() == 1 &&
9144              "A recipe in an original replicator region must be the only "
9145              "recipe in its block");
9146       return Region;
9147     }
9148     return nullptr;
9149   };
9150   for (auto &Entry : SinkAfter) {
9151     VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
9152     VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
9153 
9154     auto *TargetRegion = GetReplicateRegion(Target);
9155     auto *SinkRegion = GetReplicateRegion(Sink);
9156     if (!SinkRegion) {
9157       // If the sink source is not a replicate region, sink the recipe directly.
9158       if (TargetRegion) {
9159         // The target is in a replication region, make sure to move Sink to
9160         // the block after it, not into the replication region itself.
9161         VPBasicBlock *NextBlock =
9162             cast<VPBasicBlock>(TargetRegion->getSuccessors().front());
9163         Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi());
9164       } else
9165         Sink->moveAfter(Target);
9166       continue;
9167     }
9168 
9169     // The sink source is in a replicate region. Unhook the region from the CFG.
9170     auto *SinkPred = SinkRegion->getSinglePredecessor();
9171     auto *SinkSucc = SinkRegion->getSingleSuccessor();
9172     VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion);
9173     VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc);
9174     VPBlockUtils::connectBlocks(SinkPred, SinkSucc);
9175 
9176     if (TargetRegion) {
9177       // The target recipe is also in a replicate region, move the sink region
9178       // after the target region.
9179       auto *TargetSucc = TargetRegion->getSingleSuccessor();
9180       VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc);
9181       VPBlockUtils::connectBlocks(TargetRegion, SinkRegion);
9182       VPBlockUtils::connectBlocks(SinkRegion, TargetSucc);
9183     } else {
9184       // The sink source is in a replicate region, we need to move the whole
9185       // replicate region, which should only contain a single recipe in the
9186       // main block.
9187       auto *SplitBlock =
9188           Target->getParent()->splitAt(std::next(Target->getIterator()));
9189 
9190       auto *SplitPred = SplitBlock->getSinglePredecessor();
9191 
9192       VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock);
9193       VPBlockUtils::connectBlocks(SplitPred, SinkRegion);
9194       VPBlockUtils::connectBlocks(SinkRegion, SplitBlock);
9195     }
9196   }
9197 
9198   VPlanTransforms::removeRedundantInductionCasts(*Plan);
9199 
9200   // Now that sink-after is done, move induction recipes for optimized truncates
9201   // to the phi section of the header block.
9202   for (VPWidenIntOrFpInductionRecipe *Ind : InductionsToMove)
9203     Ind->moveBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi());
9204 
9205   // Adjust the recipes for any inloop reductions.
9206   adjustRecipesForReductions(cast<VPBasicBlock>(TopRegion->getExit()), Plan,
9207                              RecipeBuilder, Range.Start);
9208 
9209   // Introduce a recipe to combine the incoming and previous values of a
9210   // first-order recurrence.
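  // Conceptually, per unroll part the splice produces
  //   <prev[VF-1], cur[0], ..., cur[VF-2]>,
  // i.e. the last lane of the previous value followed by the first VF-1 lanes
  // of the current value.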
9211   for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) {
9212     auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R);
9213     if (!RecurPhi)
9214       continue;
9215 
9216     VPRecipeBase *PrevRecipe = RecurPhi->getBackedgeRecipe();
9217     VPBasicBlock *InsertBlock = PrevRecipe->getParent();
9218     auto *Region = GetReplicateRegion(PrevRecipe);
9219     if (Region)
9220       InsertBlock = cast<VPBasicBlock>(Region->getSingleSuccessor());
9221     if (Region || PrevRecipe->isPhi())
9222       Builder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi());
9223     else
9224       Builder.setInsertPoint(InsertBlock, std::next(PrevRecipe->getIterator()));
9225 
9226     auto *RecurSplice = cast<VPInstruction>(
9227         Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice,
9228                              {RecurPhi, RecurPhi->getBackedgeValue()}));
9229 
9230     RecurPhi->replaceAllUsesWith(RecurSplice);
9231     // Set the first operand of RecurSplice to RecurPhi again, after replacing
9232     // all users.
9233     RecurSplice->setOperand(0, RecurPhi);
9234   }
9235 
9236   // Interleave memory: for each Interleave Group we marked earlier as relevant
9237   // for this VPlan, replace the Recipes widening its memory instructions with a
9238   // single VPInterleaveRecipe at its insertion point.
9239   for (auto IG : InterleaveGroups) {
9240     auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
9241         RecipeBuilder.getRecipe(IG->getInsertPos()));
9242     SmallVector<VPValue *, 4> StoredValues;
9243     for (unsigned i = 0; i < IG->getFactor(); ++i)
9244       if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) {
9245         auto *StoreR =
9246             cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI));
9247         StoredValues.push_back(StoreR->getStoredValue());
9248       }
9249 
9250     auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues,
9251                                         Recipe->getMask());
9252     VPIG->insertBefore(Recipe);
9253     unsigned J = 0;
9254     for (unsigned i = 0; i < IG->getFactor(); ++i)
9255       if (Instruction *Member = IG->getMember(i)) {
9256         if (!Member->getType()->isVoidTy()) {
9257           VPValue *OriginalV = Plan->getVPValue(Member);
9258           Plan->removeVPValueFor(Member);
9259           Plan->addVPValue(Member, VPIG->getVPValue(J));
9260           OriginalV->replaceAllUsesWith(VPIG->getVPValue(J));
9261           J++;
9262         }
9263         RecipeBuilder.getRecipe(Member)->eraseFromParent();
9264       }
9265   }
9266 
9267   // From this point onwards, VPlan-to-VPlan transformations may change the plan
9268   // in ways that make accessing values using original IR values incorrect.
9269   Plan->disableValue2VPValue();
9270 
9271   VPlanTransforms::sinkScalarOperands(*Plan);
9272   VPlanTransforms::mergeReplicateRegions(*Plan);
9273 
9274   std::string PlanName;
9275   raw_string_ostream RSO(PlanName);
9276   ElementCount VF = Range.Start;
9277   Plan->addVF(VF);
9278   RSO << "Initial VPlan for VF={" << VF;
9279   for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) {
9280     Plan->addVF(VF);
9281     RSO << "," << VF;
9282   }
9283   RSO << "},UF>=1";
9284   RSO.flush();
9285   Plan->setName(PlanName);
9286 
9287   // Fold Exit block into its predecessor if possible.
9288   // TODO: Fold block earlier once all VPlan transforms properly maintain a
9289   // VPBasicBlock as exit.
9290   VPBlockUtils::tryToMergeBlockIntoPredecessor(TopRegion->getExit());
9291 
9292   assert(VPlanVerifier::verifyPlanIsValid(*Plan) && "VPlan is invalid");
9293   return Plan;
9294 }
9295 
9296 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
9297   // Outer loop handling: They may require CFG and instruction level
9298   // transformations before even evaluating whether vectorization is profitable.
9299   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
9300   // the vectorization pipeline.
9301   assert(!OrigLoop->isInnermost());
9302   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
9303 
9304   // Create new empty VPlan
9305   auto Plan = std::make_unique<VPlan>();
9306 
9307   // Build hierarchical CFG
9308   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
9309   HCFGBuilder.buildHierarchicalCFG();
9310 
9311   for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
9312        VF *= 2)
9313     Plan->addVF(VF);
9314 
9315   if (EnableVPlanPredication) {
9316     VPlanPredicator VPP(*Plan);
9317     VPP.predicate();
9318 
9319     // Avoid running transformation to recipes until masked code generation in
9320     // VPlan-native path is in place.
9321     return Plan;
9322   }
9323 
9324   SmallPtrSet<Instruction *, 1> DeadInstructions;
9325   VPlanTransforms::VPInstructionsToVPRecipes(
9326       OrigLoop, Plan,
9327       [this](PHINode *P) { return Legal->getIntOrFpInductionDescriptor(P); },
9328       DeadInstructions, *PSE.getSE());
9329 
9330   addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(), DebugLoc(),
9331                         true, true);
9332   return Plan;
9333 }
9334 
9335 // Adjust the recipes for reductions. For in-loop reductions the chain of
9336 // instructions leading from the loop exit instr to the phi needs to be converted
9337 // to reductions, with one operand being vector and the other being the scalar
9338 // reduction chain. For other reductions, a select is introduced between the phi
9339 // and live-out recipes when folding the tail.
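// For example (illustrative IR), an in-loop integer add reduction
//   %red = phi [ %start, %ph ], [ %add, %latch ]
//   %add = add %red, %val
// is rewritten so that %add becomes a VPReductionRecipe computing, per part,
// %red + reduce.add(widened %val), keeping the chain operand scalar.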
9340 void LoopVectorizationPlanner::adjustRecipesForReductions(
9341     VPBasicBlock *LatchVPBB, VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder,
9342     ElementCount MinVF) {
9343   for (auto &Reduction : CM.getInLoopReductionChains()) {
9344     PHINode *Phi = Reduction.first;
9345     const RecurrenceDescriptor &RdxDesc =
9346         Legal->getReductionVars().find(Phi)->second;
9347     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9348 
9349     if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc))
9350       continue;
9351 
9352     // ReductionOperations are ordered top-down from the phi's use to the
9353     // LoopExitValue. We keep track of the previous item (the Chain) to tell
9354     // which of the two operands will remain scalar and which will be reduced.
9355     // For minmax the chain will be the select instructions.
9356     Instruction *Chain = Phi;
9357     for (Instruction *R : ReductionOperations) {
9358       VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
9359       RecurKind Kind = RdxDesc.getRecurrenceKind();
9360 
9361       VPValue *ChainOp = Plan->getVPValue(Chain);
9362       unsigned FirstOpId;
9363       assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
9364              "Only min/max recurrences allowed for inloop reductions");
9365       // Recognize a call to the llvm.fmuladd intrinsic.
9366       bool IsFMulAdd = (Kind == RecurKind::FMulAdd);
9367       assert((!IsFMulAdd || RecurrenceDescriptor::isFMulAddIntrinsic(R)) &&
9368              "Expected instruction to be a call to the llvm.fmuladd intrinsic");
9369       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9370         assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
9371                "Expected to replace a VPWidenSelectSC");
9372         FirstOpId = 1;
9373       } else {
9374         assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe) ||
9375                 (IsFMulAdd && isa<VPWidenCallRecipe>(WidenRecipe))) &&
9376                "Expected to replace a VPWidenSC");
9377         FirstOpId = 0;
9378       }
9379       unsigned VecOpId =
9380           R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
9381       VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));
9382 
9383       auto *CondOp = CM.foldTailByMasking()
9384                          ? RecipeBuilder.createBlockInMask(R->getParent(), Plan)
9385                          : nullptr;
9386 
9387       if (IsFMulAdd) {
9388         // If the instruction is a call to the llvm.fmuladd intrinsic then we
9389         // need to create an fmul recipe to use as the vector operand for the
9390         // fadd reduction.
9391         VPInstruction *FMulRecipe = new VPInstruction(
9392             Instruction::FMul, {VecOp, Plan->getVPValue(R->getOperand(1))});
9393         FMulRecipe->setFastMathFlags(R->getFastMathFlags());
9394         WidenRecipe->getParent()->insert(FMulRecipe,
9395                                          WidenRecipe->getIterator());
9396         VecOp = FMulRecipe;
9397       }
9398       VPReductionRecipe *RedRecipe =
9399           new VPReductionRecipe(&RdxDesc, R, ChainOp, VecOp, CondOp, TTI);
9400       WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
9401       Plan->removeVPValueFor(R);
9402       Plan->addVPValue(R, RedRecipe);
9403       WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator());
9404       WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
9405       WidenRecipe->eraseFromParent();
9406 
9407       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9408         VPRecipeBase *CompareRecipe =
9409             RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
9410         assert(isa<VPWidenRecipe>(CompareRecipe) &&
9411                "Expected to replace a VPWidenSC");
9412         assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 &&
9413                "Expected no remaining users");
9414         CompareRecipe->eraseFromParent();
9415       }
9416       Chain = R;
9417     }
9418   }
9419 
9420   // If tail is folded by masking, introduce selects between the phi
9421   // and the live-out instruction of each reduction, at the beginning of the
9422   // dedicated latch block.
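  // That is, the live-out value becomes select(header-mask, red, phi), so
  // lanes masked off by tail folding keep the phi's previous value.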
9423   if (CM.foldTailByMasking()) {
9424     Builder.setInsertPoint(LatchVPBB, LatchVPBB->begin());
9425     for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) {
9426       VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
9427       if (!PhiR || PhiR->isInLoop())
9428         continue;
9429       VPValue *Cond =
9430           RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
9431       VPValue *Red = PhiR->getBackedgeValue();
9432       assert(cast<VPRecipeBase>(Red->getDef())->getParent() != LatchVPBB &&
9433              "reduction recipe must be defined before latch");
9434       Builder.createNaryOp(Instruction::Select, {Cond, Red, PhiR});
9435     }
9436   }
9437 }
9438 
9439 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
9440 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
9441                                VPSlotTracker &SlotTracker) const {
9442   O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
9443   IG->getInsertPos()->printAsOperand(O, false);
9444   O << ", ";
9445   getAddr()->printAsOperand(O, SlotTracker);
9446   VPValue *Mask = getMask();
9447   if (Mask) {
9448     O << ", ";
9449     Mask->printAsOperand(O, SlotTracker);
9450   }
9451 
9452   unsigned OpIdx = 0;
9453   for (unsigned i = 0; i < IG->getFactor(); ++i) {
9454     if (!IG->getMember(i))
9455       continue;
9456     if (getNumStoreOperands() > 0) {
9457       O << "\n" << Indent << "  store ";
9458       getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker);
9459       O << " to index " << i;
9460     } else {
9461       O << "\n" << Indent << "  ";
9462       getVPValue(OpIdx)->printAsOperand(O, SlotTracker);
9463       O << " = load from index " << i;
9464     }
9465     ++OpIdx;
9466   }
9467 }
9468 #endif
9469 
9470 void VPWidenCallRecipe::execute(VPTransformState &State) {
9471   State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this,
9472                                   *this, State);
9473 }
9474 
9475 void VPWidenSelectRecipe::execute(VPTransformState &State) {
9476   auto &I = *cast<SelectInst>(getUnderlyingInstr());
9477   State.ILV->setDebugLocFromInst(&I);
9478 
9479   // The condition can be loop invariant but still defined inside the
9480   // loop. This means that we can't just use the original 'cond' value.
9481   // We have to take the 'vectorized' value and pick the first lane.
9482   // Instcombine will make this a no-op.
9483   auto *InvarCond =
9484       InvariantCond ? State.get(getOperand(0), VPIteration(0, 0)) : nullptr;
9485 
9486   for (unsigned Part = 0; Part < State.UF; ++Part) {
9487     Value *Cond = InvarCond ? InvarCond : State.get(getOperand(0), Part);
9488     Value *Op0 = State.get(getOperand(1), Part);
9489     Value *Op1 = State.get(getOperand(2), Part);
9490     Value *Sel = State.Builder.CreateSelect(Cond, Op0, Op1);
9491     State.set(this, Sel, Part);
9492     State.ILV->addMetadata(Sel, &I);
9493   }
9494 }
9495 
9496 void VPWidenRecipe::execute(VPTransformState &State) {
9497   auto &I = *cast<Instruction>(getUnderlyingValue());
9498   auto &Builder = State.Builder;
9499   switch (I.getOpcode()) {
9500   case Instruction::Call:
9501   case Instruction::Br:
9502   case Instruction::PHI:
9503   case Instruction::GetElementPtr:
9504   case Instruction::Select:
9505     llvm_unreachable("This instruction is handled by a different recipe.");
9506   case Instruction::UDiv:
9507   case Instruction::SDiv:
9508   case Instruction::SRem:
9509   case Instruction::URem:
9510   case Instruction::Add:
9511   case Instruction::FAdd:
9512   case Instruction::Sub:
9513   case Instruction::FSub:
9514   case Instruction::FNeg:
9515   case Instruction::Mul:
9516   case Instruction::FMul:
9517   case Instruction::FDiv:
9518   case Instruction::FRem:
9519   case Instruction::Shl:
9520   case Instruction::LShr:
9521   case Instruction::AShr:
9522   case Instruction::And:
9523   case Instruction::Or:
9524   case Instruction::Xor: {
9525     // Just widen unops and binops.
9526     State.ILV->setDebugLocFromInst(&I);
9527 
9528     for (unsigned Part = 0; Part < State.UF; ++Part) {
9529       SmallVector<Value *, 2> Ops;
9530       for (VPValue *VPOp : operands())
9531         Ops.push_back(State.get(VPOp, Part));
9532 
9533       Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
9534 
9535       if (auto *VecOp = dyn_cast<Instruction>(V)) {
9536         VecOp->copyIRFlags(&I);
9537 
9538         // If the instruction is vectorized and was in a basic block that needed
9539         // predication, we can't propagate poison-generating flags (nuw/nsw,
9540         // exact, etc.). The control flow has been linearized and the
9541         // instruction is no longer guarded by the predicate, which could cause
9542         // the flag properties to no longer hold.
9543         if (State.MayGeneratePoisonRecipes.contains(this))
9544           VecOp->dropPoisonGeneratingFlags();
9545       }
9546 
9547       // Use this vector value for all users of the original instruction.
9548       State.set(this, V, Part);
9549       State.ILV->addMetadata(V, &I);
9550     }
9551 
9552     break;
9553   }
9554   case Instruction::ICmp:
9555   case Instruction::FCmp: {
9556     // Widen compares. Generate vector compares.
9557     bool FCmp = (I.getOpcode() == Instruction::FCmp);
9558     auto *Cmp = cast<CmpInst>(&I);
9559     State.ILV->setDebugLocFromInst(Cmp);
9560     for (unsigned Part = 0; Part < State.UF; ++Part) {
9561       Value *A = State.get(getOperand(0), Part);
9562       Value *B = State.get(getOperand(1), Part);
9563       Value *C = nullptr;
9564       if (FCmp) {
9565         // Propagate fast math flags.
9566         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
9567         Builder.setFastMathFlags(Cmp->getFastMathFlags());
9568         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
9569       } else {
9570         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
9571       }
9572       State.set(this, C, Part);
9573       State.ILV->addMetadata(C, &I);
9574     }
9575 
9576     break;
9577   }
9578 
9579   case Instruction::ZExt:
9580   case Instruction::SExt:
9581   case Instruction::FPToUI:
9582   case Instruction::FPToSI:
9583   case Instruction::FPExt:
9584   case Instruction::PtrToInt:
9585   case Instruction::IntToPtr:
9586   case Instruction::SIToFP:
9587   case Instruction::UIToFP:
9588   case Instruction::Trunc:
9589   case Instruction::FPTrunc:
9590   case Instruction::BitCast: {
9591     auto *CI = cast<CastInst>(&I);
9592     State.ILV->setDebugLocFromInst(CI);
9593 
9594     // Vectorize casts.
9595     Type *DestTy = (State.VF.isScalar())
9596                        ? CI->getType()
9597                        : VectorType::get(CI->getType(), State.VF);
9598 
9599     for (unsigned Part = 0; Part < State.UF; ++Part) {
9600       Value *A = State.get(getOperand(0), Part);
9601       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
9602       State.set(this, Cast, Part);
9603       State.ILV->addMetadata(Cast, &I);
9604     }
9605     break;
9606   }
9607   default:
9608     // This instruction is not vectorized by simple widening.
9609     LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
9610     llvm_unreachable("Unhandled instruction!");
9611   } // end of switch.
9612 }
9613 
9614 void VPWidenGEPRecipe::execute(VPTransformState &State) {
9615   auto *GEP = cast<GetElementPtrInst>(getUnderlyingInstr());
9616   // Construct a vector GEP by widening the operands of the scalar GEP as
9617   // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
9618   // results in a vector of pointers when at least one operand of the GEP
9619   // is vector-typed. Thus, to keep the representation compact, we only use
9620   // vector-typed operands for loop-varying values.
9621 
9622   if (State.VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
9623     // If we are vectorizing, but the GEP has only loop-invariant operands,
9624     // the GEP we build (by only using vector-typed operands for
9625     // loop-varying values) would be a scalar pointer. Thus, to ensure we
9626     // produce a vector of pointers, we need to either arbitrarily pick an
9627     // operand to broadcast, or broadcast a clone of the original GEP.
9628     // Here, we broadcast a clone of the original.
9629     //
9630     // TODO: If at some point we decide to scalarize instructions having
9631     //       loop-invariant operands, this special case will no longer be
9632     //       required. We would add the scalarization decision to
9633     //       collectLoopScalars() and teach getVectorValue() to broadcast
9634     //       the lane-zero scalar value.
9635     auto *Clone = State.Builder.Insert(GEP->clone());
9636     for (unsigned Part = 0; Part < State.UF; ++Part) {
9637       Value *EntryPart = State.Builder.CreateVectorSplat(State.VF, Clone);
9638       State.set(this, EntryPart, Part);
9639       State.ILV->addMetadata(EntryPart, GEP);
9640     }
9641   } else {
9642     // If the GEP has at least one loop-varying operand, we are sure to
9643     // produce a vector of pointers. But if we are only unrolling, we want
9644     // to produce a scalar GEP for each unroll part. Thus, the GEP we
9645     // produce with the code below will be scalar (if VF == 1) or vector
9646     // (otherwise). Note that for the unroll-only case, we still maintain
9647     // values in the vector mapping with initVector, as we do for other
9648     // instructions.
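    //
    // Illustrative example (names hypothetical, assuming VF = 4): a scalar
    //   %g = getelementptr float, float* %base, i64 %iv
    // with a loop-varying index is widened into
    //   %wide.g = getelementptr float, float* %base, <4 x i64> %vec.iv
    // which yields a <4 x float*> vector of pointers.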
9649     for (unsigned Part = 0; Part < State.UF; ++Part) {
9650       // The pointer operand of the new GEP. If it's loop-invariant, we
9651       // won't broadcast it.
9652       auto *Ptr = IsPtrLoopInvariant
9653                       ? State.get(getOperand(0), VPIteration(0, 0))
9654                       : State.get(getOperand(0), Part);
9655 
9656       // Collect all the indices for the new GEP. If any index is
9657       // loop-invariant, we won't broadcast it.
9658       SmallVector<Value *, 4> Indices;
9659       for (unsigned I = 1, E = getNumOperands(); I < E; I++) {
9660         VPValue *Operand = getOperand(I);
9661         if (IsIndexLoopInvariant[I - 1])
9662           Indices.push_back(State.get(Operand, VPIteration(0, 0)));
9663         else
9664           Indices.push_back(State.get(Operand, Part));
9665       }
9666 
9667       // If the GEP instruction is vectorized and was in a basic block that
9668       // needed predication, we can't propagate the poison-generating 'inbounds'
9669       // flag. The control flow has been linearized and the GEP is no longer
      // guarded by the predicate, which could cause the 'inbounds' property to
      // no longer hold.
9672       bool IsInBounds =
9673           GEP->isInBounds() && State.MayGeneratePoisonRecipes.count(this) == 0;
9674 
9675       // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
9676       // but it should be a vector, otherwise.
9677       auto *NewGEP = IsInBounds
9678                          ? State.Builder.CreateInBoundsGEP(
9679                                GEP->getSourceElementType(), Ptr, Indices)
9680                          : State.Builder.CreateGEP(GEP->getSourceElementType(),
9681                                                    Ptr, Indices);
9682       assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
9683              "NewGEP is not a pointer vector");
9684       State.set(this, NewGEP, Part);
9685       State.ILV->addMetadata(NewGEP, GEP);
9686     }
9687   }
9688 }
9689 
9690 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
9691   assert(!State.Instance && "Int or FP induction being replicated.");
9692   auto *CanonicalIV = State.get(getParent()->getPlan()->getCanonicalIV(), 0);
9693   State.ILV->widenIntOrFpInduction(IV, getInductionDescriptor(),
9694                                    getStartValue()->getLiveInIRValue(),
9695                                    getTruncInst(), this, State, CanonicalIV);
9696 }
9697 
9698 void VPWidenPHIRecipe::execute(VPTransformState &State) {
9699   State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), this,
9700                                  State);
9701 }
9702 
9703 void VPBlendRecipe::execute(VPTransformState &State) {
9704   State.ILV->setDebugLocFromInst(Phi, &State.Builder);
9705   // We know that all PHIs in non-header blocks are converted into
9706   // selects, so we don't have to worry about the insertion order and we
9707   // can just use the builder.
9708   // At this point we generate the predication tree. There may be
9709   // duplications since this is a simple recursive scan, but future
9710   // optimizations will clean it up.
9711 
9712   unsigned NumIncoming = getNumIncomingValues();
9713 
9714   // Generate a sequence of selects of the form:
9715   // SELECT(Mask3, In3,
9716   //        SELECT(Mask2, In2,
9717   //               SELECT(Mask1, In1,
9718   //                      In0)))
  // Note that Mask0 is never used: lanes for which no path reaches this phi
  // are essentially undef and are taken from In0.
9721   InnerLoopVectorizer::VectorParts Entry(State.UF);
9722   for (unsigned In = 0; In < NumIncoming; ++In) {
9723     for (unsigned Part = 0; Part < State.UF; ++Part) {
      // We might have single-edge PHIs (blocks); in that case use the first
      // PHI operand directly as an identity 'select'.
9726       Value *In0 = State.get(getIncomingValue(In), Part);
9727       if (In == 0)
9728         Entry[Part] = In0; // Initialize with the first incoming value.
9729       else {
9730         // Select between the current value and the previous incoming edge
9731         // based on the incoming mask.
9732         Value *Cond = State.get(getMask(In), Part);
9733         Entry[Part] =
9734             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
9735       }
9736     }
9737   }
9738   for (unsigned Part = 0; Part < State.UF; ++Part)
9739     State.set(this, Entry[Part], Part);
9740 }
9741 
9742 void VPInterleaveRecipe::execute(VPTransformState &State) {
9743   assert(!State.Instance && "Interleave group being replicated.");
9744   State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(),
9745                                       getStoredValues(), getMask());
9746 }
9747 
9748 void VPReductionRecipe::execute(VPTransformState &State) {
9749   assert(!State.Instance && "Reduction being replicated.");
9750   Value *PrevInChain = State.get(getChainOp(), 0);
9751   RecurKind Kind = RdxDesc->getRecurrenceKind();
9752   bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc);
9753   // Propagate the fast-math flags carried by the underlying instruction.
9754   IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
9755   State.Builder.setFastMathFlags(RdxDesc->getFastMathFlags());
9756   for (unsigned Part = 0; Part < State.UF; ++Part) {
9757     Value *NewVecOp = State.get(getVecOp(), Part);
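    // If the reduction is predicated, blend the inactive lanes with the
    // recurrence identity (e.g. 0 for an integer add, 1 for a multiply) so
    // they do not contribute to the reduced value.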
9758     if (VPValue *Cond = getCondOp()) {
9759       Value *NewCond = State.get(Cond, Part);
9760       VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
9761       Value *Iden = RdxDesc->getRecurrenceIdentity(
9762           Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags());
9763       Value *IdenVec =
9764           State.Builder.CreateVectorSplat(VecTy->getElementCount(), Iden);
9765       Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
9766       NewVecOp = Select;
9767     }
9768     Value *NewRed;
9769     Value *NextInChain;
9770     if (IsOrdered) {
9771       if (State.VF.isVector())
9772         NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp,
9773                                         PrevInChain);
9774       else
9775         NewRed = State.Builder.CreateBinOp(
9776             (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), PrevInChain,
9777             NewVecOp);
9778       PrevInChain = NewRed;
9779     } else {
9780       PrevInChain = State.get(getChainOp(), Part);
9781       NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
9782     }
9783     if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9784       NextInChain =
9785           createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
9786                          NewRed, PrevInChain);
9787     } else if (IsOrdered)
9788       NextInChain = NewRed;
9789     else
9790       NextInChain = State.Builder.CreateBinOp(
9791           (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), NewRed,
9792           PrevInChain);
9793     State.set(this, NextInChain, Part);
9794   }
9795 }
9796 
9797 void VPReplicateRecipe::execute(VPTransformState &State) {
9798   if (State.Instance) { // Generate a single instance.
9799     assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
9800     State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *State.Instance,
9801                                     IsPredicated, State);
9802     // Insert scalar instance packing it into a vector.
9803     if (AlsoPack && State.VF.isVector()) {
9804       // If we're constructing lane 0, initialize to start from poison.
9805       if (State.Instance->Lane.isFirstLane()) {
9806         assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
9807         Value *Poison = PoisonValue::get(
9808             VectorType::get(getUnderlyingValue()->getType(), State.VF));
9809         State.set(this, Poison, State.Instance->Part);
9810       }
9811       State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
9812     }
9813     return;
9814   }
9815 
  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane for
  // each of the UF parts.
9819   unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
9820   assert((!State.VF.isScalable() || IsUniform) &&
9821          "Can't scalarize a scalable vector");
9822   for (unsigned Part = 0; Part < State.UF; ++Part)
9823     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
9824       State.ILV->scalarizeInstruction(getUnderlyingInstr(), this,
9825                                       VPIteration(Part, Lane), IsPredicated,
9826                                       State);
9827 }
9828 
9829 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
9830   assert(State.Instance && "Branch on Mask works only on single instance.");
9831 
9832   unsigned Part = State.Instance->Part;
9833   unsigned Lane = State.Instance->Lane.getKnownLane();
9834 
9835   Value *ConditionBit = nullptr;
9836   VPValue *BlockInMask = getMask();
9837   if (BlockInMask) {
9838     ConditionBit = State.get(BlockInMask, Part);
9839     if (ConditionBit->getType()->isVectorTy())
9840       ConditionBit = State.Builder.CreateExtractElement(
9841           ConditionBit, State.Builder.getInt32(Lane));
9842   } else // Block in mask is all-one.
9843     ConditionBit = State.Builder.getTrue();
9844 
9845   // Replace the temporary unreachable terminator with a new conditional branch,
9846   // whose two destinations will be set later when they are created.
9847   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
9848   assert(isa<UnreachableInst>(CurrentTerminator) &&
9849          "Expected to replace unreachable terminator with conditional branch.");
9850   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
9851   CondBr->setSuccessor(0, nullptr);
9852   ReplaceInstWithInst(CurrentTerminator, CondBr);
9853 }
9854 
9855 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
9856   assert(State.Instance && "Predicated instruction PHI works per instance.");
9857   Instruction *ScalarPredInst =
9858       cast<Instruction>(State.get(getOperand(0), *State.Instance));
9859   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
9860   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
9861   assert(PredicatingBB && "Predicated block has no single predecessor.");
9862   assert(isa<VPReplicateRecipe>(getOperand(0)) &&
9863          "operand must be VPReplicateRecipe");
9864 
9865   // By current pack/unpack logic we need to generate only a single phi node: if
9866   // a vector value for the predicated instruction exists at this point it means
9867   // the instruction has vector users only, and a phi for the vector value is
9868   // needed. In this case the recipe of the predicated instruction is marked to
9869   // also do that packing, thereby "hoisting" the insert-element sequence.
9870   // Otherwise, a phi node for the scalar value is needed.
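  //
  // Illustrative shape of the packing case (names hypothetical):
  //   %vphi = phi <VF x Ty> [ %vec.unmodified, %predicating.bb ],
  //                         [ %vec.with.new.elt, %predicated.bb ]
  // where %vec.with.new.elt is the insertelement emitted in the predicated
  // block.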
9871   unsigned Part = State.Instance->Part;
9872   if (State.hasVectorValue(getOperand(0), Part)) {
9873     Value *VectorValue = State.get(getOperand(0), Part);
9874     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
9875     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
9876     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
9877     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
9878     if (State.hasVectorValue(this, Part))
9879       State.reset(this, VPhi, Part);
9880     else
9881       State.set(this, VPhi, Part);
9882     // NOTE: Currently we need to update the value of the operand, so the next
9883     // predicated iteration inserts its generated value in the correct vector.
9884     State.reset(getOperand(0), VPhi, Part);
9885   } else {
9886     Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType();
9887     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
9888     Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
9889                      PredicatingBB);
9890     Phi->addIncoming(ScalarPredInst, PredicatedBB);
9891     if (State.hasScalarValue(this, *State.Instance))
9892       State.reset(this, Phi, *State.Instance);
9893     else
9894       State.set(this, Phi, *State.Instance);
9895     // NOTE: Currently we need to update the value of the operand, so the next
9896     // predicated iteration inserts its generated value in the correct vector.
9897     State.reset(getOperand(0), Phi, *State.Instance);
9898   }
9899 }
9900 
9901 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
9902   VPValue *StoredValue = isStore() ? getStoredValue() : nullptr;
9903 
  // Identify whether the widened memory instruction is a load or a store.
9905   LoadInst *LI = dyn_cast<LoadInst>(&Ingredient);
9906   StoreInst *SI = dyn_cast<StoreInst>(&Ingredient);
9907 
9908   assert((LI || SI) && "Invalid Load/Store instruction");
9909   assert((!SI || StoredValue) && "No stored value provided for widened store");
9910   assert((!LI || !StoredValue) && "Stored value provided for widened load");
9911 
9912   Type *ScalarDataTy = getLoadStoreType(&Ingredient);
9913 
9914   auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
9915   const Align Alignment = getLoadStoreAlignment(&Ingredient);
9916   bool CreateGatherScatter = !Consecutive;
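  // Non-consecutive accesses are widened into masked gather/scatter
  // intrinsics; consecutive accesses become one wide (possibly masked and/or
  // reversed) load or store per unroll part.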
9917 
9918   auto &Builder = State.Builder;
9919   InnerLoopVectorizer::VectorParts BlockInMaskParts(State.UF);
9920   bool isMaskRequired = getMask();
9921   if (isMaskRequired)
9922     for (unsigned Part = 0; Part < State.UF; ++Part)
9923       BlockInMaskParts[Part] = State.get(getMask(), Part);
9924 
9925   const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
9926     // Calculate the pointer for the specific unroll-part.
9927     GetElementPtrInst *PartPtr = nullptr;
9928 
9929     bool InBounds = false;
9930     if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
9931       InBounds = gep->isInBounds();
9932     if (Reverse) {
      // If the address is consecutive but reversed, then the
      // wide load/store needs to start at the last vector element.
      // RunTimeVF = VScale * VF.getKnownMinValue()
      // For fixed-width vectors, VScale is 1, so RunTimeVF = VF.getKnownMinValue().
9937       Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), State.VF);
9938       // NumElt = -Part * RunTimeVF
9939       Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF);
9940       // LastLane = 1 - RunTimeVF
9941       Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF);
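      // Illustrative arithmetic (assuming a fixed VF of 4, so RunTimeVF = 4):
      // for Part = 1, NumElt = -4 and LastLane = -3, so the part pointer ends
      // up at Ptr[-7] and the wide access covers Ptr[-7 .. -4]; the original
      // element order is restored by the vector reverse applied to the mask
      // here and to the loaded/stored data below.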
9942       PartPtr =
9943           cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt));
9944       PartPtr->setIsInBounds(InBounds);
9945       PartPtr = cast<GetElementPtrInst>(
9946           Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane));
9947       PartPtr->setIsInBounds(InBounds);
9948       if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
9949         BlockInMaskParts[Part] =
9950             Builder.CreateVectorReverse(BlockInMaskParts[Part], "reverse");
9951     } else {
9952       Value *Increment =
9953           createStepForVF(Builder, Builder.getInt32Ty(), State.VF, Part);
9954       PartPtr = cast<GetElementPtrInst>(
9955           Builder.CreateGEP(ScalarDataTy, Ptr, Increment));
9956       PartPtr->setIsInBounds(InBounds);
9957     }
9958 
9959     unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
9960     return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
9961   };
9962 
9963   // Handle Stores:
9964   if (SI) {
9965     State.ILV->setDebugLocFromInst(SI);
9966 
9967     for (unsigned Part = 0; Part < State.UF; ++Part) {
9968       Instruction *NewSI = nullptr;
9969       Value *StoredVal = State.get(StoredValue, Part);
9970       if (CreateGatherScatter) {
9971         Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
9972         Value *VectorGep = State.get(getAddr(), Part);
9973         NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
9974                                             MaskPart);
9975       } else {
9976         if (Reverse) {
9977           // If we store to reverse consecutive memory locations, then we need
9978           // to reverse the order of elements in the stored value.
9979           StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse");
9980           // We don't want to update the value in the map as it might be used in
9981           // another expression. So don't call resetVectorValue(StoredVal).
9982         }
9983         auto *VecPtr =
9984             CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0)));
9985         if (isMaskRequired)
9986           NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
9987                                             BlockInMaskParts[Part]);
9988         else
9989           NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
9990       }
9991       State.ILV->addMetadata(NewSI, SI);
9992     }
9993     return;
9994   }
9995 
9996   // Handle loads.
9997   assert(LI && "Must have a load instruction");
9998   State.ILV->setDebugLocFromInst(LI);
9999   for (unsigned Part = 0; Part < State.UF; ++Part) {
10000     Value *NewLI;
10001     if (CreateGatherScatter) {
10002       Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
10003       Value *VectorGep = State.get(getAddr(), Part);
10004       NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart,
10005                                          nullptr, "wide.masked.gather");
10006       State.ILV->addMetadata(NewLI, LI);
10007     } else {
10008       auto *VecPtr =
10009           CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0)));
10010       if (isMaskRequired)
10011         NewLI = Builder.CreateMaskedLoad(
10012             DataTy, VecPtr, Alignment, BlockInMaskParts[Part],
10013             PoisonValue::get(DataTy), "wide.masked.load");
10014       else
10015         NewLI =
10016             Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
10017 
      // Add metadata to the load itself; the value recorded in State below is
      // the reversed shuffle.
10019       State.ILV->addMetadata(NewLI, LI);
10020       if (Reverse)
10021         NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
10022     }
10023 
10024     State.set(this, NewLI, Part);
10025   }
10026 }
10027 
10028 // Determine how to lower the scalar epilogue, which depends on 1) optimising
10029 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
10030 // predication, and 4) a TTI hook that analyses whether the loop is suitable
10031 // for predication.
10032 static ScalarEpilogueLowering getScalarEpilogueLowering(
10033     Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
10034     BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
10035     AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
10036     LoopVectorizationLegality &LVL) {
10037   // 1) OptSize takes precedence over all other options, i.e. if this is set,
10038   // don't look at hints or options, and don't request a scalar epilogue.
10039   // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
10040   // LoopAccessInfo (due to code dependency and not being able to reliably get
10041   // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
10042   // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
10043   // versioning when the vectorization is forced, unlike hasOptSize. So revert
10044   // back to the old way and vectorize with versioning when forced. See D81345.)
10045   if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
10046                                                       PGSOQueryType::IRPass) &&
10047                           Hints.getForce() != LoopVectorizeHints::FK_Enabled))
10048     return CM_ScalarEpilogueNotAllowedOptSize;
10049 
10050   // 2) If set, obey the directives
10051   if (PreferPredicateOverEpilogue.getNumOccurrences()) {
10052     switch (PreferPredicateOverEpilogue) {
10053     case PreferPredicateTy::ScalarEpilogue:
10054       return CM_ScalarEpilogueAllowed;
10055     case PreferPredicateTy::PredicateElseScalarEpilogue:
10056       return CM_ScalarEpilogueNotNeededUsePredicate;
10057     case PreferPredicateTy::PredicateOrDontVectorize:
10058       return CM_ScalarEpilogueNotAllowedUsePredicate;
10059     };
10060   }
10061 
10062   // 3) If set, obey the hints
10063   switch (Hints.getPredicate()) {
10064   case LoopVectorizeHints::FK_Enabled:
10065     return CM_ScalarEpilogueNotNeededUsePredicate;
10066   case LoopVectorizeHints::FK_Disabled:
10067     return CM_ScalarEpilogueAllowed;
10068   };
10069 
  // 4) If the TTI hook indicates this is profitable, request predication.
10071   if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
10072                                        LVL.getLAI()))
10073     return CM_ScalarEpilogueNotNeededUsePredicate;
10074 
10075   return CM_ScalarEpilogueAllowed;
10076 }
10077 
10078 Value *VPTransformState::get(VPValue *Def, unsigned Part) {
  // If Values have been set for this Def, return the one relevant for \p Part.
10080   if (hasVectorValue(Def, Part))
10081     return Data.PerPartOutput[Def][Part];
10082 
10083   if (!hasScalarValue(Def, {Part, 0})) {
10084     Value *IRV = Def->getLiveInIRValue();
10085     Value *B = ILV->getBroadcastInstrs(IRV);
10086     set(Def, B, Part);
10087     return B;
10088   }
10089 
10090   Value *ScalarValue = get(Def, {Part, 0});
10091   // If we aren't vectorizing, we can just copy the scalar map values over
10092   // to the vector map.
10093   if (VF.isScalar()) {
10094     set(Def, ScalarValue, Part);
10095     return ScalarValue;
10096   }
10097 
10098   auto *RepR = dyn_cast<VPReplicateRecipe>(Def);
10099   bool IsUniform = RepR && RepR->isUniform();
10100 
10101   unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
10102   // Check if there is a scalar value for the selected lane.
10103   if (!hasScalarValue(Def, {Part, LastLane})) {
10104     // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform.
10105     assert(isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) &&
10106            "unexpected recipe found to be invariant");
10107     IsUniform = true;
10108     LastLane = 0;
10109   }
10110 
10111   auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
10112   // Set the insert point after the last scalarized instruction or after the
10113   // last PHI, if LastInst is a PHI. This ensures the insertelement sequence
10114   // will directly follow the scalar definitions.
10115   auto OldIP = Builder.saveIP();
10116   auto NewIP =
10117       isa<PHINode>(LastInst)
10118           ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI())
10119           : std::next(BasicBlock::iterator(LastInst));
10120   Builder.SetInsertPoint(&*NewIP);
10121 
10122   // However, if we are vectorizing, we need to construct the vector values.
10123   // If the value is known to be uniform after vectorization, we can just
10124   // broadcast the scalar value corresponding to lane zero for each unroll
10125   // iteration. Otherwise, we construct the vector values using
10126   // insertelement instructions. Since the resulting vectors are stored in
10127   // State, we will only generate the insertelements once.
10128   Value *VectorValue = nullptr;
10129   if (IsUniform) {
10130     VectorValue = ILV->getBroadcastInstrs(ScalarValue);
10131     set(Def, VectorValue, Part);
10132   } else {
    // Initialize packing with insertelements to start from poison.
    assert(!VF.isScalable() && "VF is assumed to be non scalable.");
    Value *Poison = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
    set(Def, Poison, Part);
10137     for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
10138       ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
10139     VectorValue = get(Def, Part);
10140   }
10141   Builder.restoreIP(OldIP);
10142   return VectorValue;
10143 }
10144 
10145 // Process the loop in the VPlan-native vectorization path. This path builds
// VPlan upfront in the vectorization pipeline, which allows VPlan-to-VPlan
// transformations to be applied from the very beginning without modifying the
10148 // input LLVM IR.
10149 static bool processLoopInVPlanNativePath(
10150     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
10151     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
10152     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
10153     OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
10154     ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints,
10155     LoopVectorizationRequirements &Requirements) {
10156 
10157   if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
10158     LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
10159     return false;
10160   }
10161   assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
10162   Function *F = L->getHeader()->getParent();
10163   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
10164 
10165   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
10166       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
10167 
10168   LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
10169                                 &Hints, IAI);
10170   // Use the planner for outer loop vectorization.
10171   // TODO: CM is not used at this point inside the planner. Turn CM into an
10172   // optional argument if we don't need it in the future.
10173   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints,
10174                                Requirements, ORE);
10175 
10176   // Get user vectorization factor.
10177   ElementCount UserVF = Hints.getWidth();
10178 
10179   CM.collectElementTypesForWidening();
10180 
10181   // Plan how to best vectorize, return the best VF and its cost.
10182   const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
10183 
10184   // If we are stress testing VPlan builds, do not attempt to generate vector
10185   // code. Masked vector code generation support will follow soon.
10186   // Also, do not attempt to vectorize if no vector code will be produced.
10187   if (VPlanBuildStressTest || EnableVPlanPredication ||
10188       VectorizationFactor::Disabled() == VF)
10189     return false;
10190 
10191   VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10192 
10193   {
10194     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10195                              F->getParent()->getDataLayout());
10196     InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
10197                            &CM, BFI, PSI, Checks);
10198     LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
10199                       << L->getHeader()->getParent()->getName() << "\"\n");
10200     LVP.executePlan(VF.Width, 1, BestPlan, LB, DT);
10201   }
10202 
10203   // Mark the loop as already vectorized to avoid vectorizing again.
10204   Hints.setAlreadyVectorized();
10205   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10206   return true;
10207 }
10208 
10209 // Emit a remark if there are stores to floats that required a floating point
// extension. If the vectorized loop was generated in a wider floating-point
// precision, there will be a performance penalty from the conversion overhead
// and the change in the vector width.
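//
// Illustrative pattern this catches (names hypothetical):
//   %ext   = fpext float %a to double
//   %mul   = fmul double %ext, %b
//   %trunc = fptrunc double %mul to float
//   store float %trunc, float* %p
// Walking upwards from the float-typed store reaches the fpext and triggers
// the remark.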
10213 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
10214   SmallVector<Instruction *, 4> Worklist;
10215   for (BasicBlock *BB : L->getBlocks()) {
10216     for (Instruction &Inst : *BB) {
10217       if (auto *S = dyn_cast<StoreInst>(&Inst)) {
10218         if (S->getValueOperand()->getType()->isFloatTy())
10219           Worklist.push_back(S);
10220       }
10221     }
10222   }
10223 
  // Traverse the floating point stores upwards, searching for floating point
10225   // conversions.
10226   SmallPtrSet<const Instruction *, 4> Visited;
10227   SmallPtrSet<const Instruction *, 4> EmittedRemark;
10228   while (!Worklist.empty()) {
10229     auto *I = Worklist.pop_back_val();
10230     if (!L->contains(I))
10231       continue;
10232     if (!Visited.insert(I).second)
10233       continue;
10234 
10235     // Emit a remark if the floating point store required a floating
10236     // point conversion.
10237     // TODO: More work could be done to identify the root cause such as a
10238     // constant or a function return type and point the user to it.
10239     if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
10240       ORE->emit([&]() {
10241         return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
10242                                           I->getDebugLoc(), L->getHeader())
10243                << "floating point conversion changes vector width. "
10244                << "Mixed floating point precision requires an up/down "
10245                << "cast that will negatively impact performance.";
10246       });
10247 
10248     for (Use &Op : I->operands())
10249       if (auto *OpI = dyn_cast<Instruction>(Op))
10250         Worklist.push_back(OpI);
10251   }
10252 }
10253 
10254 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
10255     : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
10256                                !EnableLoopInterleaving),
10257       VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
10258                               !EnableLoopVectorization) {}
10259 
10260 bool LoopVectorizePass::processLoop(Loop *L) {
10261   assert((EnableVPlanNativePath || L->isInnermost()) &&
10262          "VPlan-native path is not enabled. Only process inner loops.");
10263 
10264 #ifndef NDEBUG
10265   const std::string DebugLocStr = getDebugLocString(L);
10266 #endif /* NDEBUG */
10267 
10268   LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
10269                     << L->getHeader()->getParent()->getName() << "\" from "
10270                     << DebugLocStr << "\n");
10271 
10272   LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI);
10273 
10274   LLVM_DEBUG(
10275       dbgs() << "LV: Loop hints:"
10276              << " force="
10277              << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
10278                      ? "disabled"
10279                      : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
10280                             ? "enabled"
10281                             : "?"))
10282              << " width=" << Hints.getWidth()
10283              << " interleave=" << Hints.getInterleave() << "\n");
10284 
10285   // Function containing loop
10286   Function *F = L->getHeader()->getParent();
10287 
10288   // Looking at the diagnostic output is the only way to determine if a loop
10289   // was vectorized (other than looking at the IR or machine code), so it
10290   // is important to generate an optimization remark for each loop. Most of
10291   // these messages are generated as OptimizationRemarkAnalysis. Remarks
10292   // generated as OptimizationRemark and OptimizationRemarkMissed are
  // less verbose and report vectorized loops and unvectorized loops that may
10294   // benefit from vectorization, respectively.
10295 
10296   if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
10297     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
10298     return false;
10299   }
10300 
10301   PredicatedScalarEvolution PSE(*SE, *L);
10302 
10303   // Check if it is legal to vectorize the loop.
10304   LoopVectorizationRequirements Requirements;
10305   LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
10306                                 &Requirements, &Hints, DB, AC, BFI, PSI);
10307   if (!LVL.canVectorize(EnableVPlanNativePath)) {
10308     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
10309     Hints.emitRemarkWithHints();
10310     return false;
10311   }
10312 
10313   // Check the function attributes and profiles to find out if this function
10314   // should be optimized for size.
10315   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
10316       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);
10317 
10318   // Entrance to the VPlan-native vectorization path. Outer loops are processed
10319   // here. They may require CFG and instruction level transformations before
10320   // even evaluating whether vectorization is profitable. Since we cannot modify
10321   // the incoming IR, we need to build VPlan upfront in the vectorization
10322   // pipeline.
10323   if (!L->isInnermost())
10324     return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
10325                                         ORE, BFI, PSI, Hints, Requirements);
10326 
10327   assert(L->isInnermost() && "Inner loop expected.");
10328 
10329   // Check the loop for a trip count threshold: vectorize loops with a tiny trip
10330   // count by optimizing for size, to minimize overheads.
10331   auto ExpectedTC = getSmallBestKnownTC(*SE, L);
10332   if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
10333     LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
10334                       << "This loop is worth vectorizing only if no scalar "
10335                       << "iteration overheads are incurred.");
10336     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
10337       LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
10338     else {
10339       LLVM_DEBUG(dbgs() << "\n");
10340       SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
10341     }
10342   }
10343 
10344   // Check the function attributes to see if implicit floats are allowed.
10345   // FIXME: This check doesn't seem possibly correct -- what if the loop is
10346   // an integer loop and the vector instructions selected are purely integer
10347   // vector instructions?
10348   if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
10349     reportVectorizationFailure(
10350         "Can't vectorize when the NoImplicitFloat attribute is used",
10351         "loop not vectorized due to NoImplicitFloat attribute",
10352         "NoImplicitFloat", ORE, L);
10353     Hints.emitRemarkWithHints();
10354     return false;
10355   }
10356 
10357   // Check if the target supports potentially unsafe FP vectorization.
10358   // FIXME: Add a check for the type of safety issue (denormal, signaling)
10359   // for the target we're vectorizing for, to make sure none of the
10360   // additional fp-math flags can help.
10361   if (Hints.isPotentiallyUnsafe() &&
10362       TTI->isFPVectorizationPotentiallyUnsafe()) {
10363     reportVectorizationFailure(
10364         "Potentially unsafe FP op prevents vectorization",
10365         "loop not vectorized due to unsafe FP support.",
10366         "UnsafeFP", ORE, L);
10367     Hints.emitRemarkWithHints();
10368     return false;
10369   }
10370 
10371   bool AllowOrderedReductions;
10372   // If the flag is set, use that instead and override the TTI behaviour.
10373   if (ForceOrderedReductions.getNumOccurrences() > 0)
10374     AllowOrderedReductions = ForceOrderedReductions;
10375   else
10376     AllowOrderedReductions = TTI->enableOrderedReductions();
10377   if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) {
10378     ORE->emit([&]() {
10379       auto *ExactFPMathInst = Requirements.getExactFPInst();
10380       return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
10381                                                  ExactFPMathInst->getDebugLoc(),
10382                                                  ExactFPMathInst->getParent())
10383              << "loop not vectorized: cannot prove it is safe to reorder "
10384                 "floating-point operations";
10385     });
10386     LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
10387                          "reorder floating-point operations\n");
10388     Hints.emitRemarkWithHints();
10389     return false;
10390   }
10391 
10392   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
10393   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
10394 
10395   // If an override option has been passed in for interleaved accesses, use it.
10396   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
10397     UseInterleaved = EnableInterleavedMemAccesses;
10398 
10399   // Analyze interleaved memory accesses.
10400   if (UseInterleaved) {
10401     IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
10402   }
10403 
10404   // Use the cost model.
10405   LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
10406                                 F, &Hints, IAI);
10407   CM.collectValuesToIgnore();
10408   CM.collectElementTypesForWidening();
10409 
10410   // Use the planner for vectorization.
10411   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints,
10412                                Requirements, ORE);
10413 
10414   // Get user vectorization factor and interleave count.
10415   ElementCount UserVF = Hints.getWidth();
10416   unsigned UserIC = Hints.getInterleave();
10417 
10418   // Plan how to best vectorize, return the best VF and its cost.
10419   Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
10420 
10421   VectorizationFactor VF = VectorizationFactor::Disabled();
10422   unsigned IC = 1;
10423 
10424   if (MaybeVF) {
10425     VF = *MaybeVF;
10426     // Select the interleave count.
10427     IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue());
10428   }
10429 
10430   // Identify the diagnostic messages that should be produced.
10431   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
10432   bool VectorizeLoop = true, InterleaveLoop = true;
10433   if (VF.Width.isScalar()) {
10434     LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
10435     VecDiagMsg = std::make_pair(
10436         "VectorizationNotBeneficial",
10437         "the cost-model indicates that vectorization is not beneficial");
10438     VectorizeLoop = false;
10439   }
10440 
10441   if (!MaybeVF && UserIC > 1) {
10442     // Tell the user interleaving was avoided up-front, despite being explicitly
10443     // requested.
10444     LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
10445                          "interleaving should be avoided up front\n");
10446     IntDiagMsg = std::make_pair(
10447         "InterleavingAvoided",
10448         "Ignoring UserIC, because interleaving was avoided up front");
10449     InterleaveLoop = false;
10450   } else if (IC == 1 && UserIC <= 1) {
10451     // Tell the user interleaving is not beneficial.
10452     LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
10453     IntDiagMsg = std::make_pair(
10454         "InterleavingNotBeneficial",
10455         "the cost-model indicates that interleaving is not beneficial");
10456     InterleaveLoop = false;
10457     if (UserIC == 1) {
10458       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
10459       IntDiagMsg.second +=
10460           " and is explicitly disabled or interleave count is set to 1";
10461     }
10462   } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
10464     LLVM_DEBUG(
10465         dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
10466     IntDiagMsg = std::make_pair(
10467         "InterleavingBeneficialButDisabled",
10468         "the cost-model indicates that interleaving is beneficial "
10469         "but is explicitly disabled or interleave count is set to 1");
10470     InterleaveLoop = false;
10471   }
10472 
10473   // Override IC if user provided an interleave count.
10474   IC = UserIC > 0 ? UserIC : IC;
10475 
10476   // Emit diagnostic messages, if any.
10477   const char *VAPassName = Hints.vectorizeAnalysisPassName();
10478   if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
10480     ORE->emit([&]() {
10481       return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
10482                                       L->getStartLoc(), L->getHeader())
10483              << VecDiagMsg.second;
10484     });
10485     ORE->emit([&]() {
10486       return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
10487                                       L->getStartLoc(), L->getHeader())
10488              << IntDiagMsg.second;
10489     });
10490     return false;
10491   } else if (!VectorizeLoop && InterleaveLoop) {
10492     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10493     ORE->emit([&]() {
10494       return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
10495                                         L->getStartLoc(), L->getHeader())
10496              << VecDiagMsg.second;
10497     });
10498   } else if (VectorizeLoop && !InterleaveLoop) {
10499     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10500                       << ") in " << DebugLocStr << '\n');
10501     ORE->emit([&]() {
10502       return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
10503                                         L->getStartLoc(), L->getHeader())
10504              << IntDiagMsg.second;
10505     });
10506   } else if (VectorizeLoop && InterleaveLoop) {
10507     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10508                       << ") in " << DebugLocStr << '\n');
10509     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10510   }
10511 
10512   bool DisableRuntimeUnroll = false;
10513   MDNode *OrigLoopID = L->getLoopID();
10514   {
10515     // Optimistically generate runtime checks. Drop them if they turn out to not
10516     // be profitable. Limit the scope of Checks, so the cleanup happens
    // immediately after vector code generation is done.
10518     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10519                              F->getParent()->getDataLayout());
10520     if (!VF.Width.isScalar() || IC > 1)
10521       Checks.Create(L, *LVL.getLAI(), PSE.getUnionPredicate());
10522 
10523     using namespace ore;
10524     if (!VectorizeLoop) {
10525       assert(IC > 1 && "interleave count should not be 1 or 0");
10526       // If we decided that it is not legal to vectorize the loop, then
10527       // interleave it.
10528       InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
10529                                  &CM, BFI, PSI, Checks);
10530 
10531       VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10532       LVP.executePlan(VF.Width, IC, BestPlan, Unroller, DT);
10533 
10534       ORE->emit([&]() {
10535         return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
10536                                   L->getHeader())
10537                << "interleaved loop (interleaved count: "
10538                << NV("InterleaveCount", IC) << ")";
10539       });
10540     } else {
10541       // If we decided that it is *legal* to vectorize the loop, then do it.
10542 
10543       // Consider vectorizing the epilogue too if it's profitable.
10544       VectorizationFactor EpilogueVF =
10545           CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
10546       if (EpilogueVF.Width.isVector()) {
10547 
10548         // The first pass vectorizes the main loop and creates a scalar epilogue
10549         // to be vectorized by executing the plan (potentially with a different
10550         // factor) again shortly afterwards.
10551         EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1);
10552         EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE,
10553                                            EPI, &LVL, &CM, BFI, PSI, Checks);
10554 
10555         VPlan &BestMainPlan = LVP.getBestPlanFor(EPI.MainLoopVF);
10556         LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF, BestMainPlan, MainILV,
10557                         DT);
10558         ++LoopsVectorized;
10559 
10560         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10561         formLCSSARecursively(*L, *DT, LI, SE);
10562 
10563         // Second pass vectorizes the epilogue and adjusts the control flow
10564         // edges from the first pass.
10565         EPI.MainLoopVF = EPI.EpilogueVF;
10566         EPI.MainLoopUF = EPI.EpilogueUF;
10567         EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
10568                                                  ORE, EPI, &LVL, &CM, BFI, PSI,
10569                                                  Checks);
10570 
10571         VPlan &BestEpiPlan = LVP.getBestPlanFor(EPI.EpilogueVF);
10572         LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV,
10573                         DT);
10574         ++LoopsEpilogueVectorized;
10575 
10576         if (!MainILV.areSafetyChecksAdded())
10577           DisableRuntimeUnroll = true;
10578       } else {
10579         InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
10580                                &LVL, &CM, BFI, PSI, Checks);
10581 
10582         VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10583         LVP.executePlan(VF.Width, IC, BestPlan, LB, DT);
10584         ++LoopsVectorized;
10585 
        // Add metadata to disable runtime unrolling of the scalar loop when
        // there are no runtime checks about strides and memory. A scalar loop
        // that is rarely used is not worth unrolling.
10589         if (!LB.areSafetyChecksAdded())
10590           DisableRuntimeUnroll = true;
10591       }
10592       // Report the vectorization decision.
10593       ORE->emit([&]() {
10594         return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
10595                                   L->getHeader())
10596                << "vectorized loop (vectorization width: "
10597                << NV("VectorizationFactor", VF.Width)
10598                << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
10599       });
10600     }
10601 
10602     if (ORE->allowExtraAnalysis(LV_NAME))
10603       checkMixedPrecision(L, ORE);
10604   }
10605 
10606   Optional<MDNode *> RemainderLoopID =
10607       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
10608                                       LLVMLoopVectorizeFollowupEpilogue});
10609   if (RemainderLoopID.hasValue()) {
10610     L->setLoopID(RemainderLoopID.getValue());
10611   } else {
10612     if (DisableRuntimeUnroll)
10613       AddRuntimeUnrollDisableMetaData(L);
10614 
10615     // Mark the loop as already vectorized to avoid vectorizing again.
10616     Hints.setAlreadyVectorized();
10617   }
10618 
10619   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10620   return true;
10621 }
10622 
10623 LoopVectorizeResult LoopVectorizePass::runImpl(
10624     Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
10625     DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
10626     DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
10627     std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
10628     OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
10629   SE = &SE_;
10630   LI = &LI_;
10631   TTI = &TTI_;
10632   DT = &DT_;
10633   BFI = &BFI_;
10634   TLI = TLI_;
10635   AA = &AA_;
10636   AC = &AC_;
10637   GetLAA = &GetLAA_;
10638   DB = &DB_;
10639   ORE = &ORE_;
10640   PSI = PSI_;
10641 
10642   // Don't attempt if
10643   // 1. the target claims to have no vector registers, and
10644   // 2. interleaving won't help ILP.
10645   //
10646   // The second condition is necessary because, even if the target has no
10647   // vector registers, loop vectorization may still enable scalar
10648   // interleaving.
10649   if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
10650       TTI->getMaxInterleaveFactor(1) < 2)
10651     return LoopVectorizeResult(false, false);
10652 
10653   bool Changed = false, CFGChanged = false;
10654 
10655   // The vectorizer requires loops to be in simplified form.
10656   // Since simplification may add new inner loops, it has to run before the
10657   // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
10659   // vectorized.
10660   for (auto &L : *LI)
10661     Changed |= CFGChanged |=
10662         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10663 
10664   // Build up a worklist of inner-loops to vectorize. This is necessary as
10665   // the act of vectorizing or partially unrolling a loop creates new loops
10666   // and can invalidate iterators across the loops.
10667   SmallVector<Loop *, 8> Worklist;
10668 
10669   for (Loop *L : *LI)
10670     collectSupportedLoops(*L, LI, ORE, Worklist);
10671 
10672   LoopsAnalyzed += Worklist.size();
10673 
10674   // Now walk the identified inner loops.
10675   while (!Worklist.empty()) {
10676     Loop *L = Worklist.pop_back_val();
10677 
10678     // For the inner loops we actually process, form LCSSA to simplify the
10679     // transform.
10680     Changed |= formLCSSARecursively(*L, *DT, LI, SE);
10681 
10682     Changed |= CFGChanged |= processLoop(L);
10683   }
10684 
10685   // Process each loop nest in the function.
10686   return LoopVectorizeResult(Changed, CFGChanged);
10687 }
10688 
10689 PreservedAnalyses LoopVectorizePass::run(Function &F,
10690                                          FunctionAnalysisManager &AM) {
10691     auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
10692     auto &LI = AM.getResult<LoopAnalysis>(F);
10693     auto &TTI = AM.getResult<TargetIRAnalysis>(F);
10694     auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
10695     auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
10696     auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
10697     auto &AA = AM.getResult<AAManager>(F);
10698     auto &AC = AM.getResult<AssumptionAnalysis>(F);
10699     auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
10700     auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
10701 
10702     auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
10703     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
10704         [&](Loop &L) -> const LoopAccessInfo & {
10705       LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,      SE,
10706                                         TLI, TTI, nullptr, nullptr, nullptr};
10707       return LAM.getResult<LoopAccessAnalysis>(L, AR);
10708     };
10709     auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
10710     ProfileSummaryInfo *PSI =
10711         MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
10712     LoopVectorizeResult Result =
10713         runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
10714     if (!Result.MadeAnyChange)
10715       return PreservedAnalyses::all();
10716     PreservedAnalyses PA;
10717 
10718     // We currently do not preserve loopinfo/dominator analyses with outer loop
10719     // vectorization. Until this is addressed, mark these analyses as preserved
    // only for the non-VPlan-native path.
10721     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
10722     if (!EnableVPlanNativePath) {
10723       PA.preserve<LoopAnalysis>();
10724       PA.preserve<DominatorTreeAnalysis>();
10725     }
10726 
10727     if (Result.MadeCFGChange) {
10728       // Making CFG changes likely means a loop got vectorized. Indicate that
10729       // extra simplification passes should be run.
      // TODO: MadeCFGChange is not a perfect proxy. Extra passes should only
10731       // be run if runtime checks have been added.
10732       AM.getResult<ShouldRunExtraVectorPasses>(F);
10733       PA.preserve<ShouldRunExtraVectorPasses>();
10734     } else {
10735       PA.preserveSet<CFGAnalyses>();
10736     }
10737     return PA;
10738 }
10739 
10740 void LoopVectorizePass::printPipeline(
10741     raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
10742   static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
10743       OS, MapClassName2PassName);
10744 
10745   OS << "<";
10746   OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
10747   OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
10748   OS << ">";
10749 }
10750