1 //===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
10 // and generates target-independent LLVM-IR.
11 // The vectorizer uses the TargetTransformInfo analysis to estimate the costs
12 // of instructions in order to estimate the profitability of vectorization.
13 //
14 // The loop vectorizer combines consecutive loop iterations into a single
15 // 'wide' iteration. After this transformation the index is incremented
16 // by the SIMD vector width, and not by one.
17 //
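// As an illustration (not part of the pass logic), with a vectorization
// factor (VF) of 4 a scalar loop such as
//
//   for (int i = 0; i < n; ++i)
//     a[i] = b[i] + c[i];
//
// is conceptually rewritten so that each wide iteration performs one SIMD
// add over four consecutive elements, roughly
//
//   for (int i = 0; i + 3 < n; i += 4)
//     a[i..i+3] = b[i..i+3] + c[i..i+3];
//
// with the remaining n % 4 iterations handled by a scalar remainder loop.
//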
// This pass has four parts:
19 // 1. The main loop pass that drives the different parts.
20 // 2. LoopVectorizationLegality - A unit that checks for the legality
21 //    of the vectorization.
22 // 3. InnerLoopVectorizer - A unit that performs the actual
23 //    widening of instructions.
24 // 4. LoopVectorizationCostModel - A unit that checks for the profitability
25 //    of vectorization. It decides on the optimal vector width, which
26 //    can be one, if vectorization is not profitable.
27 //
// There is an ongoing development effort to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
30 // docs/Proposal/VectorizationPlan.rst and
31 // http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
32 // purpose, we temporarily introduced the VPlan-native vectorization path: an
33 // alternative vectorization path that is natively implemented on top of the
34 // VPlan infrastructure. See EnableVPlanNativePath for enabling.
35 //
36 //===----------------------------------------------------------------------===//
37 //
38 // The reduction-variable vectorization is based on the paper:
39 //  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
40 //
41 // Variable uniformity checks are inspired by:
42 //  Karrenberg, R. and Hack, S. Whole Function Vectorization.
43 //
44 // The interleaved access vectorization is based on the paper:
45 //  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
46 //  Data for SIMD
47 //
48 // Other ideas/concepts are from:
49 //  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
50 //
51 //  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
52 //  Vectorizing Compilers.
53 //
54 //===----------------------------------------------------------------------===//
55 
56 #include "llvm/Transforms/Vectorize/LoopVectorize.h"
57 #include "LoopVectorizationPlanner.h"
58 #include "VPRecipeBuilder.h"
59 #include "VPlan.h"
60 #include "VPlanHCFGBuilder.h"
61 #include "VPlanPredicator.h"
62 #include "VPlanTransforms.h"
63 #include "llvm/ADT/APInt.h"
64 #include "llvm/ADT/ArrayRef.h"
65 #include "llvm/ADT/DenseMap.h"
66 #include "llvm/ADT/DenseMapInfo.h"
67 #include "llvm/ADT/Hashing.h"
68 #include "llvm/ADT/MapVector.h"
69 #include "llvm/ADT/None.h"
70 #include "llvm/ADT/Optional.h"
71 #include "llvm/ADT/STLExtras.h"
72 #include "llvm/ADT/SmallPtrSet.h"
73 #include "llvm/ADT/SmallSet.h"
74 #include "llvm/ADT/SmallVector.h"
75 #include "llvm/ADT/Statistic.h"
76 #include "llvm/ADT/StringRef.h"
77 #include "llvm/ADT/Twine.h"
78 #include "llvm/ADT/iterator_range.h"
79 #include "llvm/Analysis/AssumptionCache.h"
80 #include "llvm/Analysis/BasicAliasAnalysis.h"
81 #include "llvm/Analysis/BlockFrequencyInfo.h"
82 #include "llvm/Analysis/CFG.h"
83 #include "llvm/Analysis/CodeMetrics.h"
84 #include "llvm/Analysis/DemandedBits.h"
85 #include "llvm/Analysis/GlobalsModRef.h"
86 #include "llvm/Analysis/LoopAccessAnalysis.h"
87 #include "llvm/Analysis/LoopAnalysisManager.h"
88 #include "llvm/Analysis/LoopInfo.h"
89 #include "llvm/Analysis/LoopIterator.h"
90 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
91 #include "llvm/Analysis/ProfileSummaryInfo.h"
92 #include "llvm/Analysis/ScalarEvolution.h"
93 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
94 #include "llvm/Analysis/TargetLibraryInfo.h"
95 #include "llvm/Analysis/TargetTransformInfo.h"
96 #include "llvm/Analysis/VectorUtils.h"
97 #include "llvm/IR/Attributes.h"
98 #include "llvm/IR/BasicBlock.h"
99 #include "llvm/IR/CFG.h"
100 #include "llvm/IR/Constant.h"
101 #include "llvm/IR/Constants.h"
102 #include "llvm/IR/DataLayout.h"
103 #include "llvm/IR/DebugInfoMetadata.h"
104 #include "llvm/IR/DebugLoc.h"
105 #include "llvm/IR/DerivedTypes.h"
106 #include "llvm/IR/DiagnosticInfo.h"
107 #include "llvm/IR/Dominators.h"
108 #include "llvm/IR/Function.h"
109 #include "llvm/IR/IRBuilder.h"
110 #include "llvm/IR/InstrTypes.h"
111 #include "llvm/IR/Instruction.h"
112 #include "llvm/IR/Instructions.h"
113 #include "llvm/IR/IntrinsicInst.h"
114 #include "llvm/IR/Intrinsics.h"
115 #include "llvm/IR/Metadata.h"
116 #include "llvm/IR/Module.h"
117 #include "llvm/IR/Operator.h"
118 #include "llvm/IR/PatternMatch.h"
119 #include "llvm/IR/Type.h"
120 #include "llvm/IR/Use.h"
121 #include "llvm/IR/User.h"
122 #include "llvm/IR/Value.h"
123 #include "llvm/IR/ValueHandle.h"
124 #include "llvm/IR/Verifier.h"
125 #include "llvm/InitializePasses.h"
126 #include "llvm/Pass.h"
127 #include "llvm/Support/Casting.h"
128 #include "llvm/Support/CommandLine.h"
129 #include "llvm/Support/Compiler.h"
130 #include "llvm/Support/Debug.h"
131 #include "llvm/Support/ErrorHandling.h"
132 #include "llvm/Support/InstructionCost.h"
133 #include "llvm/Support/MathExtras.h"
134 #include "llvm/Support/raw_ostream.h"
135 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
136 #include "llvm/Transforms/Utils/InjectTLIMappings.h"
137 #include "llvm/Transforms/Utils/LoopSimplify.h"
138 #include "llvm/Transforms/Utils/LoopUtils.h"
139 #include "llvm/Transforms/Utils/LoopVersioning.h"
140 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
141 #include "llvm/Transforms/Utils/SizeOpts.h"
142 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
143 #include <algorithm>
144 #include <cassert>
145 #include <cstdint>
146 #include <functional>
147 #include <iterator>
148 #include <limits>
149 #include <memory>
150 #include <string>
151 #include <tuple>
152 #include <utility>
153 
154 using namespace llvm;
155 
156 #define LV_NAME "loop-vectorize"
157 #define DEBUG_TYPE LV_NAME
158 
159 #ifndef NDEBUG
160 const char VerboseDebug[] = DEBUG_TYPE "-verbose";
161 #endif
162 
163 /// @{
164 /// Metadata attribute names
165 const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
166 const char LLVMLoopVectorizeFollowupVectorized[] =
167     "llvm.loop.vectorize.followup_vectorized";
168 const char LLVMLoopVectorizeFollowupEpilogue[] =
169     "llvm.loop.vectorize.followup_epilogue";
170 /// @}
171 
172 STATISTIC(LoopsVectorized, "Number of loops vectorized");
173 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
174 STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");
175 
176 static cl::opt<bool> EnableEpilogueVectorization(
177     "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
178     cl::desc("Enable vectorization of epilogue loops."));
179 
180 static cl::opt<unsigned> EpilogueVectorizationForceVF(
181     "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
182     cl::desc("When epilogue vectorization is enabled, and a value greater than "
183              "1 is specified, forces the given VF for all applicable epilogue "
184              "loops."));
185 
186 static cl::opt<unsigned> EpilogueVectorizationMinVF(
187     "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
188     cl::desc("Only loops with vectorization factor equal to or larger than "
189              "the specified value are considered for epilogue vectorization."));
190 
191 /// Loops with a known constant trip count below this number are vectorized only
192 /// if no scalar iteration overheads are incurred.
193 static cl::opt<unsigned> TinyTripCountVectorThreshold(
194     "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
195     cl::desc("Loops with a constant trip count that is smaller than this "
196              "value are vectorized only if no scalar iteration overheads "
197              "are incurred."));
198 
199 static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
200     "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
201     cl::desc("The maximum allowed number of runtime memory checks with a "
202              "vectorize(enable) pragma."));
203 
// The option prefer-predicate-over-epilogue indicates that an epilogue is
// undesired and that predication should be preferred instead; the enum below
// lists the options. I.e., the vectorizer will try to fold the tail loop
// (epilogue) into the vector body and predicate the instructions accordingly.
// If tail-folding fails, there are different fallback strategies depending on
// these values:
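//
// As a rough illustration with VF = 4: with a scalar epilogue, the vector
// body handles the first (n / 4) * 4 iterations and a scalar loop handles
// the last n % 4 iterations; with tail-folding there is no scalar remainder,
// and every vector iteration is executed under a lane mask such as
// (i + lane < n), so the final, partial iteration is predicated instead of
// peeled off into an epilogue.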
209 namespace PreferPredicateTy {
210   enum Option {
211     ScalarEpilogue = 0,
212     PredicateElseScalarEpilogue,
213     PredicateOrDontVectorize
214   };
215 } // namespace PreferPredicateTy
216 
217 static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
218     "prefer-predicate-over-epilogue",
219     cl::init(PreferPredicateTy::ScalarEpilogue),
220     cl::Hidden,
221     cl::desc("Tail-folding and predication preferences over creating a scalar "
222              "epilogue loop."),
223     cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
224                          "scalar-epilogue",
225                          "Don't tail-predicate loops, create scalar epilogue"),
226               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
227                          "predicate-else-scalar-epilogue",
228                          "prefer tail-folding, create scalar epilogue if tail "
229                          "folding fails."),
230               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
231                          "predicate-dont-vectorize",
                         "prefer tail-folding, don't attempt vectorization if "
233                          "tail-folding fails.")));
234 
235 static cl::opt<bool> MaximizeBandwidth(
236     "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting the vectorization factor, "
             "which will be determined by the smallest type in the loop."));
239 
240 static cl::opt<bool> EnableInterleavedMemAccesses(
241     "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
242     cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
243 
244 /// An interleave-group may need masking if it resides in a block that needs
245 /// predication, or in order to mask away gaps.
246 static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
247     "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
248     cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));
249 
250 static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
251     "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
253              "below this number"));
254 
255 static cl::opt<unsigned> ForceTargetNumScalarRegs(
256     "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
257     cl::desc("A flag that overrides the target's number of scalar registers."));
258 
259 static cl::opt<unsigned> ForceTargetNumVectorRegs(
260     "force-target-num-vector-regs", cl::init(0), cl::Hidden,
261     cl::desc("A flag that overrides the target's number of vector registers."));
262 
263 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
264     "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
265     cl::desc("A flag that overrides the target's max interleave factor for "
266              "scalar loops."));
267 
268 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
269     "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
270     cl::desc("A flag that overrides the target's max interleave factor for "
271              "vectorized loops."));
272 
273 static cl::opt<unsigned> ForceTargetInstructionCost(
274     "force-target-instruction-cost", cl::init(0), cl::Hidden,
275     cl::desc("A flag that overrides the target's expected cost for "
276              "an instruction to a single constant value. Mostly "
277              "useful for getting consistent testing."));
278 
279 static cl::opt<bool> ForceTargetSupportsScalableVectors(
280     "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
281     cl::desc(
282         "Pretend that scalable vectors are supported, even if the target does "
283         "not support them. This flag should only be used for testing."));
284 
285 static cl::opt<unsigned> SmallLoopCost(
286     "small-loop-cost", cl::init(20), cl::Hidden,
287     cl::desc(
288         "The cost of a loop that is considered 'small' by the interleaver."));
289 
290 static cl::opt<bool> LoopVectorizeWithBlockFrequency(
291     "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
292     cl::desc("Enable the use of the block frequency analysis to access PGO "
293              "heuristics minimizing code growth in cold regions and being more "
294              "aggressive in hot regions."));
295 
296 // Runtime interleave loops for load/store throughput.
297 static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
298     "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
299     cl::desc(
300         "Enable runtime interleaving until load/store ports are saturated"));
301 
302 /// Interleave small loops with scalar reductions.
303 static cl::opt<bool> InterleaveSmallLoopScalarReduction(
304     "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
305     cl::desc("Enable interleaving for loops with small iteration counts that "
306              "contain scalar reductions to expose ILP."));
307 
308 /// The number of stores in a loop that are allowed to need predication.
309 static cl::opt<unsigned> NumberOfStoresToPredicate(
310     "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
311     cl::desc("Max number of stores to be predicated behind an if."));
312 
313 static cl::opt<bool> EnableIndVarRegisterHeur(
314     "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
315     cl::desc("Count the induction variable only once when interleaving"));
316 
317 static cl::opt<bool> EnableCondStoresVectorization(
318     "enable-cond-stores-vec", cl::init(true), cl::Hidden,
319     cl::desc("Enable if predication of stores during vectorization."));
320 
321 static cl::opt<unsigned> MaxNestedScalarReductionIC(
322     "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
323     cl::desc("The maximum interleave count to use when interleaving a scalar "
324              "reduction in a nested loop."));
325 
326 static cl::opt<bool>
327     PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
328                            cl::Hidden,
329                            cl::desc("Prefer in-loop vector reductions, "
                                    "overriding the target's preference."));
331 
332 static cl::opt<bool> ForceOrderedReductions(
333     "force-ordered-reductions", cl::init(false), cl::Hidden,
    cl::desc("Enable the vectorization of loops with in-order (strict) "
335              "FP reductions"));
336 
337 static cl::opt<bool> PreferPredicatedReductionSelect(
338     "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
339     cl::desc(
        "Prefer predicating a reduction operation over an after-loop select."));
341 
342 cl::opt<bool> EnableVPlanNativePath(
343     "enable-vplan-native-path", cl::init(false), cl::Hidden,
344     cl::desc("Enable VPlan-native vectorization path with "
345              "support for outer loop vectorization."));
346 
347 // FIXME: Remove this switch once we have divergence analysis. Currently we
348 // assume divergent non-backedge branches when this switch is true.
349 cl::opt<bool> EnableVPlanPredication(
350     "enable-vplan-predication", cl::init(false), cl::Hidden,
351     cl::desc("Enable VPlan-native vectorization path predicator with "
352              "support for outer loop vectorization."));
353 
354 // This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
356 // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
357 // verification of the H-CFGs built.
358 static cl::opt<bool> VPlanBuildStressTest(
359     "vplan-build-stress-test", cl::init(false), cl::Hidden,
360     cl::desc(
361         "Build VPlan for every supported loop nest in the function and bail "
362         "out right after the build (stress test the VPlan H-CFG construction "
363         "in the VPlan-native vectorization path)."));
364 
365 cl::opt<bool> llvm::EnableLoopInterleaving(
366     "interleave-loops", cl::init(true), cl::Hidden,
367     cl::desc("Enable loop interleaving in Loop vectorization passes"));
368 cl::opt<bool> llvm::EnableLoopVectorization(
369     "vectorize-loops", cl::init(true), cl::Hidden,
370     cl::desc("Run the Loop vectorization passes"));
371 
372 cl::opt<bool> PrintVPlansInDotFormat(
373     "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
374     cl::desc("Use dot format instead of plain text when dumping VPlans"));
375 
376 /// A helper function that returns true if the given type is irregular. The
377 /// type is irregular if its allocated size doesn't equal the store size of an
378 /// element of the corresponding vector type.
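/// For example, with a typical data layout an i32 is regular (32 bits of data
/// and 32 bits of allocated storage), whereas an i1 is irregular (1 bit of
/// data in 8 bits of allocated storage), so an array of i1 is not bitcast
/// compatible with a <N x i1> vector.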
379 static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
380   // Determine if an array of N elements of type Ty is "bitcast compatible"
381   // with a <N x Ty> vector.
382   // This is only true if there is no padding between the array elements.
383   return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
384 }
385 
386 /// A helper function that returns the reciprocal of the block probability of
387 /// predicated blocks. If we return X, we are assuming the predicated block
388 /// will execute once for every X iterations of the loop header.
389 ///
390 /// TODO: We should use actual block probability here, if available. Currently,
391 ///       we always assume predicated blocks have a 50% chance of executing.
392 static unsigned getReciprocalPredBlockProb() { return 2; }
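// For example, getReciprocalPredBlockProb() currently returns 2 (a 50%
// execution probability), so costs attributed to a predicated block are
// divided by 2 to reflect the assumption that the block executes on only
// half of the header's iterations.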
393 
394 /// A helper function that returns an integer or floating-point constant with
395 /// value C.
396 static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
397   return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
398                            : ConstantFP::get(Ty, C);
399 }
400 
401 /// Returns "best known" trip count for the specified loop \p L as defined by
402 /// the following procedure:
403 ///   1) Returns exact trip count if it is known.
404 ///   2) Returns expected trip count according to profile data if any.
405 ///   3) Returns upper bound estimate if it is known.
406 ///   4) Returns None if all of the above failed.
407 static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
408   // Check if exact trip count is known.
409   if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
410     return ExpectedTC;
411 
412   // Check if there is an expected trip count available from profile data.
413   if (LoopVectorizeWithBlockFrequency)
414     if (auto EstimatedTC = getLoopEstimatedTripCount(L))
415       return EstimatedTC;
416 
417   // Check if upper bound estimate is known.
418   if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
419     return ExpectedTC;
420 
421   return None;
422 }
423 
424 // Forward declare GeneratedRTChecks.
425 class GeneratedRTChecks;
426 
427 namespace llvm {
428 
429 AnalysisKey ShouldRunExtraVectorPasses::Key;
430 
431 /// InnerLoopVectorizer vectorizes loops which contain only one basic
432 /// block to a specified vectorization factor (VF).
433 /// This class performs the widening of scalars into vectors, or multiple
434 /// scalars. This class also implements the following features:
435 /// * It inserts an epilogue loop for handling loops that don't have iteration
436 ///   counts that are known to be a multiple of the vectorization factor.
437 /// * It handles the code generation for reduction variables.
438 /// * Scalarization (implementation using scalars) of un-vectorizable
439 ///   instructions.
440 /// InnerLoopVectorizer does not perform any vectorization-legality
441 /// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found in the loop being vectorized.
445 class InnerLoopVectorizer {
446 public:
447   InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
448                       LoopInfo *LI, DominatorTree *DT,
449                       const TargetLibraryInfo *TLI,
450                       const TargetTransformInfo *TTI, AssumptionCache *AC,
451                       OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
452                       unsigned UnrollFactor, LoopVectorizationLegality *LVL,
453                       LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
454                       ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
455       : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
456         AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
457         Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
458         PSI(PSI), RTChecks(RTChecks) {
459     // Query this against the original loop and save it here because the profile
460     // of the original loop header may change as the transformation happens.
461     OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
462         OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
463   }
464 
465   virtual ~InnerLoopVectorizer() = default;
466 
467   /// Create a new empty loop that will contain vectorized instructions later
468   /// on, while the old loop will be used as the scalar remainder. Control flow
469   /// is generated around the vectorized (and scalar epilogue) loops consisting
470   /// of various checks and bypasses. Return the pre-header block of the new
471   /// loop and the start value for the canonical induction, if it is != 0. The
472   /// latter is the case when vectorizing the epilogue loop. In the case of
/// epilogue vectorization, this function is overridden to handle the more
474   /// complex control flow around the loops.
475   virtual std::pair<BasicBlock *, Value *> createVectorizedLoopSkeleton();
476 
477   /// Widen a single call instruction within the innermost loop.
478   void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
479                             VPTransformState &State);
480 
  /// Fix the vectorized code, taking care of header PHIs, live-outs, and more.
482   void fixVectorizedLoop(VPTransformState &State);
483 
484   // Return true if any runtime check is added.
485   bool areSafetyChecksAdded() { return AddedSafetyChecks; }
486 
487   /// A type for vectorized values in the new loop. Each value from the
488   /// original loop, when vectorized, is represented by UF vector values in the
489   /// new unrolled loop, where UF is the unroll factor.
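  /// For example, with VF = 4 and UF = 2, an i32 value from the original loop
  /// is represented by two <4 x i32> values in the vector loop, one per
  /// unrolled part.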
490   using VectorParts = SmallVector<Value *, 2>;
491 
492   /// Vectorize a single first-order recurrence or pointer induction PHINode in
493   /// a block. This method handles the induction variable canonicalization. It
494   /// supports both VF = 1 for unrolled loops and arbitrary length vectors.
495   void widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR,
496                            VPTransformState &State);
497 
498   /// A helper function to scalarize a single Instruction in the innermost loop.
499   /// Generates a sequence of scalar instances for each lane between \p MinLane
500   /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
501   /// inclusive. Uses the VPValue operands from \p RepRecipe instead of \p
502   /// Instr's operands.
503   void scalarizeInstruction(Instruction *Instr, VPReplicateRecipe *RepRecipe,
504                             const VPIteration &Instance, bool IfPredicateInstr,
505                             VPTransformState &State);
506 
507   /// Construct the vector value of a scalarized value \p V one lane at a time.
508   void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
509                                  VPTransformState &State);
510 
511   /// Try to vectorize interleaved access group \p Group with the base address
512   /// given in \p Addr, optionally masking the vector operations if \p
513   /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
514   /// values in the vectorized loop.
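  ///
  /// As an illustrative example, the two loads in
  ///
  ///   for (int i = 0; i < n; ++i)
  ///     sum += a[2 * i] + a[2 * i + 1];
  ///
  /// form an interleave group with factor 2: they can be vectorized as a
  /// single wide load of 2 * VF consecutive elements followed by shuffles
  /// that de-interleave the even- and odd-indexed elements.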
515   void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
516                                 ArrayRef<VPValue *> VPDefs,
517                                 VPTransformState &State, VPValue *Addr,
518                                 ArrayRef<VPValue *> StoredValues,
519                                 VPValue *BlockInMask = nullptr);
520 
  /// Set the debug location in the builder \p CustomBuilder using the debug
  /// location in \p V. If \p CustomBuilder is None then the class member's
  /// Builder is used.
523   void setDebugLocFromInst(const Value *V,
524                            Optional<IRBuilderBase *> CustomBuilder = None);
525 
526   /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
527   void fixNonInductionPHIs(VPTransformState &State);
528 
529   /// Returns true if the reordering of FP operations is not allowed, but we are
530   /// able to vectorize with strict in-order reductions for the given RdxDesc.
531   bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc);
532 
533   /// Create a broadcast instruction. This method generates a broadcast
534   /// instruction (shuffle) for loop invariant values and for the induction
535   /// value. If this is the induction variable then we extend it to N, N+1, ...
536   /// this is needed because each iteration in the loop corresponds to a SIMD
537   /// element.
538   virtual Value *getBroadcastInstrs(Value *V);
539 
540   /// Add metadata from one instruction to another.
541   ///
542   /// This includes both the original MDs from \p From and additional ones (\see
543   /// addNewMetadata).  Use this for *newly created* instructions in the vector
544   /// loop.
545   void addMetadata(Instruction *To, Instruction *From);
546 
547   /// Similar to the previous function but it adds the metadata to a
548   /// vector of instructions.
549   void addMetadata(ArrayRef<Value *> To, Instruction *From);
550 
551   // Returns the resume value (bc.merge.rdx) for a reduction as
552   // generated by fixReduction.
553   PHINode *getReductionResumeValue(const RecurrenceDescriptor &RdxDesc);
554 
555 protected:
556   friend class LoopVectorizationPlanner;
557 
558   /// A small list of PHINodes.
559   using PhiVector = SmallVector<PHINode *, 4>;
560 
561   /// A type for scalarized values in the new loop. Each value from the
562   /// original loop, when scalarized, is represented by UF x VF scalar values
563   /// in the new unrolled loop, where UF is the unroll factor and VF is the
564   /// vectorization factor.
565   using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;
566 
567   /// Set up the values of the IVs correctly when exiting the vector loop.
568   void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
569                     Value *CountRoundDown, Value *EndValue,
570                     BasicBlock *MiddleBlock, BasicBlock *VectorHeader);
571 
572   /// Introduce a conditional branch (on true, condition to be set later) at the
573   /// end of the header=latch connecting it to itself (across the backedge) and
574   /// to the exit block of \p L.
575   void createHeaderBranch(Loop *L);
576 
577   /// Handle all cross-iteration phis in the header.
578   void fixCrossIterationPHIs(VPTransformState &State);
579 
580   /// Create the exit value of first order recurrences in the middle block and
581   /// update their users.
582   void fixFirstOrderRecurrence(VPFirstOrderRecurrencePHIRecipe *PhiR,
583                                VPTransformState &State);
584 
585   /// Create code for the loop exit value of the reduction.
586   void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);
587 
588   /// Clear NSW/NUW flags from reduction instructions if necessary.
589   void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
590                                VPTransformState &State);
591 
592   /// Fixup the LCSSA phi nodes in the unique exit block.  This simply
593   /// means we need to add the appropriate incoming value from the middle
594   /// block as exiting edges from the scalar epilogue loop (if present) are
595   /// already in place, and we exit the vector loop exclusively to the middle
596   /// block.
597   void fixLCSSAPHIs(VPTransformState &State);
598 
599   /// Iteratively sink the scalarized operands of a predicated instruction into
600   /// the block that was created for it.
601   void sinkScalarOperands(Instruction *PredInst);
602 
603   /// Shrinks vector element sizes to the smallest bitwidth they can be legally
604   /// represented as.
605   void truncateToMinimalBitwidths(VPTransformState &State);
606 
607   /// Returns (and creates if needed) the original loop trip count.
608   Value *getOrCreateTripCount(BasicBlock *InsertBlock);
609 
610   /// Returns (and creates if needed) the trip count of the widened loop.
611   Value *getOrCreateVectorTripCount(BasicBlock *InsertBlock);
612 
613   /// Returns a bitcasted value to the requested vector type.
614   /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
615   Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
616                                 const DataLayout &DL);
617 
618   /// Emit a bypass check to see if the vector trip count is zero, including if
619   /// it overflows.
620   void emitMinimumIterationCountCheck(BasicBlock *Bypass);
621 
622   /// Emit a bypass check to see if all of the SCEV assumptions we've
623   /// had to make are correct. Returns the block containing the checks or
624   /// nullptr if no checks have been added.
625   BasicBlock *emitSCEVChecks(BasicBlock *Bypass);
626 
627   /// Emit bypass checks to check any memory assumptions we may have made.
628   /// Returns the block containing the checks or nullptr if no checks have been
629   /// added.
630   BasicBlock *emitMemRuntimeChecks(BasicBlock *Bypass);
631 
632   /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
633   /// vector loop preheader, middle block and scalar preheader. Also
634   /// allocate a loop object for the new vector loop and return it.
635   Loop *createVectorLoopSkeleton(StringRef Prefix);
636 
637   /// Create new phi nodes for the induction variables to resume iteration count
638   /// in the scalar epilogue, from where the vectorized loop left off.
  /// In cases where the loop skeleton is more complicated (e.g. epilogue
640   /// vectorization) and the resume values can come from an additional bypass
641   /// block, the \p AdditionalBypass pair provides information about the bypass
642   /// block and the end value on the edge from bypass to this loop.
643   void createInductionResumeValues(
644       Loop *L,
645       std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});
646 
647   /// Complete the loop skeleton by adding debug MDs, creating appropriate
648   /// conditional branches in the middle block, preparing the builder and
649   /// running the verifier. Take in the vector loop \p L as argument, and return
650   /// the preheader of the completed vector loop.
651   BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);
652 
653   /// Add additional metadata to \p To that was not present on \p Orig.
654   ///
655   /// Currently this is used to add the noalias annotations based on the
656   /// inserted memchecks.  Use this for instructions that are *cloned* into the
657   /// vector loop.
658   void addNewMetadata(Instruction *To, const Instruction *Orig);
659 
660   /// Collect poison-generating recipes that may generate a poison value that is
661   /// used after vectorization, even when their operands are not poison. Those
662   /// recipes meet the following conditions:
663   ///  * Contribute to the address computation of a recipe generating a widen
664   ///    memory load/store (VPWidenMemoryInstructionRecipe or
665   ///    VPInterleaveRecipe).
666   ///  * Such a widen memory load/store has at least one underlying Instruction
667   ///    that is in a basic block that needs predication and after vectorization
668   ///    the generated instruction won't be predicated.
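  ///
  /// As an illustrative example, consider a conditional load such as
  ///
  ///   if (p[i] != 0)
  ///     x = q[i - 1];
  ///
  /// where the address is computed with an inbounds GEP. If the widened load
  /// is emitted without predication, that GEP may now execute (and produce
  /// poison for out-of-bounds lanes) on iterations where the scalar loop
  /// would never have reached it, so its poison-generating flags must be
  /// dropped.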
669   void collectPoisonGeneratingRecipes(VPTransformState &State);
670 
671   /// Allow subclasses to override and print debug traces before/after vplan
672   /// execution, when trace information is requested.
673   virtual void printDebugTracesAtStart(){};
674   virtual void printDebugTracesAtEnd(){};
675 
676   /// The original loop.
677   Loop *OrigLoop;
678 
679   /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
680   /// dynamic knowledge to simplify SCEV expressions and converts them to a
681   /// more usable form.
682   PredicatedScalarEvolution &PSE;
683 
684   /// Loop Info.
685   LoopInfo *LI;
686 
687   /// Dominator Tree.
688   DominatorTree *DT;
689 
690   /// Alias Analysis.
691   AAResults *AA;
692 
693   /// Target Library Info.
694   const TargetLibraryInfo *TLI;
695 
696   /// Target Transform Info.
697   const TargetTransformInfo *TTI;
698 
699   /// Assumption Cache.
700   AssumptionCache *AC;
701 
702   /// Interface to emit optimization remarks.
703   OptimizationRemarkEmitter *ORE;
704 
705   /// LoopVersioning.  It's only set up (non-null) if memchecks were
706   /// used.
707   ///
708   /// This is currently only used to add no-alias metadata based on the
  /// memchecks.  The actual versioning is performed manually.
710   std::unique_ptr<LoopVersioning> LVer;
711 
712   /// The vectorization SIMD factor to use. Each vector will have this many
713   /// vector elements.
714   ElementCount VF;
715 
716   /// The vectorization unroll factor to use. Each scalar is vectorized to this
717   /// many different vector instructions.
718   unsigned UF;
719 
  /// The builder that we use.
721   IRBuilder<> Builder;
722 
723   // --- Vectorization state ---
724 
725   /// The vector-loop preheader.
726   BasicBlock *LoopVectorPreHeader;
727 
728   /// The scalar-loop preheader.
729   BasicBlock *LoopScalarPreHeader;
730 
731   /// Middle Block between the vector and the scalar.
732   BasicBlock *LoopMiddleBlock;
733 
734   /// The unique ExitBlock of the scalar loop if one exists.  Note that
735   /// there can be multiple exiting edges reaching this block.
736   BasicBlock *LoopExitBlock;
737 
738   /// The scalar loop body.
739   BasicBlock *LoopScalarBody;
740 
741   /// A list of all bypass blocks. The first block is the entry of the loop.
742   SmallVector<BasicBlock *, 4> LoopBypassBlocks;
743 
744   /// Store instructions that were predicated.
745   SmallVector<Instruction *, 4> PredicatedInstructions;
746 
747   /// Trip count of the original loop.
748   Value *TripCount = nullptr;
749 
750   /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
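  /// For example, with TripCount = 100, VF = 4 and UF = 2, VectorTripCount is
  /// 96 (twelve wide iterations), and the remaining 4 iterations run in the
  /// scalar epilogue.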
751   Value *VectorTripCount = nullptr;
752 
753   /// The legality analysis.
754   LoopVectorizationLegality *Legal;
755 
  /// The profitability analysis.
757   LoopVectorizationCostModel *Cost;
758 
759   // Record whether runtime checks are added.
760   bool AddedSafetyChecks = false;
761 
762   // Holds the end values for each induction variable. We save the end values
763   // so we can later fix-up the external users of the induction variables.
764   DenseMap<PHINode *, Value *> IVEndValues;
765 
766   // Vector of original scalar PHIs whose corresponding widened PHIs need to be
767   // fixed up at the end of vector code generation.
768   SmallVector<PHINode *, 8> OrigPHIsToFix;
769 
770   /// BFI and PSI are used to check for profile guided size optimizations.
771   BlockFrequencyInfo *BFI;
772   ProfileSummaryInfo *PSI;
773 
  // Whether this loop should be optimized for size based on profile-guided
  // size optimizations.
776   bool OptForSizeBasedOnProfile;
777 
  /// Structure to hold information about generated runtime checks, responsible
  /// for cleaning up the checks if vectorization turns out to be unprofitable.
780   GeneratedRTChecks &RTChecks;
781 
782   // Holds the resume values for reductions in the loops, used to set the
783   // correct start value of reduction PHIs when vectorizing the epilogue.
784   SmallMapVector<const RecurrenceDescriptor *, PHINode *, 4>
785       ReductionResumeValues;
786 };
787 
788 class InnerLoopUnroller : public InnerLoopVectorizer {
789 public:
790   InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
791                     LoopInfo *LI, DominatorTree *DT,
792                     const TargetLibraryInfo *TLI,
793                     const TargetTransformInfo *TTI, AssumptionCache *AC,
794                     OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
795                     LoopVectorizationLegality *LVL,
796                     LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
797                     ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
798       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
799                             ElementCount::getFixed(1), UnrollFactor, LVL, CM,
800                             BFI, PSI, Check) {}
801 
802 private:
803   Value *getBroadcastInstrs(Value *V) override;
804 };
805 
806 /// Encapsulate information regarding vectorization of a loop and its epilogue.
807 /// This information is meant to be updated and used across two stages of
808 /// epilogue vectorization.
809 struct EpilogueLoopVectorizationInfo {
810   ElementCount MainLoopVF = ElementCount::getFixed(0);
811   unsigned MainLoopUF = 0;
812   ElementCount EpilogueVF = ElementCount::getFixed(0);
813   unsigned EpilogueUF = 0;
814   BasicBlock *MainLoopIterationCountCheck = nullptr;
815   BasicBlock *EpilogueIterationCountCheck = nullptr;
816   BasicBlock *SCEVSafetyCheck = nullptr;
817   BasicBlock *MemSafetyCheck = nullptr;
818   Value *TripCount = nullptr;
819   Value *VectorTripCount = nullptr;
820 
821   EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF,
822                                 ElementCount EVF, unsigned EUF)
823       : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF) {
824     assert(EUF == 1 &&
825            "A high UF for the epilogue loop is likely not beneficial.");
826   }
827 };
828 
829 /// An extension of the inner loop vectorizer that creates a skeleton for a
830 /// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice: first to set up the
/// skeleton and vectorize the main loop, and second to complete the skeleton
/// from the first step and vectorize the epilogue.  This is achieved by
834 /// deriving two concrete strategy classes from this base class and invoking
835 /// them in succession from the loop vectorizer planner.
836 class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
837 public:
838   InnerLoopAndEpilogueVectorizer(
839       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
840       DominatorTree *DT, const TargetLibraryInfo *TLI,
841       const TargetTransformInfo *TTI, AssumptionCache *AC,
842       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
843       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
844       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
845       GeneratedRTChecks &Checks)
846       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
847                             EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
848                             Checks),
849         EPI(EPI) {}
850 
851   // Override this function to handle the more complex control flow around the
852   // three loops.
853   std::pair<BasicBlock *, Value *>
854   createVectorizedLoopSkeleton() final override {
855     return createEpilogueVectorizedLoopSkeleton();
856   }
857 
858   /// The interface for creating a vectorized skeleton using one of two
859   /// different strategies, each corresponding to one execution of the vplan
860   /// as described above.
861   virtual std::pair<BasicBlock *, Value *>
862   createEpilogueVectorizedLoopSkeleton() = 0;
863 
864   /// Holds and updates state information required to vectorize the main loop
865   /// and its epilogue in two separate passes. This setup helps us avoid
866   /// regenerating and recomputing runtime safety checks. It also helps us to
867   /// shorten the iteration-count-check path length for the cases where the
868   /// iteration count of the loop is so small that the main vector loop is
869   /// completely skipped.
870   EpilogueLoopVectorizationInfo &EPI;
871 };
872 
873 /// A specialized derived class of inner loop vectorizer that performs
874 /// vectorization of *main* loops in the process of vectorizing loops and their
875 /// epilogues.
876 class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
877 public:
878   EpilogueVectorizerMainLoop(
879       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
880       DominatorTree *DT, const TargetLibraryInfo *TLI,
881       const TargetTransformInfo *TTI, AssumptionCache *AC,
882       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
883       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
884       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
885       GeneratedRTChecks &Check)
886       : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
887                                        EPI, LVL, CM, BFI, PSI, Check) {}
888   /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e. the first pass of vplan execution).
890   std::pair<BasicBlock *, Value *>
891   createEpilogueVectorizedLoopSkeleton() final override;
892 
893 protected:
894   /// Emits an iteration count bypass check once for the main loop (when \p
895   /// ForEpilogue is false) and once for the epilogue loop (when \p
896   /// ForEpilogue is true).
897   BasicBlock *emitMinimumIterationCountCheck(BasicBlock *Bypass,
898                                              bool ForEpilogue);
899   void printDebugTracesAtStart() override;
900   void printDebugTracesAtEnd() override;
901 };
902 
903 // A specialized derived class of inner loop vectorizer that performs
904 // vectorization of *epilogue* loops in the process of vectorizing loops and
905 // their epilogues.
906 class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
907 public:
908   EpilogueVectorizerEpilogueLoop(
909       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
910       DominatorTree *DT, const TargetLibraryInfo *TLI,
911       const TargetTransformInfo *TTI, AssumptionCache *AC,
912       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
913       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
914       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
915       GeneratedRTChecks &Checks)
916       : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
917                                        EPI, LVL, CM, BFI, PSI, Checks) {}
918   /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e. the second pass of vplan execution).
920   std::pair<BasicBlock *, Value *>
921   createEpilogueVectorizedLoopSkeleton() final override;
922 
923 protected:
924   /// Emits an iteration count bypass check after the main vector loop has
925   /// finished to see if there are any iterations left to execute by either
926   /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(BasicBlock *Bypass,
                                                      BasicBlock *Insert);
930   void printDebugTracesAtStart() override;
931   void printDebugTracesAtEnd() override;
932 };
933 } // end namespace llvm
934 
/// Look for a meaningful debug location on the instruction or its
/// operands.
937 static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
938   if (!I)
939     return I;
940 
941   DebugLoc Empty;
942   if (I->getDebugLoc() != Empty)
943     return I;
944 
945   for (Use &Op : I->operands()) {
946     if (Instruction *OpInst = dyn_cast<Instruction>(Op))
947       if (OpInst->getDebugLoc() != Empty)
948         return OpInst;
949   }
950 
951   return I;
952 }
953 
954 void InnerLoopVectorizer::setDebugLocFromInst(
955     const Value *V, Optional<IRBuilderBase *> CustomBuilder) {
956   IRBuilderBase *B = (CustomBuilder == None) ? &Builder : *CustomBuilder;
957   if (const Instruction *Inst = dyn_cast_or_null<Instruction>(V)) {
958     const DILocation *DIL = Inst->getDebugLoc();
959 
    // When an FSDiscriminator is enabled, we don't need to add the multiply
    // factors to the discriminators.
962     if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
963         !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
964       // FIXME: For scalable vectors, assume vscale=1.
965       auto NewDIL =
966           DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
967       if (NewDIL)
968         B->SetCurrentDebugLocation(NewDIL.getValue());
969       else
970         LLVM_DEBUG(dbgs()
971                    << "Failed to create new discriminator: "
972                    << DIL->getFilename() << " Line: " << DIL->getLine());
973     } else
974       B->SetCurrentDebugLocation(DIL);
975   } else
976     B->SetCurrentDebugLocation(DebugLoc());
977 }
978 
979 /// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
980 /// is passed, the message relates to that particular instruction.
981 #ifndef NDEBUG
982 static void debugVectorizationMessage(const StringRef Prefix,
983                                       const StringRef DebugMsg,
984                                       Instruction *I) {
985   dbgs() << "LV: " << Prefix << DebugMsg;
986   if (I != nullptr)
987     dbgs() << " " << *I;
988   else
989     dbgs() << '.';
990   dbgs() << '\n';
991 }
992 #endif
993 
994 /// Create an analysis remark that explains why vectorization failed
995 ///
996 /// \p PassName is the name of the pass (e.g. can be AlwaysPrint).  \p
997 /// RemarkName is the identifier for the remark.  If \p I is passed it is an
998 /// instruction that prevents vectorization.  Otherwise \p TheLoop is used for
999 /// the location of the remark.  \return the remark object that can be
1000 /// streamed to.
1001 static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
1002     StringRef RemarkName, Loop *TheLoop, Instruction *I) {
1003   Value *CodeRegion = TheLoop->getHeader();
1004   DebugLoc DL = TheLoop->getStartLoc();
1005 
1006   if (I) {
1007     CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back to
    // using the loop's.
1010     if (I->getDebugLoc())
1011       DL = I->getDebugLoc();
1012   }
1013 
1014   return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
1015 }
1016 
1017 namespace llvm {
1018 
1019 /// Return a value for Step multiplied by VF.
1020 Value *createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF,
1021                        int64_t Step) {
1022   assert(Ty->isIntegerTy() && "Expected an integer step");
1023   Constant *StepVal = ConstantInt::get(Ty, Step * VF.getKnownMinValue());
1024   return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
1025 }
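// For example, for a fixed VF of 4 and Step = 2 this returns the constant 8;
// for a scalable VF of <vscale x 4> it returns the equivalent of
// "8 * vscale", materialized at runtime.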
1026 
1027 /// Return the runtime value for VF.
1028 Value *getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF) {
1029   Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
1030   return VF.isScalable() ? B.CreateVScale(EC) : EC;
1031 }
1032 
1033 static Value *getRuntimeVFAsFloat(IRBuilderBase &B, Type *FTy,
1034                                   ElementCount VF) {
1035   assert(FTy->isFloatingPointTy() && "Expected floating point type!");
1036   Type *IntTy = IntegerType::get(FTy->getContext(), FTy->getScalarSizeInBits());
1037   Value *RuntimeVF = getRuntimeVF(B, IntTy, VF);
1038   return B.CreateUIToFP(RuntimeVF, FTy);
1039 }
1040 
1041 void reportVectorizationFailure(const StringRef DebugMsg,
1042                                 const StringRef OREMsg, const StringRef ORETag,
1043                                 OptimizationRemarkEmitter *ORE, Loop *TheLoop,
1044                                 Instruction *I) {
1045   LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
1046   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1047   ORE->emit(
1048       createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
1049       << "loop not vectorized: " << OREMsg);
1050 }
1051 
1052 void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
1053                              OptimizationRemarkEmitter *ORE, Loop *TheLoop,
1054                              Instruction *I) {
1055   LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
1056   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1057   ORE->emit(
1058       createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
1059       << Msg);
1060 }
1061 
1062 } // end namespace llvm
1063 
1064 #ifndef NDEBUG
1065 /// \return string containing a file name and a line # for the given loop.
1066 static std::string getDebugLocString(const Loop *L) {
1067   std::string Result;
1068   if (L) {
1069     raw_string_ostream OS(Result);
1070     if (const DebugLoc LoopDbgLoc = L->getStartLoc())
1071       LoopDbgLoc.print(OS);
1072     else
1073       // Just print the module name.
1074       OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
1075     OS.flush();
1076   }
1077   return Result;
1078 }
1079 #endif
1080 
1081 void InnerLoopVectorizer::addNewMetadata(Instruction *To,
1082                                          const Instruction *Orig) {
1083   // If the loop was versioned with memchecks, add the corresponding no-alias
1084   // metadata.
1085   if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
1086     LVer->annotateInstWithNoAlias(To, Orig);
1087 }
1088 
1089 void InnerLoopVectorizer::collectPoisonGeneratingRecipes(
1090     VPTransformState &State) {
1091 
1092   // Collect recipes in the backward slice of `Root` that may generate a poison
1093   // value that is used after vectorization.
1094   SmallPtrSet<VPRecipeBase *, 16> Visited;
1095   auto collectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
1096     SmallVector<VPRecipeBase *, 16> Worklist;
1097     Worklist.push_back(Root);
1098 
1099     // Traverse the backward slice of Root through its use-def chain.
1100     while (!Worklist.empty()) {
1101       VPRecipeBase *CurRec = Worklist.back();
1102       Worklist.pop_back();
1103 
1104       if (!Visited.insert(CurRec).second)
1105         continue;
1106 
1107       // Prune search if we find another recipe generating a widen memory
1108       // instruction. Widen memory instructions involved in address computation
1109       // will lead to gather/scatter instructions, which don't need to be
1110       // handled.
1111       if (isa<VPWidenMemoryInstructionRecipe>(CurRec) ||
1112           isa<VPInterleaveRecipe>(CurRec) ||
1113           isa<VPScalarIVStepsRecipe>(CurRec) ||
1114           isa<VPCanonicalIVPHIRecipe>(CurRec))
1115         continue;
1116 
1117       // This recipe contributes to the address computation of a widen
1118       // load/store. Collect recipe if its underlying instruction has
1119       // poison-generating flags.
1120       Instruction *Instr = CurRec->getUnderlyingInstr();
1121       if (Instr && Instr->hasPoisonGeneratingFlags())
1122         State.MayGeneratePoisonRecipes.insert(CurRec);
1123 
1124       // Add new definitions to the worklist.
1125       for (VPValue *operand : CurRec->operands())
1126         if (VPDef *OpDef = operand->getDef())
1127           Worklist.push_back(cast<VPRecipeBase>(OpDef));
1128     }
1129   });
1130 
  // Traverse all the recipes in the VPlan and collect the poison-generating
  // recipes in the backward slice starting at the address of a
  // VPWidenMemoryInstructionRecipe or VPInterleaveRecipe.
1134   auto Iter = depth_first(
1135       VPBlockRecursiveTraversalWrapper<VPBlockBase *>(State.Plan->getEntry()));
1136   for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
1137     for (VPRecipeBase &Recipe : *VPBB) {
1138       if (auto *WidenRec = dyn_cast<VPWidenMemoryInstructionRecipe>(&Recipe)) {
1139         Instruction *UnderlyingInstr = WidenRec->getUnderlyingInstr();
1140         VPDef *AddrDef = WidenRec->getAddr()->getDef();
1141         if (AddrDef && WidenRec->isConsecutive() && UnderlyingInstr &&
1142             Legal->blockNeedsPredication(UnderlyingInstr->getParent()))
1143           collectPoisonGeneratingInstrsInBackwardSlice(
1144               cast<VPRecipeBase>(AddrDef));
1145       } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
1146         VPDef *AddrDef = InterleaveRec->getAddr()->getDef();
1147         if (AddrDef) {
1148           // Check if any member of the interleave group needs predication.
1149           const InterleaveGroup<Instruction> *InterGroup =
1150               InterleaveRec->getInterleaveGroup();
1151           bool NeedPredication = false;
1152           for (int I = 0, NumMembers = InterGroup->getNumMembers();
1153                I < NumMembers; ++I) {
1154             Instruction *Member = InterGroup->getMember(I);
1155             if (Member)
1156               NeedPredication |=
1157                   Legal->blockNeedsPredication(Member->getParent());
1158           }
1159 
1160           if (NeedPredication)
1161             collectPoisonGeneratingInstrsInBackwardSlice(
1162                 cast<VPRecipeBase>(AddrDef));
1163         }
1164       }
1165     }
1166   }
1167 }
1168 
1169 void InnerLoopVectorizer::addMetadata(Instruction *To,
1170                                       Instruction *From) {
1171   propagateMetadata(To, From);
1172   addNewMetadata(To, From);
1173 }
1174 
1175 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
1176                                       Instruction *From) {
1177   for (Value *V : To) {
1178     if (Instruction *I = dyn_cast<Instruction>(V))
1179       addMetadata(I, From);
1180   }
1181 }
1182 
1183 PHINode *InnerLoopVectorizer::getReductionResumeValue(
1184     const RecurrenceDescriptor &RdxDesc) {
1185   auto It = ReductionResumeValues.find(&RdxDesc);
1186   assert(It != ReductionResumeValues.end() &&
1187          "Expected to find a resume value for the reduction.");
1188   return It->second;
1189 }
1190 
1191 namespace llvm {
1192 
// Hints for the loop vectorization cost model describing how the scalar
// epilogue loop should be lowered.
1195 enum ScalarEpilogueLowering {
1196 
1197   // The default: allowing scalar epilogues.
1198   CM_ScalarEpilogueAllowed,
1199 
1200   // Vectorization with OptForSize: don't allow epilogues.
1201   CM_ScalarEpilogueNotAllowedOptSize,
1202 
  // A special case of vectorization with OptForSize: loops with a very small
1204   // trip count are considered for vectorization under OptForSize, thereby
1205   // making sure the cost of their loop body is dominant, free of runtime
1206   // guards and scalar iteration overheads.
1207   CM_ScalarEpilogueNotAllowedLowTripLoop,
1208 
1209   // Loop hint predicate indicating an epilogue is undesired.
1210   CM_ScalarEpilogueNotNeededUsePredicate,
1211 
  // Directive indicating we must either tail fold or not vectorize.
1213   CM_ScalarEpilogueNotAllowedUsePredicate
1214 };
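
// For illustration: with a trip count of 10 and VF = 4, allowing a scalar
// epilogue runs two vector iterations covering 8 elements and leaves the
// remaining 2 to a scalar remainder loop, whereas folding the tail by masking
// runs three masked vector iterations covering all 10 elements and emits no
// scalar loop.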
1215 
1216 /// ElementCountComparator creates a total ordering for ElementCount
1217 /// for the purposes of using it in a set structure.
1218 struct ElementCountComparator {
1219   bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
1220     return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
1221            std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
1222   }
1223 };
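
// For example, this ordering places every fixed element count before every
// scalable one (4 < 8 < vscale x 2 < vscale x 4), because isScalable() is the
// most significant element of the comparison tuple.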
1224 using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;
1225 
1226 /// LoopVectorizationCostModel - estimates the expected speedups due to
1227 /// vectorization.
1228 /// In many cases vectorization is not profitable. This can happen because of
1229 /// a number of reasons. In this class we mainly attempt to predict the
1230 /// expected speedup/slowdowns due to the supported instruction set. We use the
1231 /// TargetTransformInfo to query the different backends for the cost of
1232 /// different operations.
1233 class LoopVectorizationCostModel {
1234 public:
1235   LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
1236                              PredicatedScalarEvolution &PSE, LoopInfo *LI,
1237                              LoopVectorizationLegality *Legal,
1238                              const TargetTransformInfo &TTI,
1239                              const TargetLibraryInfo *TLI, DemandedBits *DB,
1240                              AssumptionCache *AC,
1241                              OptimizationRemarkEmitter *ORE, const Function *F,
1242                              const LoopVectorizeHints *Hints,
1243                              InterleavedAccessInfo &IAI)
1244       : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
1245         TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
1246         Hints(Hints), InterleaveInfo(IAI) {}
1247 
1248   /// \return An upper bound for the vectorization factors (both fixed and
1249   /// scalable). If the factors are 0, vectorization and interleaving should be
1250   /// avoided up front.
1251   FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);
1252 
1253   /// \return True if runtime checks are required for vectorization, and false
1254   /// otherwise.
1255   bool runtimeChecksRequired();
1256 
1257   /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every VF in \p CandidateVFs. If UserVF is non-zero,
  /// it is selected as the vectorization factor whenever vectorization is
  /// possible.
1261   VectorizationFactor
1262   selectVectorizationFactor(const ElementCountSet &CandidateVFs);
1263 
1264   VectorizationFactor
1265   selectEpilogueVectorizationFactor(const ElementCount MaxVF,
1266                                     const LoopVectorizationPlanner &LVP);
1267 
1268   /// Setup cost-based decisions for user vectorization factor.
1269   /// \return true if the UserVF is a feasible VF to be chosen.
1270   bool selectUserVectorizationFactor(ElementCount UserVF) {
1271     collectUniformsAndScalars(UserVF);
1272     collectInstsToScalarize(UserVF);
1273     return expectedCost(UserVF).first.isValid();
1274   }
1275 
1276   /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar, such
  /// as 64-bit loop indices.
1279   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1280 
1281   /// \return The desired interleave count.
1282   /// If interleave count has been specified by metadata it will be returned.
1283   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1284   /// are the selected vectorization factor and the cost of the selected VF.
1285   unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);
1286 
  /// A memory access instruction may be vectorized in more than one way; the
  /// form the instruction takes after vectorization depends on its cost.
  /// This function makes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decision map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
  /// avoid redundant calculations.
1294   void setCostBasedWideningDecision(ElementCount VF);
1295 
1296   /// A struct that represents some properties of the register usage
1297   /// of a loop.
1298   struct RegisterUsage {
1299     /// Holds the number of loop invariant values that are used in the loop.
1300     /// The key is ClassID of target-provided register class.
1301     SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
1302     /// Holds the maximum number of concurrent live intervals in the loop.
1303     /// The key is ClassID of target-provided register class.
1304     SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
1305   };
1306 
  /// \return Information about the register usage of the loop for the given
  /// vectorization factors.
1309   SmallVector<RegisterUsage, 8>
1310   calculateRegisterUsage(ArrayRef<ElementCount> VFs);
1311 
1312   /// Collect values we want to ignore in the cost model.
1313   void collectValuesToIgnore();
1314 
1315   /// Collect all element types in the loop for which widening is needed.
1316   void collectElementTypesForWidening();
1317 
  /// Split reductions into those that happen in the loop, and those that happen
  /// outside. In-loop reductions are collected into InLoopReductionChains.
1320   void collectInLoopReductions();
1321 
  /// Returns true if we should use strict in-order reductions for the given
  /// RdxDesc. This is the case when the IsOrdered flag of RdxDesc is set and
  /// reordering of FP operations is not allowed (e.g. because the
  /// -enable-strict-reductions flag is in effect).
1326   bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) {
1327     return !Hints->allowReordering() && RdxDesc.isOrdered();
1328   }
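
  // For illustration: for a strict FP reduction such as
  //   for (i = 0; i < n; ++i) sum += a[i];
  // compiled without reassociation, RdxDesc.isOrdered() is true, so unless
  // the loop hints allow reordering, the reduction must be kept in-order,
  // e.g. lowered with an ordered llvm.vector.reduce.fadd that takes the
  // running scalar accumulator, rather than as a reassociated tree reduction.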
1329 
1330   /// \returns The smallest bitwidth each instruction can be represented with.
1331   /// The vector equivalents of these instructions should be truncated to this
1332   /// type.
1333   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1334     return MinBWs;
1335   }
1336 
1337   /// \returns True if it is more profitable to scalarize instruction \p I for
1338   /// vectorization factor \p VF.
1339   bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
1340     assert(VF.isVector() &&
1341            "Profitable to scalarize relevant only for VF > 1.");
1342 
1343     // Cost model is not run in the VPlan-native path - return conservative
1344     // result until this changes.
1345     if (EnableVPlanNativePath)
1346       return false;
1347 
1348     auto Scalars = InstsToScalarize.find(VF);
1349     assert(Scalars != InstsToScalarize.end() &&
1350            "VF not yet analyzed for scalarization profitability");
1351     return Scalars->second.find(I) != Scalars->second.end();
1352   }
1353 
1354   /// Returns true if \p I is known to be uniform after vectorization.
1355   bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
1356     if (VF.isScalar())
1357       return true;
1358 
1359     // Cost model is not run in the VPlan-native path - return conservative
1360     // result until this changes.
1361     if (EnableVPlanNativePath)
1362       return false;
1363 
1364     auto UniformsPerVF = Uniforms.find(VF);
1365     assert(UniformsPerVF != Uniforms.end() &&
1366            "VF not yet analyzed for uniformity");
1367     return UniformsPerVF->second.count(I);
1368   }
1369 
1370   /// Returns true if \p I is known to be scalar after vectorization.
1371   bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
1372     if (VF.isScalar())
1373       return true;
1374 
1375     // Cost model is not run in the VPlan-native path - return conservative
1376     // result until this changes.
1377     if (EnableVPlanNativePath)
1378       return false;
1379 
1380     auto ScalarsPerVF = Scalars.find(VF);
1381     assert(ScalarsPerVF != Scalars.end() &&
1382            "Scalar values are not calculated for VF");
1383     return ScalarsPerVF->second.count(I);
1384   }
1385 
1386   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1387   /// for vectorization factor \p VF.
1388   bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
1389     return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
1390            !isProfitableToScalarize(I, VF) &&
1391            !isScalarAfterVectorization(I, VF);
1392   }
1393 
1394   /// Decision that was taken during cost calculation for memory instruction.
1395   enum InstWidening {
1396     CM_Unknown,
1397     CM_Widen,         // For consecutive accesses with stride +1.
1398     CM_Widen_Reverse, // For consecutive accesses with stride -1.
1399     CM_Interleave,
1400     CM_GatherScatter,
1401     CM_Scalarize
1402   };
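
  // For illustration, given these scalar loops:
  //   for (i = 0; i < n; ++i) ... = a[i];       // stride +1: CM_Widen
  //   for (i = n - 1; i >= 0; --i) ... = a[i];  // stride -1: CM_Widen_Reverse
  //   for (i = 0; i < n; ++i) ... = a[b[i]];    // indexed:   CM_GatherScatter
  // the decision taken for each access determines whether it becomes a wide
  // load/store, a wide access plus a reverse shuffle, or a masked gather or
  // scatter.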
1403 
1404   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1405   /// instruction \p I and vector width \p VF.
1406   void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
1407                            InstructionCost Cost) {
1408     assert(VF.isVector() && "Expected VF >=2");
1409     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1410   }
1411 
1412   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1413   /// interleaving group \p Grp and vector width \p VF.
1414   void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
1415                            ElementCount VF, InstWidening W,
1416                            InstructionCost Cost) {
1417     assert(VF.isVector() && "Expected VF >=2");
    // Broadcast this decision to all instructions inside the group, but
    // assign the cost to one instruction only.
1420     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1421       if (auto *I = Grp->getMember(i)) {
1422         if (Grp->getInsertPos() == I)
1423           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1424         else
1425           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1426       }
1427     }
1428   }
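
  // For example, for an interleave group {a[2*i], a[2*i+1]} with factor 2,
  // the same decision is recorded for both members, but the full group cost
  // is attached only to the insert position; the other member gets cost 0 so
  // the group is not double-counted.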
1429 
1430   /// Return the cost model decision for the given instruction \p I and vector
1431   /// width \p VF. Return CM_Unknown if this instruction did not pass
1432   /// through the cost modeling.
1433   InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
1434     assert(VF.isVector() && "Expected VF to be a vector VF");
1435     // Cost model is not run in the VPlan-native path - return conservative
1436     // result until this changes.
1437     if (EnableVPlanNativePath)
1438       return CM_GatherScatter;
1439 
1440     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1441     auto Itr = WideningDecisions.find(InstOnVF);
1442     if (Itr == WideningDecisions.end())
1443       return CM_Unknown;
1444     return Itr->second.first;
1445   }
1446 
1447   /// Return the vectorization cost for the given instruction \p I and vector
1448   /// width \p VF.
1449   InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
1450     assert(VF.isVector() && "Expected VF >=2");
1451     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1452     assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1453            "The cost is not calculated");
1454     return WideningDecisions[InstOnVF].second;
1455   }
1456 
  /// Return true if instruction \p I is an optimizable truncate whose operand
1458   /// is an induction variable. Such a truncate will be removed by adding a new
1459   /// induction variable with the destination type.
1460   bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
1461     // If the instruction is not a truncate, return false.
1462     auto *Trunc = dyn_cast<TruncInst>(I);
1463     if (!Trunc)
1464       return false;
1465 
1466     // Get the source and destination types of the truncate.
1467     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1468     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1469 
1470     // If the truncate is free for the given types, return false. Replacing a
1471     // free truncate with an induction variable would add an induction variable
1472     // update instruction to each iteration of the loop. We exclude from this
1473     // check the primary induction variable since it will need an update
1474     // instruction regardless.
1475     Value *Op = Trunc->getOperand(0);
1476     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1477       return false;
1478 
1479     // If the truncated value is not an induction variable, return false.
1480     return Legal->isInductionPhi(Op);
1481   }
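
  // For illustration: in a loop with an i64 primary induction %iv, a use
  //   %t = trunc i64 %iv to i32
  // can be optimized away by introducing a parallel i32 induction variable
  // that produces %t directly, so no per-iteration truncate of the widened
  // induction vector is needed.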
1482 
1483   /// Collects the instructions to scalarize for each predicated instruction in
1484   /// the loop.
1485   void collectInstsToScalarize(ElementCount VF);
1486 
1487   /// Collect Uniform and Scalar values for the given \p VF.
1488   /// The sets depend on CM decision for Load/Store instructions
1489   /// that may be vectorized as interleave, gather-scatter or scalarized.
1490   void collectUniformsAndScalars(ElementCount VF) {
1491     // Do the analysis once.
1492     if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
1493       return;
1494     setCostBasedWideningDecision(VF);
1495     collectLoopUniforms(VF);
1496     collectLoopScalars(VF);
1497   }
1498 
1499   /// Returns true if the target machine supports masked store operation
1500   /// for the given \p DataType and kind of access to \p Ptr.
1501   bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const {
1502     return Legal->isConsecutivePtr(DataType, Ptr) &&
1503            TTI.isLegalMaskedStore(DataType, Alignment);
1504   }
1505 
1506   /// Returns true if the target machine supports masked load operation
1507   /// for the given \p DataType and kind of access to \p Ptr.
1508   bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const {
1509     return Legal->isConsecutivePtr(DataType, Ptr) &&
1510            TTI.isLegalMaskedLoad(DataType, Alignment);
1511   }
1512 
1513   /// Returns true if the target machine can represent \p V as a masked gather
1514   /// or scatter operation.
1515   bool isLegalGatherOrScatter(Value *V,
1516                               ElementCount VF = ElementCount::getFixed(1)) {
1517     bool LI = isa<LoadInst>(V);
1518     bool SI = isa<StoreInst>(V);
1519     if (!LI && !SI)
1520       return false;
1521     auto *Ty = getLoadStoreType(V);
1522     Align Align = getLoadStoreAlignment(V);
1523     if (VF.isVector())
1524       Ty = VectorType::get(Ty, VF);
1525     return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
1526            (SI && TTI.isLegalMaskedScatter(Ty, Align));
1527   }
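
  // For example, an indexed access such as
  //   for (i = 0; i < n; ++i) sum += a[b[i]];
  // has no consecutive pointer, so vectorizing the load of a[b[i]] requires
  // a masked gather; this helper checks that the target supports one for the
  // widened type at the access's alignment.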
1528 
1529   /// Returns true if the target machine supports all of the reduction
1530   /// variables found for the given VF.
1531   bool canVectorizeReductions(ElementCount VF) const {
1532     return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
1533       const RecurrenceDescriptor &RdxDesc = Reduction.second;
1534       return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
1535     }));
1536   }
1537 
1538   /// Returns true if \p I is an instruction that will be scalarized with
1539   /// predication when vectorizing \p I with vectorization factor \p VF. Such
1540   /// instructions include conditional stores and instructions that may divide
1541   /// by zero.
1542   bool isScalarWithPredication(Instruction *I, ElementCount VF) const;
1543 
  /// Returns true if \p I is an instruction that will be predicated either
  /// through scalar predication or masked load/store or masked gather/scatter.
  /// \p VF is the vectorization factor that will be used to vectorize \p I.
  /// Superset of instructions that return true for isScalarWithPredication.
1548   bool isPredicatedInst(Instruction *I, ElementCount VF,
1549                         bool IsKnownUniform = false) {
    // When we know the load is uniform and the original scalar loop was not
    // predicated we don't need to mark it as a predicated instruction. Any
    // vectorized blocks created when tail-folding are artificial: we have
    // introduced them ourselves and know there is always at least one active
    // lane. That's why we call Legal->blockNeedsPredication here, because it
    // doesn't query tail-folding.
1556     if (IsKnownUniform && isa<LoadInst>(I) &&
1557         !Legal->blockNeedsPredication(I->getParent()))
1558       return false;
1559     if (!blockNeedsPredicationForAnyReason(I->getParent()))
1560       return false;
1561     // Loads and stores that need some form of masked operation are predicated
1562     // instructions.
1563     if (isa<LoadInst>(I) || isa<StoreInst>(I))
1564       return Legal->isMaskRequired(I);
1565     return isScalarWithPredication(I, VF);
1566   }
1567 
1568   /// Returns true if \p I is a memory instruction with consecutive memory
1569   /// access that can be widened.
1570   bool
1571   memoryInstructionCanBeWidened(Instruction *I,
1572                                 ElementCount VF = ElementCount::getFixed(1));
1573 
1574   /// Returns true if \p I is a memory instruction in an interleaved-group
1575   /// of memory accesses that can be vectorized with wide vector loads/stores
1576   /// and shuffles.
1577   bool
1578   interleavedAccessCanBeWidened(Instruction *I,
1579                                 ElementCount VF = ElementCount::getFixed(1));
1580 
1581   /// Check if \p Instr belongs to any interleaved access group.
1582   bool isAccessInterleaved(Instruction *Instr) {
1583     return InterleaveInfo.isInterleaved(Instr);
1584   }
1585 
1586   /// Get the interleaved access group that \p Instr belongs to.
1587   const InterleaveGroup<Instruction> *
1588   getInterleavedAccessGroup(Instruction *Instr) {
1589     return InterleaveInfo.getInterleaveGroup(Instr);
1590   }
1591 
1592   /// Returns true if we're required to use a scalar epilogue for at least
1593   /// the final iteration of the original loop.
1594   bool requiresScalarEpilogue(ElementCount VF) const {
1595     if (!isScalarEpilogueAllowed())
1596       return false;
    // If we might exit from anywhere but the latch, we must run the exiting
    // iteration in scalar form.
1599     if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
1600       return true;
1601     return VF.isVector() && InterleaveInfo.requiresScalarEpilogue();
1602   }
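
  // For illustration: an interleave group with a gap, e.g. accesses to
  // a[3*i] and a[3*i+1] but never a[3*i+2], still loads a full wide vector
  // per group; running the last iteration(s) in the scalar epilogue avoids
  // reading past the end of the underlying array.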
1603 
  /// Returns true if a scalar epilogue is allowed, i.e. it has not been
  /// disallowed due to optsize or a loop hint annotation.
1606   bool isScalarEpilogueAllowed() const {
1607     return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1608   }
1609 
  /// Returns true if all loop blocks should be masked to fold the loop tail.
1611   bool foldTailByMasking() const { return FoldTailByMasking; }
1612 
  /// Returns true if the instructions in this block require predication
1614   /// for any reason, e.g. because tail folding now requires a predicate
1615   /// or because the block in the original loop was predicated.
1616   bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const {
1617     return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1618   }
1619 
1620   /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
1621   /// nodes to the chain of instructions representing the reductions. Uses a
1622   /// MapVector to ensure deterministic iteration order.
1623   using ReductionChainMap =
1624       SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;
1625 
1626   /// Return the chain of instructions representing an inloop reduction.
1627   const ReductionChainMap &getInLoopReductionChains() const {
1628     return InLoopReductionChains;
1629   }
1630 
1631   /// Returns true if the Phi is part of an inloop reduction.
1632   bool isInLoopReduction(PHINode *Phi) const {
1633     return InLoopReductionChains.count(Phi);
1634   }
1635 
1636   /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1637   /// with factor VF.  Return the cost of the instruction, including
1638   /// scalarization overhead if it's needed.
1639   InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;
1640 
1641   /// Estimate cost of a call instruction CI if it were vectorized with factor
1642   /// VF. Return the cost of the instruction, including scalarization overhead
1643   /// if it's needed. The flag NeedToScalarize shows if the call needs to be
1644   /// scalarized -
1645   /// i.e. either vector version isn't available, or is too expensive.
1646   InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
1647                                     bool &NeedToScalarize) const;
1648 
1649   /// Returns true if the per-lane cost of VectorizationFactor A is lower than
1650   /// that of B.
1651   bool isMoreProfitable(const VectorizationFactor &A,
1652                         const VectorizationFactor &B) const;
1653 
1654   /// Invalidates decisions already taken by the cost model.
1655   void invalidateCostModelingDecisions() {
1656     WideningDecisions.clear();
1657     Uniforms.clear();
1658     Scalars.clear();
1659   }
1660 
1661 private:
1662   unsigned NumPredStores = 0;
1663 
  /// Convenience function that returns the value of vscale_range if
  /// vscale_range.min == vscale_range.max, and otherwise returns the value
  /// returned by the corresponding TTI method.
1667   Optional<unsigned> getVScaleForTuning() const;
1668 
1669   /// \return An upper bound for the vectorization factors for both
1670   /// fixed and scalable vectorization, where the minimum-known number of
1671   /// elements is a power-of-2 larger than zero. If scalable vectorization is
1672   /// disabled or unsupported, then the scalable part will be equal to
1673   /// ElementCount::getScalable(0).
1674   FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
1675                                            ElementCount UserVF,
1676                                            bool FoldTailByMasking);
1677 
  /// \return the maximized element count based on the target's vector
1679   /// registers and the loop trip-count, but limited to a maximum safe VF.
1680   /// This is a helper function of computeFeasibleMaxVF.
1681   /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
1682   /// issue that occurred on one of the buildbots which cannot be reproduced
  /// without having access to the proprietary compiler (see comments on
1684   /// D98509). The issue is currently under investigation and this workaround
1685   /// will be removed as soon as possible.
1686   ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
1687                                        unsigned SmallestType,
1688                                        unsigned WidestType,
1689                                        const ElementCount &MaxSafeVF,
1690                                        bool FoldTailByMasking);
1691 
1692   /// \return the maximum legal scalable VF, based on the safe max number
1693   /// of elements.
1694   ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1695 
1696   /// The vectorization cost is a combination of the cost itself and a boolean
1697   /// indicating whether any of the contributing operations will actually
1698   /// operate on vector values after type legalization in the backend. If this
1699   /// latter value is false, then all operations will be scalarized (i.e. no
1700   /// vectorization has actually taken place).
1701   using VectorizationCostTy = std::pair<InstructionCost, bool>;
1702 
1703   /// Returns the expected execution cost. The unit of the cost does
1704   /// not matter because we use the 'cost' units to compare different
1705   /// vector widths. The cost that is returned is *not* normalized by
1706   /// the factor width. If \p Invalid is not nullptr, this function
1707   /// will add a pair(Instruction*, ElementCount) to \p Invalid for
1708   /// each instruction that has an Invalid cost for the given VF.
1709   using InstructionVFPair = std::pair<Instruction *, ElementCount>;
1710   VectorizationCostTy
1711   expectedCost(ElementCount VF,
1712                SmallVectorImpl<InstructionVFPair> *Invalid = nullptr);
1713 
1714   /// Returns the execution time cost of an instruction for a given vector
1715   /// width. Vector width of one means scalar.
1716   VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1717 
1718   /// The cost-computation logic from getInstructionCost which provides
1719   /// the vector type as an output parameter.
1720   InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
1721                                      Type *&VectorTy);
1722 
1723   /// Return the cost of instructions in an inloop reduction pattern, if I is
1724   /// part of that pattern.
1725   Optional<InstructionCost>
1726   getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy,
1727                           TTI::TargetCostKind CostKind);
1728 
1729   /// Calculate vectorization cost of memory instruction \p I.
1730   InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1731 
1732   /// The cost computation for scalarized memory instruction.
1733   InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1734 
1735   /// The cost computation for interleaving group of memory instructions.
1736   InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1737 
1738   /// The cost computation for Gather/Scatter instruction.
1739   InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1740 
1741   /// The cost computation for widening instruction \p I with consecutive
1742   /// memory access.
1743   InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1744 
  /// The cost calculation for Load/Store instruction \p I with a uniform
  /// pointer:
  /// Load: scalar load + broadcast.
  /// Store: scalar store + (loop-invariant value stored ? 0 : extract of
  /// last element).
1749   InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1750 
1751   /// Estimate the overhead of scalarizing an instruction. This is a
1752   /// convenience wrapper for the type-based getScalarizationOverhead API.
1753   InstructionCost getScalarizationOverhead(Instruction *I,
1754                                            ElementCount VF) const;
1755 
  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
1758   bool isConsecutiveLoadOrStore(Instruction *I);
1759 
1760   /// Returns true if an artificially high cost for emulated masked memrefs
1761   /// should be used.
1762   bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF);
1763 
1764   /// Map of scalar integer values to the smallest bitwidth they can be legally
1765   /// represented as. The vector equivalents of these values should be truncated
1766   /// to this type.
1767   MapVector<Instruction *, uint64_t> MinBWs;
1768 
1769   /// A type representing the costs for instructions if they were to be
1770   /// scalarized rather than vectorized. The entries are Instruction-Cost
1771   /// pairs.
1772   using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1773 
  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
1776   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1777 
1778   /// Records whether it is allowed to have the original scalar loop execute at
1779   /// least once. This may be needed as a fallback loop in case runtime
1780   /// aliasing/dependence checks fail, or to handle the tail/remainder
  /// iterations when the trip count is unknown or not a multiple of the VF,
1782   /// or as a peel-loop to handle gaps in interleave-groups.
1783   /// Under optsize and when the trip count is very small we don't allow any
1784   /// iterations to execute in the scalar loop.
1785   ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1786 
1787   /// All blocks of loop are to be masked to fold tail of scalar iterations.
1788   bool FoldTailByMasking = false;
1789 
1790   /// A map holding scalar costs for different vectorization factors. The
1791   /// presence of a cost for an instruction in the mapping indicates that the
1792   /// instruction will be scalarized when vectorizing with the associated
1793   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1794   DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1795 
1796   /// Holds the instructions known to be uniform after vectorization.
1797   /// The data is collected per VF.
1798   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1799 
1800   /// Holds the instructions known to be scalar after vectorization.
1801   /// The data is collected per VF.
1802   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1803 
1804   /// Holds the instructions (address computations) that are forced to be
1805   /// scalarized.
1806   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1807 
1808   /// PHINodes of the reductions that should be expanded in-loop along with
1809   /// their associated chains of reduction operations, in program order from top
  /// (PHI) to bottom.
1811   ReductionChainMap InLoopReductionChains;
1812 
1813   /// A Map of inloop reduction operations and their immediate chain operand.
1814   /// FIXME: This can be removed once reductions can be costed correctly in
1815   /// vplan. This was added to allow quick lookup to the inloop operations,
1816   /// without having to loop through InLoopReductionChains.
1817   DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1818 
1819   /// Returns the expected difference in cost from scalarizing the expression
1820   /// feeding a predicated instruction \p PredInst. The instructions to
1821   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1822   /// non-negative return value implies the expression will be scalarized.
1823   /// Currently, only single-use chains are considered for scalarization.
1824   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1825                               ElementCount VF);
1826 
1827   /// Collect the instructions that are uniform after vectorization. An
1828   /// instruction is uniform if we represent it with a single scalar value in
1829   /// the vectorized loop corresponding to each vector iteration. Examples of
1830   /// uniform instructions include pointer operands of consecutive or
1831   /// interleaved memory accesses. Note that although uniformity implies an
1832   /// instruction will be scalar, the reverse is not true. In general, a
1833   /// scalarized instruction will be represented by VF scalar values in the
1834   /// vectorized loop, each corresponding to an iteration of the original
1835   /// scalar loop.
1836   void collectLoopUniforms(ElementCount VF);
1837 
1838   /// Collect the instructions that are scalar after vectorization. An
1839   /// instruction is scalar if it is known to be uniform or will be scalarized
1840   /// during vectorization. collectLoopScalars should only add non-uniform nodes
1841   /// to the list if they are used by a load/store instruction that is marked as
1842   /// CM_Scalarize. Non-uniform scalarized instructions will be represented by
1843   /// VF values in the vectorized loop, each corresponding to an iteration of
1844   /// the original scalar loop.
1845   void collectLoopScalars(ElementCount VF);
1846 
1847   /// Keeps cost model vectorization decision and cost for instructions.
1848   /// Right now it is used for memory instructions only.
1849   using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1850                                 std::pair<InstWidening, InstructionCost>>;
1851 
1852   DecisionList WideningDecisions;
1853 
1854   /// Returns true if \p V is expected to be vectorized and it needs to be
1855   /// extracted.
1856   bool needsExtract(Value *V, ElementCount VF) const {
1857     Instruction *I = dyn_cast<Instruction>(V);
1858     if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1859         TheLoop->isLoopInvariant(I))
1860       return false;
1861 
1862     // Assume we can vectorize V (and hence we need extraction) if the
1863     // scalars are not computed yet. This can happen, because it is called
1864     // via getScalarizationOverhead from setCostBasedWideningDecision, before
1865     // the scalars are collected. That should be a safe assumption in most
1866     // cases, because we check if the operands have vectorizable types
1867     // beforehand in LoopVectorizationLegality.
1868     return Scalars.find(VF) == Scalars.end() ||
1869            !isScalarAfterVectorization(I, VF);
  }
1871 
1872   /// Returns a range containing only operands needing to be extracted.
1873   SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1874                                                    ElementCount VF) const {
1875     return SmallVector<Value *, 4>(make_filter_range(
1876         Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
1877   }
1878 
1879   /// Determines if we have the infrastructure to vectorize loop \p L and its
1880   /// epilogue, assuming the main loop is vectorized by \p VF.
1881   bool isCandidateForEpilogueVectorization(const Loop &L,
1882                                            const ElementCount VF) const;
1883 
1884   /// Returns true if epilogue vectorization is considered profitable, and
1885   /// false otherwise.
1886   /// \p VF is the vectorization factor chosen for the original loop.
1887   bool isEpilogueVectorizationProfitable(const ElementCount VF) const;
1888 
1889 public:
1890   /// The loop that we evaluate.
1891   Loop *TheLoop;
1892 
1893   /// Predicated scalar evolution analysis.
1894   PredicatedScalarEvolution &PSE;
1895 
1896   /// Loop Info analysis.
1897   LoopInfo *LI;
1898 
1899   /// Vectorization legality.
1900   LoopVectorizationLegality *Legal;
1901 
1902   /// Vector target information.
1903   const TargetTransformInfo &TTI;
1904 
1905   /// Target Library Info.
1906   const TargetLibraryInfo *TLI;
1907 
1908   /// Demanded bits analysis.
1909   DemandedBits *DB;
1910 
1911   /// Assumption cache.
1912   AssumptionCache *AC;
1913 
1914   /// Interface to emit optimization remarks.
1915   OptimizationRemarkEmitter *ORE;
1916 
1917   const Function *TheFunction;
1918 
1919   /// Loop Vectorize Hint.
1920   const LoopVectorizeHints *Hints;
1921 
1922   /// The interleave access information contains groups of interleaved accesses
1923   /// with the same stride and close to each other.
1924   InterleavedAccessInfo &InterleaveInfo;
1925 
1926   /// Values to ignore in the cost model.
1927   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1928 
1929   /// Values to ignore in the cost model when VF > 1.
1930   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1931 
1932   /// All element types found in the loop.
1933   SmallPtrSet<Type *, 16> ElementTypesInLoop;
1934 
1935   /// Profitable vector factors.
1936   SmallVector<VectorizationFactor, 8> ProfitableVFs;
1937 };
1938 } // end namespace llvm
1939 
1940 /// Helper struct to manage generating runtime checks for vectorization.
1941 ///
/// The runtime checks are created up-front in temporary blocks, un-linked from
/// the existing IR, to allow better cost estimation. After deciding to
/// vectorize, the checks are moved back. If the decision is not to vectorize,
/// the temporary blocks are removed completely.
1946 class GeneratedRTChecks {
1947   /// Basic block which contains the generated SCEV checks, if any.
1948   BasicBlock *SCEVCheckBlock = nullptr;
1949 
1950   /// The value representing the result of the generated SCEV checks. If it is
1951   /// nullptr, either no SCEV checks have been generated or they have been used.
1952   Value *SCEVCheckCond = nullptr;
1953 
1954   /// Basic block which contains the generated memory runtime checks, if any.
1955   BasicBlock *MemCheckBlock = nullptr;
1956 
1957   /// The value representing the result of the generated memory runtime checks.
1958   /// If it is nullptr, either no memory runtime checks have been generated or
1959   /// they have been used.
1960   Value *MemRuntimeCheckCond = nullptr;
1961 
1962   DominatorTree *DT;
1963   LoopInfo *LI;
1964 
1965   SCEVExpander SCEVExp;
1966   SCEVExpander MemCheckExp;
1967 
1968 public:
1969   GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI,
1970                     const DataLayout &DL)
1971       : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"),
1972         MemCheckExp(SE, DL, "scev.check") {}
1973 
1974   /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
1975   /// accurately estimate the cost of the runtime checks. The blocks are
  /// un-linked from the IR and are added back during vector code generation. If
1977   /// there is no vector code generation, the check blocks are removed
1978   /// completely.
1979   void Create(Loop *L, const LoopAccessInfo &LAI,
1980               const SCEVPredicate &Pred) {
1981 
1982     BasicBlock *LoopHeader = L->getHeader();
1983     BasicBlock *Preheader = L->getLoopPreheader();
1984 
1985     // Use SplitBlock to create blocks for SCEV & memory runtime checks to
1986     // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
1987     // may be used by SCEVExpander. The blocks will be un-linked from their
1988     // predecessors and removed from LI & DT at the end of the function.
1989     if (!Pred.isAlwaysTrue()) {
1990       SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
1991                                   nullptr, "vector.scevcheck");
1992 
1993       SCEVCheckCond = SCEVExp.expandCodeForPredicate(
1994           &Pred, SCEVCheckBlock->getTerminator());
1995     }
1996 
1997     const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
1998     if (RtPtrChecking.Need) {
1999       auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
2000       MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
2001                                  "vector.memcheck");
2002 
2003       MemRuntimeCheckCond =
2004           addRuntimeChecks(MemCheckBlock->getTerminator(), L,
2005                            RtPtrChecking.getChecks(), MemCheckExp);
2006       assert(MemRuntimeCheckCond &&
2007              "no RT checks generated although RtPtrChecking "
2008              "claimed checks are required");
2009     }
2010 
2011     if (!MemCheckBlock && !SCEVCheckBlock)
2012       return;
2013 
2014     // Unhook the temporary block with the checks, update various places
2015     // accordingly.
2016     if (SCEVCheckBlock)
2017       SCEVCheckBlock->replaceAllUsesWith(Preheader);
2018     if (MemCheckBlock)
2019       MemCheckBlock->replaceAllUsesWith(Preheader);
2020 
2021     if (SCEVCheckBlock) {
2022       SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
2023       new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
2024       Preheader->getTerminator()->eraseFromParent();
2025     }
2026     if (MemCheckBlock) {
2027       MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
2028       new UnreachableInst(Preheader->getContext(), MemCheckBlock);
2029       Preheader->getTerminator()->eraseFromParent();
2030     }
2031 
2032     DT->changeImmediateDominator(LoopHeader, Preheader);
2033     if (MemCheckBlock) {
2034       DT->eraseNode(MemCheckBlock);
2035       LI->removeBlock(MemCheckBlock);
2036     }
2037     if (SCEVCheckBlock) {
2038       DT->eraseNode(SCEVCheckBlock);
2039       LI->removeBlock(SCEVCheckBlock);
2040     }
2041   }
2042 
2043   /// Remove the created SCEV & memory runtime check blocks & instructions, if
2044   /// unused.
2045   ~GeneratedRTChecks() {
2046     SCEVExpanderCleaner SCEVCleaner(SCEVExp);
2047     SCEVExpanderCleaner MemCheckCleaner(MemCheckExp);
2048     if (!SCEVCheckCond)
2049       SCEVCleaner.markResultUsed();
2050 
2051     if (!MemRuntimeCheckCond)
2052       MemCheckCleaner.markResultUsed();
2053 
2054     if (MemRuntimeCheckCond) {
2055       auto &SE = *MemCheckExp.getSE();
2056       // Memory runtime check generation creates compares that use expanded
2057       // values. Remove them before running the SCEVExpanderCleaners.
2058       for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
2059         if (MemCheckExp.isInsertedInstruction(&I))
2060           continue;
2061         SE.forgetValue(&I);
2062         I.eraseFromParent();
2063       }
2064     }
2065     MemCheckCleaner.cleanup();
2066     SCEVCleaner.cleanup();
2067 
2068     if (SCEVCheckCond)
2069       SCEVCheckBlock->eraseFromParent();
2070     if (MemRuntimeCheckCond)
2071       MemCheckBlock->eraseFromParent();
2072   }
2073 
2074   /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and
2075   /// adjusts the branches to branch to the vector preheader or \p Bypass,
2076   /// depending on the generated condition.
2077   BasicBlock *emitSCEVChecks(BasicBlock *Bypass,
2078                              BasicBlock *LoopVectorPreHeader,
2079                              BasicBlock *LoopExitBlock) {
2080     if (!SCEVCheckCond)
2081       return nullptr;
2082     if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond))
2083       if (C->isZero())
2084         return nullptr;
2085 
2086     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2087 
2088     BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock);
    // Add the check block to the loop containing the vector preheader, if any.
2090     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2091       PL->addBasicBlockToLoop(SCEVCheckBlock, *LI);
2092 
2093     SCEVCheckBlock->getTerminator()->eraseFromParent();
2094     SCEVCheckBlock->moveBefore(LoopVectorPreHeader);
2095     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2096                                                 SCEVCheckBlock);
2097 
2098     DT->addNewBlock(SCEVCheckBlock, Pred);
2099     DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock);
2100 
2101     ReplaceInstWithInst(
2102         SCEVCheckBlock->getTerminator(),
2103         BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond));
2104     // Mark the check as used, to prevent it from being removed during cleanup.
2105     SCEVCheckCond = nullptr;
2106     return SCEVCheckBlock;
2107   }
2108 
2109   /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts
2110   /// the branches to branch to the vector preheader or \p Bypass, depending on
2111   /// the generated condition.
2112   BasicBlock *emitMemRuntimeChecks(BasicBlock *Bypass,
2113                                    BasicBlock *LoopVectorPreHeader) {
    // Check if we generated code that checks at runtime whether arrays overlap.
2115     if (!MemRuntimeCheckCond)
2116       return nullptr;
2117 
2118     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2119     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2120                                                 MemCheckBlock);
2121 
2122     DT->addNewBlock(MemCheckBlock, Pred);
2123     DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock);
2124     MemCheckBlock->moveBefore(LoopVectorPreHeader);
2125 
2126     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2127       PL->addBasicBlockToLoop(MemCheckBlock, *LI);
2128 
2129     ReplaceInstWithInst(
2130         MemCheckBlock->getTerminator(),
2131         BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond));
2132     MemCheckBlock->getTerminator()->setDebugLoc(
2133         Pred->getTerminator()->getDebugLoc());
2134 
2135     // Mark the check as used, to prevent it from being removed during cleanup.
2136     MemRuntimeCheckCond = nullptr;
2137     return MemCheckBlock;
2138   }
2139 };
2140 
2141 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
// vectorization. The loop needs to be annotated with #pragma omp simd
// simdlen(#) or #pragma clang loop vectorize(enable) vectorize_width(#). If
// the vector length information is not provided, vectorization is not
// considered explicit. Interleave hints are not allowed either. These
// limitations will be relaxed in the future.
// Please note that we are currently forced to abuse the pragma 'clang
2148 // vectorize' semantics. This pragma provides *auto-vectorization hints*
2149 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
2150 // provides *explicit vectorization hints* (LV can bypass legal checks and
2151 // assume that vectorization is legal). However, both hints are implemented
2152 // using the same metadata (llvm.loop.vectorize, processed by
2153 // LoopVectorizeHints). This will be fixed in the future when the native IR
2154 // representation for pragma 'omp simd' is introduced.
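//
// For example, an outer loop annotated as follows (illustrative only) is
// considered for explicit vectorization with VF = 4:
//   #pragma clang loop vectorize(enable) vectorize_width(4)
//   for (int i = 0; i < n; ++i)
//     for (int j = 0; j < m; ++j)
//       a[i][j] += b[j];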
2155 static bool isExplicitVecOuterLoop(Loop *OuterLp,
2156                                    OptimizationRemarkEmitter *ORE) {
2157   assert(!OuterLp->isInnermost() && "This is not an outer loop");
2158   LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
2159 
2160   // Only outer loops with an explicit vectorization hint are supported.
2161   // Unannotated outer loops are ignored.
2162   if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
2163     return false;
2164 
2165   Function *Fn = OuterLp->getHeader()->getParent();
2166   if (!Hints.allowVectorization(Fn, OuterLp,
2167                                 true /*VectorizeOnlyWhenForced*/)) {
2168     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
2169     return false;
2170   }
2171 
2172   if (Hints.getInterleave() > 1) {
2173     // TODO: Interleave support is future work.
2174     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
2175                          "outer loops.\n");
2176     Hints.emitRemarkWithHints();
2177     return false;
2178   }
2179 
2180   return true;
2181 }
2182 
2183 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
2184                                   OptimizationRemarkEmitter *ORE,
2185                                   SmallVectorImpl<Loop *> &V) {
2186   // Collect inner loops and outer loops without irreducible control flow. For
2187   // now, only collect outer loops that have explicit vectorization hints. If we
2188   // are stress testing the VPlan H-CFG construction, we collect the outermost
2189   // loop of every loop nest.
2190   if (L.isInnermost() || VPlanBuildStressTest ||
2191       (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
2192     LoopBlocksRPO RPOT(&L);
2193     RPOT.perform(LI);
2194     if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
2195       V.push_back(&L);
2196       // TODO: Collect inner loops inside marked outer loops in case
2197       // vectorization fails for the outer loop. Do not invoke
2198       // 'containsIrreducibleCFG' again for inner loops when the outer loop is
2199       // already known to be reducible. We can use an inherited attribute for
2200       // that.
2201       return;
2202     }
2203   }
2204   for (Loop *InnerL : L)
2205     collectSupportedLoops(*InnerL, LI, ORE, V);
2206 }
2207 
2208 namespace {
2209 
2210 /// The LoopVectorize Pass.
2211 struct LoopVectorize : public FunctionPass {
2212   /// Pass identification, replacement for typeid
2213   static char ID;
2214 
2215   LoopVectorizePass Impl;
2216 
2217   explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
2218                          bool VectorizeOnlyWhenForced = false)
2219       : FunctionPass(ID),
2220         Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
2221     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
2222   }
2223 
2224   bool runOnFunction(Function &F) override {
2225     if (skipFunction(F))
2226       return false;
2227 
2228     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2229     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2230     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
2231     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2232     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
2233     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
2234     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
2235     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2236     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2237     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
2238     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
2239     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2240     auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
2241 
2242     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
2243         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
2244 
2245     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
2246                         GetLAA, *ORE, PSI).MadeAnyChange;
2247   }
2248 
2249   void getAnalysisUsage(AnalysisUsage &AU) const override {
2250     AU.addRequired<AssumptionCacheTracker>();
2251     AU.addRequired<BlockFrequencyInfoWrapperPass>();
2252     AU.addRequired<DominatorTreeWrapperPass>();
2253     AU.addRequired<LoopInfoWrapperPass>();
2254     AU.addRequired<ScalarEvolutionWrapperPass>();
2255     AU.addRequired<TargetTransformInfoWrapperPass>();
2256     AU.addRequired<AAResultsWrapperPass>();
2257     AU.addRequired<LoopAccessLegacyAnalysis>();
2258     AU.addRequired<DemandedBitsWrapperPass>();
2259     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2260     AU.addRequired<InjectTLIMappingsLegacy>();
2261 
    // We currently do not preserve LoopInfo/DominatorTree analyses with outer
    // loop vectorization. Until this is addressed, mark these analyses as
    // preserved only for the non-VPlan-native path.
2265     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
2266     if (!EnableVPlanNativePath) {
2267       AU.addPreserved<LoopInfoWrapperPass>();
2268       AU.addPreserved<DominatorTreeWrapperPass>();
2269     }
2270 
2271     AU.addPreserved<BasicAAWrapperPass>();
2272     AU.addPreserved<GlobalsAAWrapperPass>();
2273     AU.addRequired<ProfileSummaryInfoWrapperPass>();
2274   }
2275 };
2276 
2277 } // end anonymous namespace
2278 
2279 //===----------------------------------------------------------------------===//
2280 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
2281 // LoopVectorizationCostModel and LoopVectorizationPlanner.
2282 //===----------------------------------------------------------------------===//
2283 
2284 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
  // We need to place the broadcast of invariant variables outside the loop,
  // but only if it's proven safe to do so. Otherwise, the broadcast will be
  // placed inside the vector loop body.
2288   Instruction *Instr = dyn_cast<Instruction>(V);
2289   bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
2290                      (!Instr ||
2291                       DT->dominates(Instr->getParent(), LoopVectorPreHeader));
2292   // Place the code for broadcasting invariant variables in the new preheader.
2293   IRBuilder<>::InsertPointGuard Guard(Builder);
2294   if (SafeToHoist)
2295     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2296 
2297   // Broadcast the scalar into all locations in the vector.
2298   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
2299 
2300   return Shuf;
2301 }
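
// For illustration, for VF = 4 CreateVectorSplat typically emits IR such as
// (names are the builder's defaults, shown here only as a sketch):
//   %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %v, i64 0
//   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert,
//                          <4 x i32> poison, <4 x i32> zeroinitializer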
2302 
/// This function adds
/// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
/// elementwise to the vector Val. The sequence starts at StartIdx.
/// \p BinOp is relevant for FP induction variables.
2307 static Value *getStepVector(Value *Val, Value *StartIdx, Value *Step,
2308                             Instruction::BinaryOps BinOp, ElementCount VF,
2309                             IRBuilderBase &Builder) {
2310   assert(VF.isVector() && "only vector VFs are supported");
2311 
2312   // Create and check the types.
2313   auto *ValVTy = cast<VectorType>(Val->getType());
2314   ElementCount VLen = ValVTy->getElementCount();
2315 
2316   Type *STy = Val->getType()->getScalarType();
2317   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
2318          "Induction Step must be an integer or FP");
2319   assert(Step->getType() == STy && "Step has wrong type");
2320 
2321   SmallVector<Constant *, 8> Indices;
2322 
2323   // Create a vector of consecutive numbers from zero to VF.
2324   VectorType *InitVecValVTy = ValVTy;
2325   if (STy->isFloatingPointTy()) {
2326     Type *InitVecValSTy =
2327         IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
2328     InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
2329   }
2330   Value *InitVec = Builder.CreateStepVector(InitVecValVTy);
2331 
  // Splat the StartIdx.
2333   Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx);
2334 
2335   if (STy->isIntegerTy()) {
2336     InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);
2337     Step = Builder.CreateVectorSplat(VLen, Step);
2338     assert(Step->getType() == Val->getType() && "Invalid step vec");
2339     // FIXME: The newly created binary instructions should contain nsw/nuw
2340     // flags, which can be found from the original scalar operations.
2341     Step = Builder.CreateMul(InitVec, Step);
2342     return Builder.CreateAdd(Val, Step, "induction");
2343   }
2344 
2345   // Floating point induction.
2346   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
2347          "Binary Opcode should be specified for FP induction");
2348   InitVec = Builder.CreateUIToFP(InitVec, ValVTy);
2349   InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat);
2350 
2351   Step = Builder.CreateVectorSplat(VLen, Step);
2352   Value *MulOp = Builder.CreateFMul(InitVec, Step);
2353   return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
2354 }
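
// For example, with VF = 4, Val = <%i, %i, %i, %i>, StartIdx = 0 and Step = 2,
// getStepVector returns
//   <%i + 0, %i + 2, %i + 4, %i + 6>
// computed as Val + (<0, 1, 2, 3> + splat(0)) * splat(2).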
2355 
/// Compute scalar induction steps. \p ScalarIV is the scalar induction
/// variable on which to base the steps; \p Step is the step size.
2358 static void buildScalarSteps(Value *ScalarIV, Value *Step,
2359                              const InductionDescriptor &ID, VPValue *Def,
2360                              VPTransformState &State) {
2361   IRBuilderBase &Builder = State.Builder;
2362   // We shouldn't have to build scalar steps if we aren't vectorizing.
2363   assert(State.VF.isVector() && "VF should be greater than one");
  // Get the value type and ensure it and the step have the same type.
2365   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
2366   assert(ScalarIVTy == Step->getType() &&
2367          "Val and Step should have the same type");
2368 
2369   // We build scalar steps for both integer and floating-point induction
2370   // variables. Here, we determine the kind of arithmetic we will perform.
2371   Instruction::BinaryOps AddOp;
2372   Instruction::BinaryOps MulOp;
2373   if (ScalarIVTy->isIntegerTy()) {
2374     AddOp = Instruction::Add;
2375     MulOp = Instruction::Mul;
2376   } else {
2377     AddOp = ID.getInductionOpcode();
2378     MulOp = Instruction::FMul;
2379   }
2380 
2381   // Determine the number of scalars we need to generate for each unroll
2382   // iteration.
2383   bool FirstLaneOnly = vputils::onlyFirstLaneUsed(Def);
2384   unsigned Lanes = FirstLaneOnly ? 1 : State.VF.getKnownMinValue();
2385   // Compute the scalar steps and save the results in State.
2386   Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
2387                                      ScalarIVTy->getScalarSizeInBits());
2388   Type *VecIVTy = nullptr;
2389   Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr;
2390   if (!FirstLaneOnly && State.VF.isScalable()) {
2391     VecIVTy = VectorType::get(ScalarIVTy, State.VF);
2392     UnitStepVec =
2393         Builder.CreateStepVector(VectorType::get(IntStepTy, State.VF));
2394     SplatStep = Builder.CreateVectorSplat(State.VF, Step);
2395     SplatIV = Builder.CreateVectorSplat(State.VF, ScalarIV);
2396   }
2397 
2398   for (unsigned Part = 0; Part < State.UF; ++Part) {
2399     Value *StartIdx0 = createStepForVF(Builder, IntStepTy, State.VF, Part);
2400 
2401     if (!FirstLaneOnly && State.VF.isScalable()) {
2402       auto *SplatStartIdx = Builder.CreateVectorSplat(State.VF, StartIdx0);
2403       auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec);
2404       if (ScalarIVTy->isFloatingPointTy())
2405         InitVec = Builder.CreateSIToFP(InitVec, VecIVTy);
2406       auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep);
2407       auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul);
2408       State.set(Def, Add, Part);
      // It's also useful to record the lane values for the known minimum
      // number of elements, so we do that below. This improves the code
      // quality when extracting the first element, for example.
2412     }
2413 
2414     if (ScalarIVTy->isFloatingPointTy())
2415       StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy);
2416 
2417     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2418       Value *StartIdx = Builder.CreateBinOp(
2419           AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane));
2420       // The step returned by `createStepForVF` is a runtime-evaluated value
2421       // when VF is scalable. Otherwise, it should be folded into a Constant.
2422       assert((State.VF.isScalable() || isa<Constant>(StartIdx)) &&
2423              "Expected StartIdx to be folded to a constant when VF is not "
2424              "scalable");
2425       auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
2426       auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul);
2427       State.set(Def, Add, VPIteration(Part, Lane));
2428     }
2429   }
2430 }
2431 
// Generate code for the induction step. Note that induction steps are
// required to be loop-invariant.
2434 static Value *CreateStepValue(const SCEV *Step, ScalarEvolution &SE,
2435                               Instruction *InsertBefore,
2436                               Loop *OrigLoop = nullptr) {
2437   const DataLayout &DL = SE.getDataLayout();
2438   assert((!OrigLoop || SE.isLoopInvariant(Step, OrigLoop)) &&
2439          "Induction step should be loop invariant");
2440   if (auto *E = dyn_cast<SCEVUnknown>(Step))
2441     return E->getValue();
2442 
2443   SCEVExpander Exp(SE, DL, "induction");
2444   return Exp.expandCodeFor(Step, Step->getType(), InsertBefore);
2445 }
2446 
2447 /// Compute the transformed value of Index at offset StartValue using step
2448 /// StepValue.
2449 /// For integer induction, returns StartValue + Index * StepValue.
2450 /// For pointer induction, returns StartValue[Index * StepValue].
2451 /// FIXME: The newly created binary instructions should contain nsw/nuw
2452 /// flags, which can be found from the original scalar operations.
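/// For example (illustrative), an integer induction with StartValue %start,
/// StepValue 4 and Index %i yields %start + %i * 4, while a pointer induction
/// with the same operands yields a GEP to element %i * 4 of %start.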
2453 static Value *emitTransformedIndex(IRBuilderBase &B, Value *Index,
2454                                    Value *StartValue, Value *Step,
2455                                    const InductionDescriptor &ID) {
2456   assert(Index->getType()->getScalarType() == Step->getType() &&
2457          "Index scalar type does not match StepValue type");
2458 
  // Note: the IR at this point is broken. We cannot use SE to create any new
  // SCEV and then expand it, hoping that SCEV's simplification will give us
  // more optimal code. Unfortunately, attempting to do so on invalid IR may
  // lead to various SCEV crashes. So all we can do is use the builder and
  // rely on InstCombine for future simplifications. Here we handle only some
  // trivial cases.
2465   auto CreateAdd = [&B](Value *X, Value *Y) {
2466     assert(X->getType() == Y->getType() && "Types don't match!");
2467     if (auto *CX = dyn_cast<ConstantInt>(X))
2468       if (CX->isZero())
2469         return Y;
2470     if (auto *CY = dyn_cast<ConstantInt>(Y))
2471       if (CY->isZero())
2472         return X;
2473     return B.CreateAdd(X, Y);
2474   };
2475 
2476   // We allow X to be a vector type, in which case Y will potentially be
2477   // splatted into a vector with the same element count.
2478   auto CreateMul = [&B](Value *X, Value *Y) {
2479     assert(X->getType()->getScalarType() == Y->getType() &&
2480            "Types don't match!");
2481     if (auto *CX = dyn_cast<ConstantInt>(X))
2482       if (CX->isOne())
2483         return Y;
2484     if (auto *CY = dyn_cast<ConstantInt>(Y))
2485       if (CY->isOne())
2486         return X;
2487     VectorType *XVTy = dyn_cast<VectorType>(X->getType());
2488     if (XVTy && !isa<VectorType>(Y->getType()))
2489       Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
2490     return B.CreateMul(X, Y);
2491   };
2492 
2493   switch (ID.getKind()) {
2494   case InductionDescriptor::IK_IntInduction: {
2495     assert(!isa<VectorType>(Index->getType()) &&
2496            "Vector indices not supported for integer inductions yet");
2497     assert(Index->getType() == StartValue->getType() &&
2498            "Index type does not match StartValue type");
2499     if (isa<ConstantInt>(Step) && cast<ConstantInt>(Step)->isMinusOne())
2500       return B.CreateSub(StartValue, Index);
2501     auto *Offset = CreateMul(Index, Step);
2502     return CreateAdd(StartValue, Offset);
2503   }
2504   case InductionDescriptor::IK_PtrInduction: {
2505     assert(isa<Constant>(Step) &&
2506            "Expected constant step for pointer induction");
2507     return B.CreateGEP(ID.getElementType(), StartValue, CreateMul(Index, Step));
2508   }
2509   case InductionDescriptor::IK_FpInduction: {
2510     assert(!isa<VectorType>(Index->getType()) &&
2511            "Vector indices not supported for FP inductions yet");
2512     assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
2513     auto InductionBinOp = ID.getInductionBinOp();
2514     assert(InductionBinOp &&
2515            (InductionBinOp->getOpcode() == Instruction::FAdd ||
2516             InductionBinOp->getOpcode() == Instruction::FSub) &&
2517            "Original bin op should be defined for FP induction");
2518 
2519     Value *MulExp = B.CreateFMul(Step, Index);
2520     return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
2521                          "induction");
2522   }
2523   case InductionDescriptor::IK_NoInduction:
2524     return nullptr;
2525   }
2526   llvm_unreachable("invalid enum");
2527 }
2528 
2529 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def,
2530                                                     const VPIteration &Instance,
2531                                                     VPTransformState &State) {
2532   Value *ScalarInst = State.get(Def, Instance);
2533   Value *VectorValue = State.get(Def, Instance.Part);
2534   VectorValue = Builder.CreateInsertElement(
2535       VectorValue, ScalarInst,
2536       Instance.Lane.getAsRuntimeExpr(State.Builder, VF));
2537   State.set(Def, VectorValue, Instance.Part);
2538 }
2539 
2540 // Return whether we allow using masked interleave-groups (for dealing with
2541 // strided loads/stores that reside in predicated blocks, or for dealing
2542 // with gaps).
2543 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2544   // If an override option has been passed in for interleaved accesses, use it.
2545   if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2546     return EnableMaskedInterleavedMemAccesses;
2547 
2548   return TTI.enableMaskedInterleavedAccessVectorization();
2549 }
2550 
2551 // Try to vectorize the interleave group that \p Instr belongs to.
2552 //
// E.g. Translate the following interleaved load group (factor = 3):
2554 //   for (i = 0; i < N; i+=3) {
2555 //     R = Pic[i];             // Member of index 0
2556 //     G = Pic[i+1];           // Member of index 1
2557 //     B = Pic[i+2];           // Member of index 2
2558 //     ... // do something to R, G, B
2559 //   }
2560 // To:
2561 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2562 //   %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9>   ; R elements
2563 //   %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10>  ; G elements
2564 //   %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11>  ; B elements
2565 //
// Or translate the following interleaved store group (factor = 3):
2567 //   for (i = 0; i < N; i+=3) {
2568 //     ... do something to R, G, B
2569 //     Pic[i]   = R;           // Member of index 0
2570 //     Pic[i+1] = G;           // Member of index 1
2571 //     Pic[i+2] = B;           // Member of index 2
2572 //   }
2573 // To:
2574 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2575 //   %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
2576 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2577 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2578 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2579 void InnerLoopVectorizer::vectorizeInterleaveGroup(
2580     const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
2581     VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
2582     VPValue *BlockInMask) {
2583   Instruction *Instr = Group->getInsertPos();
2584   const DataLayout &DL = Instr->getModule()->getDataLayout();
2585 
2586   // Prepare for the vector type of the interleaved load/store.
2587   Type *ScalarTy = getLoadStoreType(Instr);
2588   unsigned InterleaveFactor = Group->getFactor();
2589   assert(!VF.isScalable() && "scalable vectors not yet supported.");
2590   auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);
2591 
2592   // Prepare for the new pointers.
2593   SmallVector<Value *, 2> AddrParts;
2594   unsigned Index = Group->getIndex(Instr);
2595 
2596   // TODO: extend the masked interleaved-group support to reversed access.
2597   assert((!BlockInMask || !Group->isReverse()) &&
2598          "Reversed masked interleave-group not supported.");
2599 
2600   // If the group is reverse, adjust the index to refer to the last vector lane
2601   // instead of the first. We adjust the index from the first vector lane,
2602   // rather than directly getting the pointer for lane VF - 1, because the
2603   // pointer operand of the interleaved access is supposed to be uniform. For
2604   // uniform instructions, we're only required to generate a value for the
2605   // first vector lane in each unroll iteration.
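  // For example (illustrative), with VF = 4 and an interleave factor of 2,
  // Index grows by (4 - 1) * 2 = 6 group elements, so after the negation
  // below the lane-0 pointer addresses the tuple accessed by the last lane.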
2606   if (Group->isReverse())
2607     Index += (VF.getKnownMinValue() - 1) * Group->getFactor();
2608 
2609   for (unsigned Part = 0; Part < UF; Part++) {
2610     Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
2611     setDebugLocFromInst(AddrPart);
2612 
    // Note that the current instruction could be at any index in the group.
    // We need to adjust the address to the member of index 0.
2615     //
2616     // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
2617     //       b = A[i];       // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
2619     //
2620     // E.g.  A[i+1] = a;     // Member of index 1
2621     //       A[i]   = b;     // Member of index 0
2622     //       A[i+2] = c;     // Member of index 2 (Current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].
2624 
2625     bool InBounds = false;
2626     if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
2627       InBounds = gep->isInBounds();
2628     AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2629     cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2630 
2631     // Cast to the vector pointer type.
2632     unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
2633     Type *PtrTy = VecTy->getPointerTo(AddressSpace);
2634     AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
2635   }
2636 
2637   setDebugLocFromInst(Instr);
2638   Value *PoisonVec = PoisonValue::get(VecTy);
2639 
2640   Value *MaskForGaps = nullptr;
2641   if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
2642     MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2643     assert(MaskForGaps && "Mask for Gaps is required but it is null");
2644   }
2645 
2646   // Vectorize the interleaved load group.
2647   if (isa<LoadInst>(Instr)) {
2648     // For each unroll part, create a wide load for the group.
2649     SmallVector<Value *, 2> NewLoads;
2650     for (unsigned Part = 0; Part < UF; Part++) {
2651       Instruction *NewLoad;
2652       if (BlockInMask || MaskForGaps) {
2653         assert(useMaskedInterleavedAccesses(*TTI) &&
2654                "masked interleaved groups are not allowed.");
2655         Value *GroupMask = MaskForGaps;
2656         if (BlockInMask) {
2657           Value *BlockInMaskPart = State.get(BlockInMask, Part);
2658           Value *ShuffledMask = Builder.CreateShuffleVector(
2659               BlockInMaskPart,
2660               createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2661               "interleaved.mask");
2662           GroupMask = MaskForGaps
2663                           ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2664                                                 MaskForGaps)
2665                           : ShuffledMask;
2666         }
2667         NewLoad =
2668             Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(),
2669                                      GroupMask, PoisonVec, "wide.masked.vec");
      } else
2672         NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
2673                                             Group->getAlign(), "wide.vec");
2674       Group->addMetadata(NewLoad);
2675       NewLoads.push_back(NewLoad);
2676     }
2677 
2678     // For each member in the group, shuffle out the appropriate data from the
2679     // wide loads.
2680     unsigned J = 0;
2681     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2682       Instruction *Member = Group->getMember(I);
2683 
2684       // Skip the gaps in the group.
2685       if (!Member)
2686         continue;
2687 
2688       auto StrideMask =
2689           createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
2690       for (unsigned Part = 0; Part < UF; Part++) {
2691         Value *StridedVec = Builder.CreateShuffleVector(
2692             NewLoads[Part], StrideMask, "strided.vec");
2693 
        // If this member has a different type, cast the result to its type.
2695         if (Member->getType() != ScalarTy) {
          assert(!VF.isScalable() && "VF is assumed to be non-scalable.");
2697           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2698           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2699         }
2700 
2701         if (Group->isReverse())
2702           StridedVec = Builder.CreateVectorReverse(StridedVec, "reverse");
2703 
2704         State.set(VPDefs[J], StridedVec, Part);
2705       }
2706       ++J;
2707     }
2708     return;
2709   }
2710 
  // The subvector type for the current instruction.
2712   auto *SubVT = VectorType::get(ScalarTy, VF);
2713 
2714   // Vectorize the interleaved store group.
2715   MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2716   assert((!MaskForGaps || useMaskedInterleavedAccesses(*TTI)) &&
2717          "masked interleaved groups are not allowed.");
2718   assert((!MaskForGaps || !VF.isScalable()) &&
2719          "masking gaps for scalable vectors is not yet supported.");
2720   for (unsigned Part = 0; Part < UF; Part++) {
2721     // Collect the stored vector from each member.
2722     SmallVector<Value *, 4> StoredVecs;
2723     for (unsigned i = 0; i < InterleaveFactor; i++) {
2724       assert((Group->getMember(i) || MaskForGaps) &&
             "Failed to get a member from an interleaved store group");
2726       Instruction *Member = Group->getMember(i);
2727 
2728       // Skip the gaps in the group.
2729       if (!Member) {
        Value *Poison = PoisonValue::get(SubVT);
        StoredVecs.push_back(Poison);
2732         continue;
2733       }
2734 
2735       Value *StoredVec = State.get(StoredValues[i], Part);
2736 
2737       if (Group->isReverse())
2738         StoredVec = Builder.CreateVectorReverse(StoredVec, "reverse");
2739 
      // If this member has a different type, cast it to a unified type.
      if (StoredVec->getType() != SubVT)
2743         StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2744 
2745       StoredVecs.push_back(StoredVec);
2746     }
2747 
2748     // Concatenate all vectors into a wide vector.
2749     Value *WideVec = concatenateVectors(Builder, StoredVecs);
2750 
2751     // Interleave the elements in the wide vector.
2752     Value *IVec = Builder.CreateShuffleVector(
2753         WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor),
2754         "interleaved.vec");
2755 
2756     Instruction *NewStoreInstr;
2757     if (BlockInMask || MaskForGaps) {
2758       Value *GroupMask = MaskForGaps;
2759       if (BlockInMask) {
2760         Value *BlockInMaskPart = State.get(BlockInMask, Part);
2761         Value *ShuffledMask = Builder.CreateShuffleVector(
2762             BlockInMaskPart,
2763             createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2764             "interleaved.mask");
2765         GroupMask = MaskForGaps ? Builder.CreateBinOp(Instruction::And,
2766                                                       ShuffledMask, MaskForGaps)
2767                                 : ShuffledMask;
2768       }
2769       NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part],
2770                                                 Group->getAlign(), GroupMask);
2771     } else
2772       NewStoreInstr =
2773           Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());
2774 
2775     Group->addMetadata(NewStoreInstr);
2776   }
2777 }
2778 
2779 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
2780                                                VPReplicateRecipe *RepRecipe,
2781                                                const VPIteration &Instance,
2782                                                bool IfPredicateInstr,
2783                                                VPTransformState &State) {
2784   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
2785 
2786   // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for
2787   // the first lane and part.
2788   if (isa<NoAliasScopeDeclInst>(Instr))
2789     if (!Instance.isFirstIteration())
2790       return;
2791 
2792   setDebugLocFromInst(Instr);
2793 
  // Does this instruction return a value?
2795   bool IsVoidRetTy = Instr->getType()->isVoidTy();
2796 
2797   Instruction *Cloned = Instr->clone();
2798   if (!IsVoidRetTy)
2799     Cloned->setName(Instr->getName() + ".cloned");
2800 
  // If the scalarized instruction contributes to the address computation of a
  // widened masked load/store which was in a basic block that needed
  // predication and is not predicated after vectorization, we can't propagate
  // poison-generating flags (nuw/nsw, exact, inbounds, etc.). The scalarized
  // instruction could feed a poison value to the base address of the widened
  // load/store.
2807   if (State.MayGeneratePoisonRecipes.contains(RepRecipe))
2808     Cloned->dropPoisonGeneratingFlags();
2809 
2810   State.Builder.SetInsertPoint(Builder.GetInsertBlock(),
2811                                Builder.GetInsertPoint());
2812   // Replace the operands of the cloned instructions with their scalar
2813   // equivalents in the new loop.
2814   for (auto &I : enumerate(RepRecipe->operands())) {
2815     auto InputInstance = Instance;
2816     VPValue *Operand = I.value();
2817     VPReplicateRecipe *OperandR = dyn_cast<VPReplicateRecipe>(Operand);
2818     if (OperandR && OperandR->isUniform())
2819       InputInstance.Lane = VPLane::getFirstLane();
2820     Cloned->setOperand(I.index(), State.get(Operand, InputInstance));
2821   }
2822   addNewMetadata(Cloned, Instr);
2823 
2824   // Place the cloned scalar in the new loop.
2825   Builder.Insert(Cloned);
2826 
2827   State.set(RepRecipe, Cloned, Instance);
2828 
  // If we just cloned a new assumption, add it to the assumption cache.
2830   if (auto *II = dyn_cast<AssumeInst>(Cloned))
2831     AC->registerAssumption(II);
2832 
2833   // End if-block.
2834   if (IfPredicateInstr)
2835     PredicatedInstructions.push_back(Cloned);
2836 }
2837 
2838 void InnerLoopVectorizer::createHeaderBranch(Loop *L) {
2839   BasicBlock *Header = L->getHeader();
2840   assert(!L->getLoopLatch() && "loop should not have a latch at this point");
2841 
2842   IRBuilder<> B(Header->getTerminator());
2843   Instruction *OldInst =
2844       getDebugLocFromInstOrOperands(Legal->getPrimaryInduction());
2845   setDebugLocFromInst(OldInst, &B);
2846 
  // Connect the header to the exit block and back to itself, replacing the
  // old terminator.
2849   B.CreateCondBr(B.getTrue(), L->getUniqueExitBlock(), Header);
2850 
2851   // Now we have two terminators. Remove the old one from the block.
2852   Header->getTerminator()->eraseFromParent();
2853 }
2854 
2855 Value *InnerLoopVectorizer::getOrCreateTripCount(BasicBlock *InsertBlock) {
2856   if (TripCount)
2857     return TripCount;
2858 
2859   assert(InsertBlock);
2860   IRBuilder<> Builder(InsertBlock->getTerminator());
2861   // Find the loop boundaries.
2862   ScalarEvolution *SE = PSE.getSE();
2863   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
2864   assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
2865          "Invalid loop count");
2866 
2867   Type *IdxTy = Legal->getWidestInductionType();
2868   assert(IdxTy && "No type for induction");
2869 
  // The exit count might have type i64 while the phi has type i32. This can
  // happen if we have an induction variable that is sign extended before the
  // compare. The only way we get a backedge-taken count in that case is if
  // the induction variable was signed and, as such, will not overflow, so
  // truncation is legal.
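  // For example (illustrative), a loop whose i32 induction variable is
  // compared as 'icmp slt i64 (sext i32 %iv to i64), %n' yields an i64
  // backedge-taken count that can safely be truncated to i32 here.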
2875   if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
2876       IdxTy->getPrimitiveSizeInBits())
2877     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
2878   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
2879 
2880   // Get the total trip count from the count by adding 1.
2881   const SCEV *ExitCount = SE->getAddExpr(
2882       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
2883 
2884   const DataLayout &DL = InsertBlock->getModule()->getDataLayout();
2885 
2886   // Expand the trip count and place the new instructions in the preheader.
2887   // Notice that the pre-header does not change, only the loop body.
2888   SCEVExpander Exp(*SE, DL, "induction");
2889 
2890   // Count holds the overall loop count (N).
2891   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
2892                                 InsertBlock->getTerminator());
2893 
2894   if (TripCount->getType()->isPointerTy())
2895     TripCount =
2896         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
2897                                     InsertBlock->getTerminator());
2898 
2899   return TripCount;
2900 }
2901 
2902 Value *
2903 InnerLoopVectorizer::getOrCreateVectorTripCount(BasicBlock *InsertBlock) {
2904   if (VectorTripCount)
2905     return VectorTripCount;
2906 
2907   Value *TC = getOrCreateTripCount(InsertBlock);
2908   IRBuilder<> Builder(InsertBlock->getTerminator());
2909 
2910   Type *Ty = TC->getType();
2911   // This is where we can make the step a runtime constant.
2912   Value *Step = createStepForVF(Builder, Ty, VF, UF);
2913 
2914   // If the tail is to be folded by masking, round the number of iterations N
2915   // up to a multiple of Step instead of rounding down. This is done by first
2916   // adding Step-1 and then rounding down. Note that it's ok if this addition
2917   // overflows: the vector induction variable will eventually wrap to zero given
2918   // that it starts at zero and its Step is a power of two; the loop will then
2919   // exit, with the last early-exit vector comparison also producing all-true.
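  // Worked example (illustrative): with a trip count N = 10 and VF * UF = 8,
  // N is rounded up to 10 + 7 = 17, and the vector trip count computed below
  // becomes 17 - (17 % 8) = 16, i.e. two masked vector iterations.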
2920   if (Cost->foldTailByMasking()) {
2921     assert(isPowerOf2_32(VF.getKnownMinValue() * UF) &&
2922            "VF*UF must be a power of 2 when folding tail by masking");
2923     Value *NumLanes = getRuntimeVF(Builder, Ty, VF * UF);
2924     TC = Builder.CreateAdd(
2925         TC, Builder.CreateSub(NumLanes, ConstantInt::get(Ty, 1)), "n.rnd.up");
2926   }
2927 
2928   // Now we need to generate the expression for the part of the loop that the
2929   // vectorized body will execute. This is equal to N - (N % Step) if scalar
2930   // iterations are not required for correctness, or N - Step, otherwise. Step
2931   // is equal to the vectorization factor (number of SIMD elements) times the
2932   // unroll factor (number of SIMD instructions).
2933   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
2934 
2935   // There are cases where we *must* run at least one iteration in the remainder
2936   // loop.  See the cost model for when this can happen.  If the step evenly
2937   // divides the trip count, we set the remainder to be equal to the step. If
2938   // the step does not evenly divide the trip count, no adjustment is necessary
2939   // since there will already be scalar iterations. Note that the minimum
2940   // iterations check ensures that N >= Step.
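  // For example (illustrative), with N = 16 and Step = 8 the remainder R is
  // 0, so R is bumped to 8 and the vector loop runs 16 - 8 = 8 iterations,
  // leaving 8 iterations for the scalar epilogue.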
2941   if (Cost->requiresScalarEpilogue(VF)) {
2942     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
2943     R = Builder.CreateSelect(IsZero, Step, R);
2944   }
2945 
2946   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
2947 
2948   return VectorTripCount;
2949 }
2950 
2951 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
2952                                                    const DataLayout &DL) {
2953   // Verify that V is a vector type with same number of elements as DstVTy.
2954   auto *DstFVTy = cast<FixedVectorType>(DstVTy);
2955   unsigned VF = DstFVTy->getNumElements();
2956   auto *SrcVecTy = cast<FixedVectorType>(V->getType());
  assert((VF == SrcVecTy->getNumElements()) &&
         "Vector dimensions do not match");
2958   Type *SrcElemTy = SrcVecTy->getElementType();
2959   Type *DstElemTy = DstFVTy->getElementType();
2960   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
2961          "Vector elements must have same size");
2962 
2963   // Do a direct cast if element types are castable.
2964   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
2965     return Builder.CreateBitOrPointerCast(V, DstFVTy);
2966   }
  // V cannot be directly cast to the desired vector type. This may happen
  // when V is a floating point vector but DstVTy is a vector of pointers, or
  // vice-versa. Handle this with a two-step bitcast through an intermediate
  // integer type, i.e. Ptr <-> Int <-> Float.
2971   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
2972          "Only one type should be a pointer type");
2973   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
2974          "Only one type should be a floating point type");
2975   Type *IntTy =
2976       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
2977   auto *VecIntTy = FixedVectorType::get(IntTy, VF);
2978   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
2979   return Builder.CreateBitOrPointerCast(CastVal, DstFVTy);
2980 }
2981 
2982 void InnerLoopVectorizer::emitMinimumIterationCountCheck(BasicBlock *Bypass) {
2983   Value *Count = getOrCreateTripCount(LoopVectorPreHeader);
  // Reuse the existing vector loop preheader for the TC checks.
  // Note that a new preheader block is generated for the vector loop.
2986   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
2987   IRBuilder<> Builder(TCCheckBlock->getTerminator());
2988 
2989   // Generate code to check if the loop's trip count is less than VF * UF, or
2990   // equal to it in case a scalar epilogue is required; this implies that the
2991   // vector trip count is zero. This check also covers the case where adding one
2992   // to the backedge-taken count overflowed leading to an incorrect trip count
2993   // of zero. In this case we will also jump to the scalar loop.
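  // For example (illustrative), with VF = 4, UF = 2 and the tail not folded
  // by masking, this emits
  //   %min.iters.check = icmp ult i64 %count, 8
  // (ule instead when a scalar epilogue is required).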
2994   auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE
2995                                             : ICmpInst::ICMP_ULT;
2996 
2997   // If tail is to be folded, vector loop takes care of all iterations.
2998   Value *CheckMinIters = Builder.getFalse();
2999   if (!Cost->foldTailByMasking()) {
3000     Value *Step = createStepForVF(Builder, Count->getType(), VF, UF);
3001     CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
3002   }
3003   // Create new preheader for vector loop.
3004   LoopVectorPreHeader =
3005       SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
3006                  "vector.ph");
3007 
3008   assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
3009                                DT->getNode(Bypass)->getIDom()) &&
3010          "TC check is expected to dominate Bypass");
3011 
3012   // Update dominator for Bypass & LoopExit (if needed).
3013   DT->changeImmediateDominator(Bypass, TCCheckBlock);
3014   if (!Cost->requiresScalarEpilogue(VF))
3015     // If there is an epilogue which must run, there's no edge from the
    // middle block to exit blocks and thus no need to update the immediate
3017     // dominator of the exit blocks.
3018     DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
3019 
3020   ReplaceInstWithInst(
3021       TCCheckBlock->getTerminator(),
3022       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
3023   LoopBypassBlocks.push_back(TCCheckBlock);
3024 }
3025 
3026 BasicBlock *InnerLoopVectorizer::emitSCEVChecks(BasicBlock *Bypass) {
3028   BasicBlock *const SCEVCheckBlock =
3029       RTChecks.emitSCEVChecks(Bypass, LoopVectorPreHeader, LoopExitBlock);
3030   if (!SCEVCheckBlock)
3031     return nullptr;
3032 
3033   assert(!(SCEVCheckBlock->getParent()->hasOptSize() ||
3034            (OptForSizeBasedOnProfile &&
3035             Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) &&
3036          "Cannot SCEV check stride or overflow when optimizing for size");
3037 
  // Update the dominator only if this is the first RT check.
3040   if (LoopBypassBlocks.empty()) {
3041     DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
3042     if (!Cost->requiresScalarEpilogue(VF))
3043       // If there is an epilogue which must run, there's no edge from the
      // middle block to exit blocks and thus no need to update the immediate
3045       // dominator of the exit blocks.
3046       DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
3047   }
3048 
3049   LoopBypassBlocks.push_back(SCEVCheckBlock);
3050   AddedSafetyChecks = true;
3051   return SCEVCheckBlock;
3052 }
3053 
3054 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(BasicBlock *Bypass) {
3055   // VPlan-native path does not do any analysis for runtime checks currently.
3056   if (EnableVPlanNativePath)
3057     return nullptr;
3058 
3059   BasicBlock *const MemCheckBlock =
3060       RTChecks.emitMemRuntimeChecks(Bypass, LoopVectorPreHeader);
3061 
  // Check if we generated code that checks at runtime whether arrays overlap.
  // We put the checks into a separate block to make the more common case of
  // few elements faster.
3065   if (!MemCheckBlock)
3066     return nullptr;
3067 
3068   if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
3069     assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
3070            "Cannot emit memory checks when optimizing for size, unless forced "
3071            "to vectorize.");
3072     ORE->emit([&]() {
3073       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
3074                                         OrigLoop->getStartLoc(),
3075                                         OrigLoop->getHeader())
3076              << "Code-size may be reduced by not forcing "
3077                 "vectorization, or by source-code modifications "
3078                 "eliminating the need for runtime checks "
3079                 "(e.g., adding 'restrict').";
3080     });
3081   }
3082 
3083   LoopBypassBlocks.push_back(MemCheckBlock);
3084 
3085   AddedSafetyChecks = true;
3086 
3087   // We currently don't use LoopVersioning for the actual loop cloning but we
3088   // still use it to add the noalias metadata.
3089   LVer = std::make_unique<LoopVersioning>(
3090       *Legal->getLAI(),
3091       Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
3092       DT, PSE.getSE());
3093   LVer->prepareNoAliasMetadata();
3094   return MemCheckBlock;
3095 }
3096 
3097 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
3098   LoopScalarBody = OrigLoop->getHeader();
3099   LoopVectorPreHeader = OrigLoop->getLoopPreheader();
3100   assert(LoopVectorPreHeader && "Invalid loop structure");
3101   LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr
3102   assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) &&
3103          "multiple exit loop without required epilogue?");
3104 
3105   LoopMiddleBlock =
3106       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3107                  LI, nullptr, Twine(Prefix) + "middle.block");
3108   LoopScalarPreHeader =
3109       SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
3110                  nullptr, Twine(Prefix) + "scalar.ph");
3111 
3112   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3113 
3114   // Set up the middle block terminator.  Two cases:
3115   // 1) If we know that we must execute the scalar epilogue, emit an
3116   //    unconditional branch.
3117   // 2) Otherwise, we must have a single unique exit block (due to how we
  //    implement the multiple exit case).  In this case, set up a conditional
3119   //    branch from the middle block to the loop scalar preheader, and the
3120   //    exit block.  completeLoopSkeleton will update the condition to use an
3121   //    iteration check, if required to decide whether to execute the remainder.
3122   BranchInst *BrInst = Cost->requiresScalarEpilogue(VF) ?
3123     BranchInst::Create(LoopScalarPreHeader) :
3124     BranchInst::Create(LoopExitBlock, LoopScalarPreHeader,
3125                        Builder.getTrue());
3126   BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3127   ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
3128 
  // We intentionally don't let SplitBlock update LoopInfo since
  // LoopVectorBody should belong to another loop than LoopVectorPreHeader.
  // LoopVectorBody is explicitly added to the correct place a few lines later.
3132   BasicBlock *LoopVectorBody =
3133       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3134                  nullptr, nullptr, Twine(Prefix) + "vector.body");
3135 
3136   // Update dominator for loop exit.
3137   if (!Cost->requiresScalarEpilogue(VF))
3138     // If there is an epilogue which must run, there's no edge from the
    // middle block to exit blocks and thus no need to update the immediate
3140     // dominator of the exit blocks.
3141     DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
3142 
3143   // Create and register the new vector loop.
3144   Loop *Lp = LI->AllocateLoop();
3145   Loop *ParentLoop = OrigLoop->getParentLoop();
3146 
3147   // Insert the new loop into the loop nest and register the new basic blocks
3148   // before calling any utilities such as SCEV that require valid LoopInfo.
3149   if (ParentLoop) {
3150     ParentLoop->addChildLoop(Lp);
3151   } else {
3152     LI->addTopLevelLoop(Lp);
3153   }
3154   Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
3155   return Lp;
3156 }
3157 
3158 void InnerLoopVectorizer::createInductionResumeValues(
3159     Loop *L, std::pair<BasicBlock *, Value *> AdditionalBypass) {
3160   assert(((AdditionalBypass.first && AdditionalBypass.second) ||
3161           (!AdditionalBypass.first && !AdditionalBypass.second)) &&
3162          "Inconsistent information about additional bypass.");
3163 
3164   Value *VectorTripCount = getOrCreateVectorTripCount(L->getLoopPreheader());
3165   assert(VectorTripCount && L && "Expected valid arguments");
3166   // We are going to resume the execution of the scalar loop.
3167   // Go over all of the induction variables that we found and fix the
3168   // PHIs that are left in the scalar version of the loop.
3169   // The starting values of PHI nodes depend on the counter of the last
3170   // iteration in the vectorized loop.
3171   // If we come from a bypass edge then we need to start from the original
3172   // start value.
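  // For the primary induction, the end value is simply the vector trip
  // count, so (illustrative example) a canonical IV starting at 0 resumes at
  // iteration n.vec in the scalar loop. Other inductions recompute
  // start + n.vec * step below.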
3173   Instruction *OldInduction = Legal->getPrimaryInduction();
3174   for (auto &InductionEntry : Legal->getInductionVars()) {
3175     PHINode *OrigPhi = InductionEntry.first;
3176     InductionDescriptor II = InductionEntry.second;
3177 
    // Create phi nodes to merge from the backedge-taken check block.
3179     PHINode *BCResumeVal =
3180         PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
3181                         LoopScalarPreHeader->getTerminator());
3182     // Copy original phi DL over to the new one.
3183     BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
3184     Value *&EndValue = IVEndValues[OrigPhi];
3185     Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
3186     if (OrigPhi == OldInduction) {
3187       // We know what the end value is.
3188       EndValue = VectorTripCount;
3189     } else {
3190       IRBuilder<> B(L->getLoopPreheader()->getTerminator());
3191 
3192       // Fast-math-flags propagate from the original induction instruction.
3193       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3194         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3195 
3196       Type *StepType = II.getStep()->getType();
3197       Instruction::CastOps CastOp =
3198           CastInst::getCastOpcode(VectorTripCount, true, StepType, true);
3199       Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd");
3200       Value *Step =
3201           CreateStepValue(II.getStep(), *PSE.getSE(), &*B.GetInsertPoint());
3202       EndValue = emitTransformedIndex(B, CRD, II.getStartValue(), Step, II);
3203       EndValue->setName("ind.end");
3204 
3205       // Compute the end value for the additional bypass (if applicable).
3206       if (AdditionalBypass.first) {
3207         B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt()));
3208         CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true,
3209                                          StepType, true);
3210         Value *Step =
3211             CreateStepValue(II.getStep(), *PSE.getSE(), &*B.GetInsertPoint());
3212         CRD =
3213             B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd");
3214         EndValueFromAdditionalBypass =
3215             emitTransformedIndex(B, CRD, II.getStartValue(), Step, II);
3216         EndValueFromAdditionalBypass->setName("ind.end");
3217       }
3218     }
3219     // The new PHI merges the original incoming value, in case of a bypass,
3220     // or the value at the end of the vectorized loop.
3221     BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);
3222 
3223     // Fix the scalar body counter (PHI node).
3224     // The old induction's phi node in the scalar body needs the truncated
3225     // value.
3226     for (BasicBlock *BB : LoopBypassBlocks)
3227       BCResumeVal->addIncoming(II.getStartValue(), BB);
3228 
3229     if (AdditionalBypass.first)
3230       BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first,
3231                                             EndValueFromAdditionalBypass);
3232 
3233     OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
3234   }
3235 }
3236 
3237 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L,
3238                                                       MDNode *OrigLoopID) {
3239   assert(L && "Expected valid loop.");
3240 
3241   // The trip counts should be cached by now.
3242   Value *Count = getOrCreateTripCount(L->getLoopPreheader());
3243   Value *VectorTripCount = getOrCreateVectorTripCount(L->getLoopPreheader());
3244 
3245   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3246 
3247   // Add a check in the middle block to see if we have completed
3248   // all of the iterations in the first vector loop.  Three cases:
3249   // 1) If we require a scalar epilogue, there is no conditional branch as
3250   //    we unconditionally branch to the scalar preheader.  Do nothing.
3251   // 2) If (N - N%VF) == N, then we *don't* need to run the remainder.
3252   //    Thus if tail is to be folded, we know we don't need to run the
3253   //    remainder and we can use the previous value for the condition (true).
3254   // 3) Otherwise, construct a runtime check.
3255   if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) {
3256     Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
3257                                         Count, VectorTripCount, "cmp.n",
3258                                         LoopMiddleBlock->getTerminator());
3259 
3260     // Here we use the same DebugLoc as the scalar loop latch terminator instead
3261     // of the corresponding compare because they may have ended up with
3262     // different line numbers and we want to avoid awkward line stepping while
3263     // debugging. Eg. if the compare has got a line number inside the loop.
3264     CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3265     cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN);
3266   }
3267 
3268   // Get ready to start creating new instructions into the vectorized body.
3269   assert(LoopVectorPreHeader == L->getLoopPreheader() &&
3270          "Inconsistent vector loop preheader");
3271 
3272 #ifdef EXPENSIVE_CHECKS
3273   assert(DT->verify(DominatorTree::VerificationLevel::Fast));
3274   LI->verify(*DT);
3275 #endif
3276 
3277   return LoopVectorPreHeader;
3278 }
3279 
3280 std::pair<BasicBlock *, Value *>
3281 InnerLoopVectorizer::createVectorizedLoopSkeleton() {
3282   /*
3283    In this function we generate a new loop. The new loop will contain
3284    the vectorized instructions while the old loop will continue to run the
3285    scalar remainder.
3286 
3287        [ ] <-- loop iteration number check.
3288     /   |
3289    /    v
3290   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
3291   |  /  |
3292   | /   v
3293   ||   [ ]     <-- vector pre header.
3294   |/    |
3295   |     v
3296   |    [  ] \
3297   |    [  ]_|   <-- vector loop.
3298   |     |
3299   |     v
3300   \   -[ ]   <--- middle-block.
3301    \/   |
3302    /\   v
3303    | ->[ ]     <--- new preheader.
3304    |    |
3305  (opt)  v      <-- edge from middle to exit iff epilogue is not required.
3306    |   [ ] \
3307    |   [ ]_|   <-- old scalar loop to handle remainder (scalar epilogue).
3308     \   |
3309      \  v
3310       >[ ]     <-- exit block(s).
3311    ...
3312    */
3313 
3314   // Get the metadata of the original loop before it gets modified.
3315   MDNode *OrigLoopID = OrigLoop->getLoopID();
3316 
3317   // Workaround!  Compute the trip count of the original loop and cache it
3318   // before we start modifying the CFG.  This code has a systemic problem
3319   // wherein it tries to run analysis over partially constructed IR; this is
3320   // wrong, and not simply for SCEV.  The trip count of the original loop
3321   // simply happens to be prone to hitting this in practice.  In theory, we
3322   // can hit the same issue for any SCEV, or ValueTracking query done during
3323   // mutation.  See PR49900.
3324   getOrCreateTripCount(OrigLoop->getLoopPreheader());
3325 
3326   // Create an empty vector loop, and prepare basic blocks for the runtime
3327   // checks.
3328   Loop *Lp = createVectorLoopSkeleton("");
3329 
3330   // Now, compare the new count to zero. If it is zero skip the vector loop and
3331   // jump to the scalar loop. This check also covers the case where the
3332   // backedge-taken count is uint##_max: adding one to it will overflow leading
3333   // to an incorrect trip count of zero. In this (rare) case we will also jump
3334   // to the scalar loop.
3335   emitMinimumIterationCountCheck(LoopScalarPreHeader);
3336 
3337   // Generate the code to check any assumptions that we've made for SCEV
3338   // expressions.
3339   emitSCEVChecks(LoopScalarPreHeader);
3340 
3341   // Generate the code that checks in runtime if arrays overlap. We put the
3342   // checks into a separate block to make the more common case of few elements
3343   // faster.
3344   emitMemRuntimeChecks(LoopScalarPreHeader);
3345 
3346   createHeaderBranch(Lp);
3347 
3348   // Emit phis for the new starting index of the scalar loop.
3349   createInductionResumeValues(Lp);
3350 
3351   return {completeLoopSkeleton(Lp, OrigLoopID), nullptr};
3352 }
3353 
3354 // Fix up external users of the induction variable. At this point, we are
3355 // in LCSSA form, with all external PHIs that use the IV having one input value,
3356 // coming from the remainder loop. We need those PHIs to also have a correct
3357 // value for the IV when arriving directly from the middle block.
3358 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3359                                        const InductionDescriptor &II,
3360                                        Value *CountRoundDown, Value *EndValue,
3361                                        BasicBlock *MiddleBlock,
3362                                        BasicBlock *VectorHeader) {
3363   // There are two kinds of external IV usages - those that use the value
3364   // computed in the last iteration (the PHI) and those that use the penultimate
3365   // value (the value that feeds into the phi from the loop latch).
3366   // We allow both, but they, obviously, have different values.
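  // For example (illustrative), given
  //   %iv      = phi i64 [ 0, %preheader ], [ %iv.next, %latch ]
  //   %iv.next = add i64 %iv, 1
  // an LCSSA phi using %iv.next sees the last value, while one using %iv
  // sees the penultimate value.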
3367 
3368   assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");
3369 
3370   DenseMap<Value *, Value *> MissingVals;
3371 
3372   // An external user of the last iteration's value should see the value that
3373   // the remainder loop uses to initialize its own IV.
3374   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3375   for (User *U : PostInc->users()) {
3376     Instruction *UI = cast<Instruction>(U);
3377     if (!OrigLoop->contains(UI)) {
3378       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3379       MissingVals[UI] = EndValue;
3380     }
3381   }
3382 
  // An external user of the penultimate value needs to see EndValue - Step.
  // The simplest way to get this is to recompute it from the constituent
  // SCEVs, that is Start + (Step * (CRD - 1)).
3386   for (User *U : OrigPhi->users()) {
3387     auto *UI = cast<Instruction>(U);
3388     if (!OrigLoop->contains(UI)) {
3389       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3390 
3391       IRBuilder<> B(MiddleBlock->getTerminator());
3392 
3393       // Fast-math-flags propagate from the original induction instruction.
3394       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3395         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3396 
3397       Value *CountMinusOne = B.CreateSub(
3398           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3399       Value *CMO =
3400           !II.getStep()->getType()->isIntegerTy()
3401               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3402                              II.getStep()->getType())
3403               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3404       CMO->setName("cast.cmo");
3405 
3406       Value *Step = CreateStepValue(II.getStep(), *PSE.getSE(),
3407                                     VectorHeader->getTerminator());
3408       Value *Escape =
3409           emitTransformedIndex(B, CMO, II.getStartValue(), Step, II);
3410       Escape->setName("ind.escape");
3411       MissingVals[UI] = Escape;
3412     }
3413   }
3414 
3415   for (auto &I : MissingVals) {
3416     PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
3418     // that is %IV2 = phi [...], [ %IV1, %latch ]
3419     // In this case, if IV1 has an external use, we need to avoid adding both
3420     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3421     // don't already have an incoming value for the middle block.
3422     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3423       PHI->addIncoming(I.second, MiddleBlock);
3424   }
3425 }
3426 
3427 namespace {
3428 
3429 struct CSEDenseMapInfo {
3430   static bool canHandle(const Instruction *I) {
3431     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3432            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3433   }
3434 
3435   static inline Instruction *getEmptyKey() {
3436     return DenseMapInfo<Instruction *>::getEmptyKey();
3437   }
3438 
3439   static inline Instruction *getTombstoneKey() {
3440     return DenseMapInfo<Instruction *>::getTombstoneKey();
3441   }
3442 
3443   static unsigned getHashValue(const Instruction *I) {
3444     assert(canHandle(I) && "Unknown instruction!");
3445     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3446                                                            I->value_op_end()));
3447   }
3448 
3449   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3450     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3451         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3452       return LHS == RHS;
3453     return LHS->isIdenticalTo(RHS);
3454   }
3455 };
3456 
3457 } // end anonymous namespace
3458 
/// Perform CSE of induction variable instructions.
static void cse(BasicBlock *BB) {
  // Perform simple CSE.
3462   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3463   for (Instruction &In : llvm::make_early_inc_range(*BB)) {
3464     if (!CSEDenseMapInfo::canHandle(&In))
3465       continue;
3466 
3467     // Check if we can replace this instruction with any of the
3468     // visited instructions.
3469     if (Instruction *V = CSEMap.lookup(&In)) {
3470       In.replaceAllUsesWith(V);
3471       In.eraseFromParent();
3472       continue;
3473     }
3474 
3475     CSEMap[&In] = &In;
3476   }
3477 }
3478 
3479 InstructionCost
3480 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF,
3481                                               bool &NeedToScalarize) const {
3482   Function *F = CI->getCalledFunction();
3483   Type *ScalarRetTy = CI->getType();
3484   SmallVector<Type *, 4> Tys, ScalarTys;
3485   for (auto &ArgOp : CI->args())
3486     ScalarTys.push_back(ArgOp->getType());
3487 
  // Estimate the cost of a scalarized vector call. The source operands are
  // assumed to be vectors, so we need to extract individual elements from
  // them, execute VF scalar calls, and then gather the results into the
  // vector return value.
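  // For example (illustrative), with VF = 4 the estimate below is roughly
  // 4 * ScalarCallCost plus the scalarization overhead of four extracts per
  // vector argument and four inserts for the return value.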
3492   InstructionCost ScalarCallCost =
3493       TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput);
3494   if (VF.isScalar())
3495     return ScalarCallCost;
3496 
3497   // Compute corresponding vector type for return value and arguments.
3498   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3499   for (Type *ScalarTy : ScalarTys)
3500     Tys.push_back(ToVectorTy(ScalarTy, VF));
3501 
3502   // Compute costs of unpacking argument values for the scalar calls and
3503   // packing the return values to a vector.
3504   InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
3505 
3506   InstructionCost Cost =
3507       ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
3508 
3509   // If we can't emit a vector call for this function, then the currently found
3510   // cost is the cost we need to return.
3511   NeedToScalarize = true;
3512   VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
3513   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3514 
3515   if (!TLI || CI->isNoBuiltin() || !VecFunc)
3516     return Cost;
3517 
3518   // If the corresponding vector cost is cheaper, return its cost.
3519   InstructionCost VectorCallCost =
3520       TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput);
3521   if (VectorCallCost < Cost) {
3522     NeedToScalarize = false;
3523     Cost = VectorCallCost;
3524   }
3525   return Cost;
3526 }
3527 
3528 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) {
3529   if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy()))
3530     return Elt;
3531   return VectorType::get(Elt, VF);
3532 }
3533 
3534 InstructionCost
3535 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
3536                                                    ElementCount VF) const {
3537   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3538   assert(ID && "Expected intrinsic call!");
3539   Type *RetTy = MaybeVectorizeType(CI->getType(), VF);
3540   FastMathFlags FMF;
3541   if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
3542     FMF = FPMO->getFastMathFlags();
3543 
3544   SmallVector<const Value *> Arguments(CI->args());
3545   FunctionType *FTy = CI->getCalledFunction()->getFunctionType();
3546   SmallVector<Type *> ParamTys;
3547   std::transform(FTy->param_begin(), FTy->param_end(),
3548                  std::back_inserter(ParamTys),
3549                  [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); });
3550 
3551   IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
3552                                     dyn_cast<IntrinsicInst>(CI));
3553   return TTI.getIntrinsicInstrCost(CostAttrs,
3554                                    TargetTransformInfo::TCK_RecipThroughput);
3555 }
3556 
3557 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3558   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3559   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3560   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3561 }
3562 
3563 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3564   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3565   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3566   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3567 }
3568 
3569 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) {
3570   // For every instruction `I` in MinBWs, truncate the operands, create a
3571   // truncated version of `I` and reextend its result. InstCombine runs
3572   // later and will remove any ext/trunc pairs.
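  // For example (illustrative), a <4 x i32> add known to need only 8 bits
  // becomes
  //   %t = trunc <4 x i32> %x to <4 x i8>
  //   %a = add <4 x i8> %t, %y
  //   %z = zext <4 x i8> %a to <4 x i32>
  // and InstCombine later removes redundant ext/trunc pairs.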
3573   SmallPtrSet<Value *, 4> Erased;
3574   for (const auto &KV : Cost->getMinimalBitwidths()) {
3575     // If the value wasn't vectorized, we must maintain the original scalar
3576     // type. The absence of the value from State indicates that it
3577     // wasn't vectorized.
3578     // FIXME: Should not rely on getVPValue at this point.
3579     VPValue *Def = State.Plan->getVPValue(KV.first, true);
3580     if (!State.hasAnyVectorValue(Def))
3581       continue;
3582     for (unsigned Part = 0; Part < UF; ++Part) {
3583       Value *I = State.get(Def, Part);
3584       if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
3585         continue;
3586       Type *OriginalTy = I->getType();
3587       Type *ScalarTruncatedTy =
3588           IntegerType::get(OriginalTy->getContext(), KV.second);
3589       auto *TruncatedTy = VectorType::get(
3590           ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount());
3591       if (TruncatedTy == OriginalTy)
3592         continue;
3593 
3594       IRBuilder<> B(cast<Instruction>(I));
3595       auto ShrinkOperand = [&](Value *V) -> Value * {
3596         if (auto *ZI = dyn_cast<ZExtInst>(V))
3597           if (ZI->getSrcTy() == TruncatedTy)
3598             return ZI->getOperand(0);
3599         return B.CreateZExtOrTrunc(V, TruncatedTy);
3600       };
3601 
3602       // The actual instruction modification depends on the instruction type,
3603       // unfortunately.
3604       Value *NewI = nullptr;
3605       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3606         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3607                              ShrinkOperand(BO->getOperand(1)));
3608 
3609         // Any wrapping introduced by shrinking this operation shouldn't be
3610         // considered undefined behavior. So, we can't unconditionally copy
3611         // arithmetic wrapping flags to NewI.
3612         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3613       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3614         NewI =
3615             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3616                          ShrinkOperand(CI->getOperand(1)));
3617       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3618         NewI = B.CreateSelect(SI->getCondition(),
3619                               ShrinkOperand(SI->getTrueValue()),
3620                               ShrinkOperand(SI->getFalseValue()));
3621       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3622         switch (CI->getOpcode()) {
3623         default:
3624           llvm_unreachable("Unhandled cast!");
3625         case Instruction::Trunc:
3626           NewI = ShrinkOperand(CI->getOperand(0));
3627           break;
3628         case Instruction::SExt:
3629           NewI = B.CreateSExtOrTrunc(
3630               CI->getOperand(0),
3631               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3632           break;
3633         case Instruction::ZExt:
3634           NewI = B.CreateZExtOrTrunc(
3635               CI->getOperand(0),
3636               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3637           break;
3638         }
3639       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
3640         auto Elements0 =
3641             cast<VectorType>(SI->getOperand(0)->getType())->getElementCount();
3642         auto *O0 = B.CreateZExtOrTrunc(
3643             SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
3644         auto Elements1 =
3645             cast<VectorType>(SI->getOperand(1)->getType())->getElementCount();
3646         auto *O1 = B.CreateZExtOrTrunc(
3647             SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
3648 
3649         NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
3650       } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
3651         // Don't do anything with the operands, just extend the result.
3652         continue;
3653       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3654         auto Elements =
3655             cast<VectorType>(IE->getOperand(0)->getType())->getElementCount();
3656         auto *O0 = B.CreateZExtOrTrunc(
3657             IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3658         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3659         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3660       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3661         auto Elements =
3662             cast<VectorType>(EE->getOperand(0)->getType())->getElementCount();
3663         auto *O0 = B.CreateZExtOrTrunc(
3664             EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3665         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3666       } else {
3667         // If we don't know what to do, be conservative and don't do anything.
3668         continue;
3669       }
3670 
3671       // Lastly, extend the result.
3672       NewI->takeName(cast<Instruction>(I));
3673       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3674       I->replaceAllUsesWith(Res);
3675       cast<Instruction>(I)->eraseFromParent();
3676       Erased.insert(I);
3677       State.reset(Def, Res, Part);
3678     }
3679   }
3680 
3681   // We'll have created a bunch of ZExts that are now parentless. Clean up.
3682   for (const auto &KV : Cost->getMinimalBitwidths()) {
3683     // If the value wasn't vectorized, we must maintain the original scalar
3684     // type. The absence of the value from State indicates that it
3685     // wasn't vectorized.
3686     // FIXME: Should not rely on getVPValue at this point.
3687     VPValue *Def = State.Plan->getVPValue(KV.first, true);
3688     if (!State.hasAnyVectorValue(Def))
3689       continue;
3690     for (unsigned Part = 0; Part < UF; ++Part) {
3691       Value *I = State.get(Def, Part);
3692       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3693       if (Inst && Inst->use_empty()) {
3694         Value *NewI = Inst->getOperand(0);
3695         Inst->eraseFromParent();
3696         State.reset(Def, NewI, Part);
3697       }
3698     }
3699   }
3700 }
3701 
3702 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
3703   // Insert truncates and extends for any truncated instructions as hints to
3704   // InstCombine.
3705   if (VF.isVector())
3706     truncateToMinimalBitwidths(State);
3707 
3708   // Fix widened non-induction PHIs by setting up the PHI operands.
3709   if (OrigPHIsToFix.size()) {
3710     assert(EnableVPlanNativePath &&
3711            "Unexpected non-induction PHIs for fixup in non VPlan-native path");
3712     fixNonInductionPHIs(State);
3713   }
3714 
3715   // At this point every instruction in the original loop is widened to a
3716   // vector form. Now we need to fix the recurrences in the loop. These PHI
3717   // nodes are currently empty because we did not want to introduce cycles.
3718   // This is the second stage of vectorizing recurrences.
3719   fixCrossIterationPHIs(State);
3720 
3721   // Forget the original basic block.
3722   PSE.getSE()->forgetLoop(OrigLoop);
3723 
3724   Loop *VectorLoop = LI->getLoopFor(State.CFG.PrevBB);
3725   // If we inserted an edge from the middle block to the unique exit block,
3726   // update uses outside the loop (phis) to account for the newly inserted
3727   // edge.
3728   if (!Cost->requiresScalarEpilogue(VF)) {
3729     // Fix-up external users of the induction variables.
3730     for (auto &Entry : Legal->getInductionVars())
3731       fixupIVUsers(Entry.first, Entry.second,
3732                    getOrCreateVectorTripCount(VectorLoop->getLoopPreheader()),
3733                    IVEndValues[Entry.first], LoopMiddleBlock,
3734                    VectorLoop->getHeader());
3735 
3736     fixLCSSAPHIs(State);
3737   }
3738 
3739   for (Instruction *PI : PredicatedInstructions)
3740     sinkScalarOperands(&*PI);
3741 
3742   // Remove redundant induction instructions.
3743   cse(VectorLoop->getHeader());
3744 
  // Set/update profile weights for the vector and remainder loops as the
  // original loop iterations are now distributed among them. Note that the
  // original loop, represented by LoopScalarBody, becomes the remainder loop
  // after vectorization.
  //
  // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
  // end up with a slightly less precise result, but that should be OK since
  // the profile is not inherently precise anyway. Note also that a possible
  // bypass of the vector code caused by legality checks is ignored,
  // optimistically assigning all the weight to the vector loop.
3754   //
  // For scalable vectorization we can't know at compile time how many
  // iterations of the loop are handled in one vector iteration, so instead
  // assume a pessimistic vscale of '1'.
3758   setProfileInfoAfterUnrolling(LI->getLoopFor(LoopScalarBody), VectorLoop,
3759                                LI->getLoopFor(LoopScalarBody),
3760                                VF.getKnownMinValue() * UF);
3761 }
3762 
3763 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
3764   // In order to support recurrences we need to be able to vectorize Phi nodes.
3765   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
3766   // stage #2: We now need to fix the recurrences by adding incoming edges to
3767   // the currently empty PHI nodes. At this point every instruction in the
3768   // original loop is widened to a vector form so we can use them to construct
3769   // the incoming edges.
3770   VPBasicBlock *Header = State.Plan->getEntry()->getEntryBasicBlock();
3771   for (VPRecipeBase &R : Header->phis()) {
3772     if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R))
3773       fixReduction(ReductionPhi, State);
3774     else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))
3775       fixFirstOrderRecurrence(FOR, State);
3776   }
3777 }
3778 
3779 void InnerLoopVectorizer::fixFirstOrderRecurrence(
3780     VPFirstOrderRecurrencePHIRecipe *PhiR, VPTransformState &State) {
3781   // This is the second phase of vectorizing first-order recurrences. An
3782   // overview of the transformation is described below. Suppose we have the
3783   // following loop.
3784   //
3785   //   for (int i = 0; i < n; ++i)
3786   //     b[i] = a[i] - a[i - 1];
3787   //
3788   // There is a first-order recurrence on "a". For this loop, the shorthand
3789   // scalar IR looks like:
3790   //
3791   //   scalar.ph:
3792   //     s_init = a[-1]
3793   //     br scalar.body
3794   //
3795   //   scalar.body:
3796   //     i = phi [0, scalar.ph], [i+1, scalar.body]
3797   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
3798   //     s2 = a[i]
3799   //     b[i] = s2 - s1
3800   //     br cond, scalar.body, ...
3801   //
  // In this example, s1 is a recurrence because its value depends on the
3803   // previous iteration. In the first phase of vectorization, we created a
3804   // vector phi v1 for s1. We now complete the vectorization and produce the
3805   // shorthand vector IR shown below (for VF = 4, UF = 1).
3806   //
3807   //   vector.ph:
3808   //     v_init = vector(..., ..., ..., a[-1])
3809   //     br vector.body
3810   //
3811   //   vector.body
3812   //     i = phi [0, vector.ph], [i+4, vector.body]
3813   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
3814   //     v2 = a[i, i+1, i+2, i+3];
3815   //     v3 = vector(v1(3), v2(0, 1, 2))
3816   //     b[i, i+1, i+2, i+3] = v2 - v3
3817   //     br cond, vector.body, middle.block
3818   //
3819   //   middle.block:
3820   //     x = v2(3)
3821   //     br scalar.ph
3822   //
3823   //   scalar.ph:
3824   //     s_init = phi [x, middle.block], [a[-1], otherwise]
3825   //     br scalar.body
3826   //
  // After the vector loop finishes executing, we extract the next value of
  // the recurrence (x) to use as the initial value in the scalar loop.
3829 
3830   // Extract the last vector element in the middle block. This will be the
3831   // initial value for the recurrence when jumping to the scalar loop.
3832   VPValue *PreviousDef = PhiR->getBackedgeValue();
3833   Value *Incoming = State.get(PreviousDef, UF - 1);
3834   auto *ExtractForScalar = Incoming;
3835   auto *IdxTy = Builder.getInt32Ty();
3836   if (VF.isVector()) {
3837     auto *One = ConstantInt::get(IdxTy, 1);
3838     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
3839     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
3840     auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
3841     ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx,
3842                                                     "vector.recur.extract");
3843   }
  // Extract the second-to-last element in the middle block if the
3845   // Phi is used outside the loop. We need to extract the phi itself
3846   // and not the last element (the phi update in the current iteration). This
3847   // will be the value when jumping to the exit block from the LoopMiddleBlock,
3848   // when the scalar loop is not run at all.
3849   Value *ExtractForPhiUsedOutsideLoop = nullptr;
3850   if (VF.isVector()) {
3851     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
3852     auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2));
3853     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
3854         Incoming, Idx, "vector.recur.extract.for.phi");
3855   } else if (UF > 1)
    // When the loop is unrolled without vectorizing, initialize
    // ExtractForPhiUsedOutsideLoop with the value just prior to the unrolled
    // value of `Incoming`. This is analogous to the vectorized case above:
    // extracting the second-to-last element when VF > 1.
3860     ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);
3861 
3862   // Fix the initial value of the original recurrence in the scalar loop.
3863   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
3864   PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue());
3865   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
3866   auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue();
3867   for (auto *BB : predecessors(LoopScalarPreHeader)) {
3868     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
3869     Start->addIncoming(Incoming, BB);
3870   }
3871 
3872   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
3873   Phi->setName("scalar.recur");
3874 
3875   // Finally, fix users of the recurrence outside the loop. The users will need
3876   // either the last value of the scalar recurrence or the last value of the
3877   // vector recurrence we extracted in the middle block. Since the loop is in
3878   // LCSSA form, we just need to find all the phi nodes for the original scalar
3879   // recurrence in the exit block, and then add an edge for the middle block.
  // Note that LCSSA does not imply single entry when the original scalar loop
  // had multiple exiting edges (as we always run the last iteration in the
  // scalar epilogue); in that case, there is no edge from middle to exit and
  // thus no phis which need to be updated.
3884   if (!Cost->requiresScalarEpilogue(VF))
3885     for (PHINode &LCSSAPhi : LoopExitBlock->phis())
3886       if (llvm::is_contained(LCSSAPhi.incoming_values(), Phi))
3887         LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
3888 }
3889 
3890 void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR,
3891                                        VPTransformState &State) {
3892   PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
  // Get its reduction variable descriptor.
3894   assert(Legal->isReductionVariable(OrigPhi) &&
3895          "Unable to find the reduction variable");
3896   const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
3897 
3898   RecurKind RK = RdxDesc.getRecurrenceKind();
3899   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
3900   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
3901   setDebugLocFromInst(ReductionStartValue);
3902 
3903   VPValue *LoopExitInstDef = PhiR->getBackedgeValue();
3904   // This is the vector-clone of the value that leaves the loop.
3905   Type *VecTy = State.get(LoopExitInstDef, 0)->getType();
3906 
3907   // Wrap flags are in general invalid after vectorization, clear them.
3908   clearReductionWrapFlags(RdxDesc, State);
3909 
3910   // Before each round, move the insertion point right between
3911   // the PHIs and the values we are going to write.
3912   // This allows us to write both PHINodes and the extractelement
3913   // instructions.
3914   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3915 
3916   setDebugLocFromInst(LoopExitInst);
3917 
3918   Type *PhiTy = OrigPhi->getType();
3919   BasicBlock *VectorLoopLatch =
3920       LI->getLoopFor(State.CFG.PrevBB)->getLoopLatch();
3921   // If tail is folded by masking, the vector value to leave the loop should be
3922   // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
3923   // instead of the former. For an inloop reduction the reduction will already
3924   // be predicated, and does not need to be handled here.
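  //
  // For example (shorthand IR, illustrative), a tail-folded add reduction
  // contains
  //
  //   %rdx.next = add <4 x i32> %vec.phi, %val
  //   %rdx.sel  = select <4 x i1> %mask, <4 x i32> %rdx.next,
  //               <4 x i32> %vec.phi
  //
  // and the value reduced after the loop must be %rdx.sel rather than
  // %rdx.next.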
3925   if (Cost->foldTailByMasking() && !PhiR->isInLoop()) {
3926     for (unsigned Part = 0; Part < UF; ++Part) {
3927       Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
3928       Value *Sel = nullptr;
3929       for (User *U : VecLoopExitInst->users()) {
3930         if (isa<SelectInst>(U)) {
3931           assert(!Sel && "Reduction exit feeding two selects");
3932           Sel = U;
3933         } else
3934           assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
3935       }
3936       assert(Sel && "Reduction exit feeds no select");
3937       State.reset(LoopExitInstDef, Sel, Part);
3938 
3939       // If the target can create a predicated operator for the reduction at no
3940       // extra cost in the loop (for example a predicated vadd), it can be
3941       // cheaper for the select to remain in the loop than be sunk out of it,
3942       // and so use the select value for the phi instead of the old
3943       // LoopExitValue.
3944       if (PreferPredicatedReductionSelect ||
3945           TTI->preferPredicatedReductionSelect(
3946               RdxDesc.getOpcode(), PhiTy,
3947               TargetTransformInfo::ReductionFlags())) {
3948         auto *VecRdxPhi =
3949             cast<PHINode>(State.get(PhiR, Part));
3950         VecRdxPhi->setIncomingValueForBlock(VectorLoopLatch, Sel);
3951       }
3952     }
3953   }
3954 
3955   // If the vector reduction can be performed in a smaller type, we truncate
3956   // then extend the loop exit value to enable InstCombine to evaluate the
3957   // entire expression in the smaller type.
3958   if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) {
3959     assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!");
3960     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
3961     Builder.SetInsertPoint(VectorLoopLatch->getTerminator());
3962     VectorParts RdxParts(UF);
3963     for (unsigned Part = 0; Part < UF; ++Part) {
3964       RdxParts[Part] = State.get(LoopExitInstDef, Part);
3965       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
3966       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
3967                                         : Builder.CreateZExt(Trunc, VecTy);
3968       for (User *U : llvm::make_early_inc_range(RdxParts[Part]->users()))
3969         if (U != Trunc) {
3970           U->replaceUsesOfWith(RdxParts[Part], Extnd);
3971           RdxParts[Part] = Extnd;
3972         }
3973     }
3974     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3975     for (unsigned Part = 0; Part < UF; ++Part) {
3976       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
3977       State.reset(LoopExitInstDef, RdxParts[Part], Part);
3978     }
3979   }
3980 
3981   // Reduce all of the unrolled parts into a single vector.
3982   Value *ReducedPartRdx = State.get(LoopExitInstDef, 0);
3983   unsigned Op = RecurrenceDescriptor::getOpcode(RK);
3984 
3985   // The middle block terminator has already been assigned a DebugLoc here (the
3986   // OrigLoop's single latch terminator). We want the whole middle block to
3987   // appear to execute on this line because: (a) it is all compiler generated,
3988   // (b) these instructions are always executed after evaluating the latch
3989   // conditional branch, and (c) other passes may add new predecessors which
3990   // terminate on this line. This is the easiest way to ensure we don't
3991   // accidentally cause an extra step back into the loop while debugging.
3992   setDebugLocFromInst(LoopMiddleBlock->getTerminator());
3993   if (PhiR->isOrdered())
3994     ReducedPartRdx = State.get(LoopExitInstDef, UF - 1);
3995   else {
3996     // Floating-point operations should have some FMF to enable the reduction.
3997     IRBuilderBase::FastMathFlagGuard FMFG(Builder);
3998     Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
3999     for (unsigned Part = 1; Part < UF; ++Part) {
4000       Value *RdxPart = State.get(LoopExitInstDef, Part);
4001       if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
4002         ReducedPartRdx = Builder.CreateBinOp(
4003             (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
4004       } else if (RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK))
4005         ReducedPartRdx = createSelectCmpOp(Builder, ReductionStartValue, RK,
4006                                            ReducedPartRdx, RdxPart);
4007       else
4008         ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
4009     }
4010   }
4011 
4012   // Create the reduction after the loop. Note that inloop reductions create the
4013   // target reduction in the loop using a Reduction recipe.
4014   if (VF.isVector() && !PhiR->isInLoop()) {
4015     ReducedPartRdx =
4016         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, OrigPhi);
4017     // If the reduction can be performed in a smaller type, we need to extend
4018     // the reduction to the wider type before we branch to the original loop.
4019     if (PhiTy != RdxDesc.getRecurrenceType())
4020       ReducedPartRdx = RdxDesc.isSigned()
4021                            ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
4022                            : Builder.CreateZExt(ReducedPartRdx, PhiTy);
4023   }
4024 
4025   PHINode *ResumePhi =
4026       dyn_cast<PHINode>(PhiR->getStartValue()->getUnderlyingValue());
4027 
4028   // Create a phi node that merges control-flow from the backedge-taken check
4029   // block and the middle block.
4030   PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx",
4031                                         LoopScalarPreHeader->getTerminator());
4032 
4033   // If we are fixing reductions in the epilogue loop then we should already
4034   // have created a bc.merge.rdx Phi after the main vector body. Ensure that
4035   // we carry over the incoming values correctly.
4036   for (auto *Incoming : predecessors(LoopScalarPreHeader)) {
4037     if (Incoming == LoopMiddleBlock)
4038       BCBlockPhi->addIncoming(ReducedPartRdx, Incoming);
4039     else if (ResumePhi && llvm::is_contained(ResumePhi->blocks(), Incoming))
4040       BCBlockPhi->addIncoming(ResumePhi->getIncomingValueForBlock(Incoming),
4041                               Incoming);
4042     else
4043       BCBlockPhi->addIncoming(ReductionStartValue, Incoming);
4044   }
4045 
4046   // Set the resume value for this reduction
4047   ReductionResumeValues.insert({&RdxDesc, BCBlockPhi});
4048 
4049   // Now, we need to fix the users of the reduction variable
4050   // inside and outside of the scalar remainder loop.
4051 
4052   // We know that the loop is in LCSSA form. We need to update the PHI nodes
4053   // in the exit blocks.  See comment on analogous loop in
  // fixFirstOrderRecurrence for a more complete explanation of the logic.
4055   if (!Cost->requiresScalarEpilogue(VF))
4056     for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4057       if (llvm::is_contained(LCSSAPhi.incoming_values(), LoopExitInst))
4058         LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
4059 
4060   // Fix the scalar loop reduction variable with the incoming reduction sum
4061   // from the vector body and from the backedge value.
4062   int IncomingEdgeBlockIdx =
4063       OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4064   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4065   // Pick the other block.
4066   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4067   OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4068   OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4069 }
4070 
4071 void InnerLoopVectorizer::clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
4072                                                   VPTransformState &State) {
4073   RecurKind RK = RdxDesc.getRecurrenceKind();
4074   if (RK != RecurKind::Add && RK != RecurKind::Mul)
4075     return;
4076 
4077   Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
4078   assert(LoopExitInstr && "null loop exit instruction");
4079   SmallVector<Instruction *, 8> Worklist;
4080   SmallPtrSet<Instruction *, 8> Visited;
4081   Worklist.push_back(LoopExitInstr);
4082   Visited.insert(LoopExitInstr);
4083 
4084   while (!Worklist.empty()) {
4085     Instruction *Cur = Worklist.pop_back_val();
4086     if (isa<OverflowingBinaryOperator>(Cur))
4087       for (unsigned Part = 0; Part < UF; ++Part) {
4088         // FIXME: Should not rely on getVPValue at this point.
4089         Value *V = State.get(State.Plan->getVPValue(Cur, true), Part);
4090         cast<Instruction>(V)->dropPoisonGeneratingFlags();
4091       }
4092 
4093     for (User *U : Cur->users()) {
4094       Instruction *UI = cast<Instruction>(U);
4095       if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
4096           Visited.insert(UI).second)
4097         Worklist.push_back(UI);
4098     }
4099   }
4100 }
4101 
4102 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) {
4103   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4104     if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1)
4105       // Some phis were already hand updated by the reduction and recurrence
4106       // code above, leave them alone.
4107       continue;
4108 
4109     auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
4110     // Non-instruction incoming values will have only one value.
4111 
4112     VPLane Lane = VPLane::getFirstLane();
4113     if (isa<Instruction>(IncomingValue) &&
4114         !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue),
4115                                            VF))
4116       Lane = VPLane::getLastLaneForVF(VF);
4117 
4118     // Can be a loop invariant incoming value or the last scalar value to be
4119     // extracted from the vectorized loop.
4120     // FIXME: Should not rely on getVPValue at this point.
4121     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4122     Value *lastIncomingValue =
4123         OrigLoop->isLoopInvariant(IncomingValue)
4124             ? IncomingValue
4125             : State.get(State.Plan->getVPValue(IncomingValue, true),
4126                         VPIteration(UF - 1, Lane));
4127     LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
4128   }
4129 }
4130 
4131 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4132   // The basic block and loop containing the predicated instruction.
4133   auto *PredBB = PredInst->getParent();
4134   auto *VectorLoop = LI->getLoopFor(PredBB);
4135 
4136   // Initialize a worklist with the operands of the predicated instruction.
4137   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4138 
4139   // Holds instructions that we need to analyze again. An instruction may be
4140   // reanalyzed if we don't yet know if we can sink it or not.
4141   SmallVector<Instruction *, 8> InstsToReanalyze;
4142 
4143   // Returns true if a given use occurs in the predicated block. Phi nodes use
4144   // their operands in their corresponding predecessor blocks.
4145   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4146     auto *I = cast<Instruction>(U.getUser());
4147     BasicBlock *BB = I->getParent();
4148     if (auto *Phi = dyn_cast<PHINode>(I))
4149       BB = Phi->getIncomingBlock(
4150           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4151     return BB == PredBB;
4152   };
4153 
4154   // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
4156   // operands are then added to the worklist. The algorithm ends after one pass
4157   // through the worklist doesn't sink a single instruction.
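  //
  // For example (illustrative), if a scalarized address computation feeding a
  // predicated store is used only inside the predicated block, the address
  // instructions are moved into that block over successive passes of the
  // worklist.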
4158   bool Changed;
4159   do {
4160     // Add the instructions that need to be reanalyzed to the worklist, and
4161     // reset the changed indicator.
4162     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4163     InstsToReanalyze.clear();
4164     Changed = false;
4165 
4166     while (!Worklist.empty()) {
4167       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4168 
4169       // We can't sink an instruction if it is a phi node, is not in the loop,
4170       // or may have side effects.
4171       if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) ||
4172           I->mayHaveSideEffects())
4173         continue;
4174 
4175       // If the instruction is already in PredBB, check if we can sink its
4176       // operands. In that case, VPlan's sinkScalarOperands() succeeded in
4177       // sinking the scalar instruction I, hence it appears in PredBB; but it
4178       // may have failed to sink I's operands (recursively), which we try
4179       // (again) here.
4180       if (I->getParent() == PredBB) {
4181         Worklist.insert(I->op_begin(), I->op_end());
4182         continue;
4183       }
4184 
4185       // It's legal to sink the instruction if all its uses occur in the
4186       // predicated block. Otherwise, there's nothing to do yet, and we may
4187       // need to reanalyze the instruction.
4188       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4189         InstsToReanalyze.push_back(I);
4190         continue;
4191       }
4192 
4193       // Move the instruction to the beginning of the predicated block, and add
      // its operands to the worklist.
4195       I->moveBefore(&*PredBB->getFirstInsertionPt());
4196       Worklist.insert(I->op_begin(), I->op_end());
4197 
4198       // The sinking may have enabled other instructions to be sunk, so we will
4199       // need to iterate.
4200       Changed = true;
4201     }
4202   } while (Changed);
4203 }
4204 
4205 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
4206   for (PHINode *OrigPhi : OrigPHIsToFix) {
4207     VPWidenPHIRecipe *VPPhi =
4208         cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi));
4209     PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
4210     // Make sure the builder has a valid insert point.
4211     Builder.SetInsertPoint(NewPhi);
4212     for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) {
4213       VPValue *Inc = VPPhi->getIncomingValue(i);
4214       VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i);
4215       NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]);
4216     }
4217   }
4218 }
4219 
4220 bool InnerLoopVectorizer::useOrderedReductions(
4221     const RecurrenceDescriptor &RdxDesc) {
4222   return Cost->useOrderedReductions(RdxDesc);
4223 }
4224 
4225 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
4226                                               VPWidenPHIRecipe *PhiR,
4227                                               VPTransformState &State) {
4228   PHINode *P = cast<PHINode>(PN);
4229   if (EnableVPlanNativePath) {
4230     // Currently we enter here in the VPlan-native path for non-induction
4231     // PHIs where all control flow is uniform. We simply widen these PHIs.
4232     // Create a vector phi with no operands - the vector phi operands will be
4233     // set at the end of vector code generation.
4234     Type *VecTy = (State.VF.isScalar())
4235                       ? PN->getType()
4236                       : VectorType::get(PN->getType(), State.VF);
4237     Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4238     State.set(PhiR, VecPhi, 0);
4239     OrigPHIsToFix.push_back(P);
4240 
4241     return;
4242   }
4243 
4244   assert(PN->getParent() == OrigLoop->getHeader() &&
4245          "Non-header phis should have been handled elsewhere");
4246 
4247   // In order to support recurrences we need to be able to vectorize Phi nodes.
4248   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4249   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4250   // this value when we vectorize all of the instructions that use the PHI.
4251 
4252   assert(!Legal->isReductionVariable(P) &&
4253          "reductions should be handled elsewhere");
4254 
4255   setDebugLocFromInst(P);
4256 
4257   // This PHINode must be an induction variable.
4258   // Make sure that we know about it.
4259   assert(Legal->getInductionVars().count(P) && "Not an induction variable");
4260 
4261   InductionDescriptor II = Legal->getInductionVars().lookup(P);
4262   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4263 
4264   auto *IVR = PhiR->getParent()->getPlan()->getCanonicalIV();
4265   PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, 0));
4266 
4267   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4268   // which can be found from the original scalar operations.
4269   switch (II.getKind()) {
4270   case InductionDescriptor::IK_NoInduction:
4271     llvm_unreachable("Unknown induction");
4272   case InductionDescriptor::IK_IntInduction:
4273   case InductionDescriptor::IK_FpInduction:
4274     llvm_unreachable("Integer/fp induction is handled elsewhere.");
4275   case InductionDescriptor::IK_PtrInduction: {
4276     // Handle the pointer induction variable case.
4277     assert(P->getType()->isPointerTy() && "Unexpected type.");
4278 
4279     if (all_of(PhiR->users(), [PhiR](const VPUser *U) {
4280           return cast<VPRecipeBase>(U)->usesScalars(PhiR);
4281         })) {
4282       // This is the normalized GEP that starts counting at zero.
4283       Value *PtrInd =
4284           Builder.CreateSExtOrTrunc(CanonicalIV, II.getStep()->getType());
4285       // Determine the number of scalars we need to generate for each unroll
4286       // iteration. If the instruction is uniform, we only need to generate the
4287       // first lane. Otherwise, we generate all VF values.
4288       bool IsUniform = vputils::onlyFirstLaneUsed(PhiR);
4289       assert((IsUniform || !State.VF.isScalable()) &&
4290              "Cannot scalarize a scalable VF");
4291       unsigned Lanes = IsUniform ? 1 : State.VF.getFixedValue();
4292 
4293       for (unsigned Part = 0; Part < UF; ++Part) {
4294         Value *PartStart =
4295             createStepForVF(Builder, PtrInd->getType(), VF, Part);
4296 
4297         for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4298           Value *Idx = Builder.CreateAdd(
4299               PartStart, ConstantInt::get(PtrInd->getType(), Lane));
4300           Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4301 
4302           Value *Step = CreateStepValue(II.getStep(), *PSE.getSE(),
4303                                         State.CFG.PrevBB->getTerminator());
4304           Value *SclrGep = emitTransformedIndex(Builder, GlobalIdx,
4305                                                 II.getStartValue(), Step, II);
4306           SclrGep->setName("next.gep");
4307           State.set(PhiR, SclrGep, VPIteration(Part, Lane));
4308         }
4309       }
4310       return;
4311     }
4312     assert(isa<SCEVConstant>(II.getStep()) &&
4313            "Induction step not a SCEV constant!");
4314     Type *PhiType = II.getStep()->getType();
4315 
4316     // Build a pointer phi
4317     Value *ScalarStartValue = PhiR->getStartValue()->getLiveInIRValue();
4318     Type *ScStValueType = ScalarStartValue->getType();
4319     PHINode *NewPointerPhi =
4320         PHINode::Create(ScStValueType, 2, "pointer.phi", CanonicalIV);
4321     NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader);
4322 
4323     // A pointer induction, performed by using a gep
4324     BasicBlock *LoopLatch = LI->getLoopFor(State.CFG.PrevBB)->getLoopLatch();
4325     Instruction *InductionLoc = LoopLatch->getTerminator();
4326     const SCEV *ScalarStep = II.getStep();
4327     SCEVExpander Exp(*PSE.getSE(), DL, "induction");
4328     Value *ScalarStepValue =
4329         Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc);
4330     Value *RuntimeVF = getRuntimeVF(Builder, PhiType, VF);
4331     Value *NumUnrolledElems =
4332         Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF));
4333     Value *InductionGEP = GetElementPtrInst::Create(
4334         II.getElementType(), NewPointerPhi,
4335         Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind",
4336         InductionLoc);
4337     NewPointerPhi->addIncoming(InductionGEP, LoopLatch);
4338 
4339     // Create UF many actual address geps that use the pointer
4340     // phi as base and a vectorized version of the step value
4341     // (<step*0, ..., step*N>) as offset.
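    //
    // For example (fixed VF = 4, UF = 2, illustrative), part 0 uses offsets
    // <0, 1, 2, 3> * Step and part 1 uses offsets <4, 5, 6, 7> * Step, both
    // relative to the pointer phi.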
4342     for (unsigned Part = 0; Part < State.UF; ++Part) {
4343       Type *VecPhiType = VectorType::get(PhiType, State.VF);
4344       Value *StartOffsetScalar =
4345           Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part));
4346       Value *StartOffset =
4347           Builder.CreateVectorSplat(State.VF, StartOffsetScalar);
4348       // Create a vector of consecutive numbers from zero to VF.
4349       StartOffset =
4350           Builder.CreateAdd(StartOffset, Builder.CreateStepVector(VecPhiType));
4351 
4352       Value *GEP = Builder.CreateGEP(
4353           II.getElementType(), NewPointerPhi,
4354           Builder.CreateMul(
4355               StartOffset, Builder.CreateVectorSplat(State.VF, ScalarStepValue),
4356               "vector.gep"));
4357       State.set(PhiR, GEP, Part);
4358     }
4359   }
4360   }
4361 }
4362 
4363 /// A helper function for checking whether an integer division-related
4364 /// instruction may divide by zero (in which case it must be predicated if
4365 /// executed conditionally in the scalar code).
4366 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
/// Non-zero divisors that are not compile-time constants will not be
4368 /// converted into multiplication, so we will still end up scalarizing
4369 /// the division, but can do so w/o predication.
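///
/// For example (illustrative), 'udiv i32 %x, 7' can never divide by zero and
/// needs no predication, whereas 'udiv i32 %x, %d' must be predicated when it
/// executes conditionally in the scalar loop, since %d may be zero at runtime.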
4370 static bool mayDivideByZero(Instruction &I) {
4371   assert((I.getOpcode() == Instruction::UDiv ||
4372           I.getOpcode() == Instruction::SDiv ||
4373           I.getOpcode() == Instruction::URem ||
4374           I.getOpcode() == Instruction::SRem) &&
4375          "Unexpected instruction");
4376   Value *Divisor = I.getOperand(1);
4377   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4378   return !CInt || CInt->isZero();
4379 }
4380 
4381 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def,
4382                                                VPUser &ArgOperands,
4383                                                VPTransformState &State) {
4384   assert(!isa<DbgInfoIntrinsic>(I) &&
4385          "DbgInfoIntrinsic should have been dropped during VPlan construction");
4386   setDebugLocFromInst(&I);
4387 
4388   Module *M = I.getParent()->getParent()->getParent();
4389   auto *CI = cast<CallInst>(&I);
4390 
4391   SmallVector<Type *, 4> Tys;
4392   for (Value *ArgOperand : CI->args())
4393     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue()));
4394 
4395   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4396 
  // The flag indicates whether we use an intrinsic or a plain call for the
  // vectorized version of the instruction, i.e. whether it is beneficial to
  // perform an intrinsic call rather than a library call.
4400   bool NeedToScalarize = false;
4401   InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
4402   InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0;
4403   bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
4404   assert((UseVectorIntrinsic || !NeedToScalarize) &&
4405          "Instruction should be scalarized elsewhere.");
4406   assert((IntrinsicCost.isValid() || CallCost.isValid()) &&
4407          "Either the intrinsic cost or vector call cost must be valid");
4408 
4409   for (unsigned Part = 0; Part < UF; ++Part) {
4410     SmallVector<Type *, 2> TysForDecl = {CI->getType()};
4411     SmallVector<Value *, 4> Args;
4412     for (auto &I : enumerate(ArgOperands.operands())) {
4413       // Some intrinsics have a scalar argument - don't replace it with a
4414       // vector.
4415       Value *Arg;
4416       if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
4417         Arg = State.get(I.value(), Part);
4418       else {
4419         Arg = State.get(I.value(), VPIteration(0, 0));
4420         if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index()))
4421           TysForDecl.push_back(Arg->getType());
4422       }
4423       Args.push_back(Arg);
4424     }
4425 
4426     Function *VectorF;
4427     if (UseVectorIntrinsic) {
4428       // Use vector version of the intrinsic.
4429       if (VF.isVector())
4430         TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
4431       VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4432       assert(VectorF && "Can't retrieve vector intrinsic.");
4433     } else {
4434       // Use vector version of the function call.
4435       const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
4436 #ifndef NDEBUG
4437       assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
4438              "Can't create vector function.");
4439 #endif
      VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
    }
    SmallVector<OperandBundleDef, 1> OpBundles;
    CI->getOperandBundlesAsDefs(OpBundles);
    CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);

    if (isa<FPMathOperator>(V))
      V->copyFastMathFlags(CI);

    State.set(Def, V, Part);
    addMetadata(V, &I);
4451   }
4452 }
4453 
4454 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
4455   // We should not collect Scalars more than once per VF. Right now, this
4456   // function is called from collectUniformsAndScalars(), which already does
4457   // this check. Collecting Scalars for VF=1 does not make any sense.
4458   assert(VF.isVector() && Scalars.find(VF) == Scalars.end() &&
4459          "This function should not be visited twice for the same VF");
4460 
4461   // This avoids any chances of creating a REPLICATE recipe during planning
4462   // since that would result in generation of scalarized code during execution,
4463   // which is not supported for scalable vectors.
4464   if (VF.isScalable()) {
4465     Scalars[VF].insert(Uniforms[VF].begin(), Uniforms[VF].end());
4466     return;
4467   }
4468 
4469   SmallSetVector<Instruction *, 8> Worklist;
4470 
4471   // These sets are used to seed the analysis with pointers used by memory
4472   // accesses that will remain scalar.
4473   SmallSetVector<Instruction *, 8> ScalarPtrs;
4474   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
4475   auto *Latch = TheLoop->getLoopLatch();
4476 
4477   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
4478   // The pointer operands of loads and stores will be scalar as long as the
4479   // memory access is not a gather or scatter operation. The value operand of a
4480   // store will remain scalar if the store is scalarized.
4481   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
4482     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
4483     assert(WideningDecision != CM_Unknown &&
4484            "Widening decision should be ready at this moment");
4485     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
4486       if (Ptr == Store->getValueOperand())
4487         return WideningDecision == CM_Scalarize;
4488     assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
4489            "Ptr is neither a value or pointer operand");
4490     return WideningDecision != CM_GatherScatter;
4491   };
4492 
4493   // A helper that returns true if the given value is a bitcast or
4494   // getelementptr instruction contained in the loop.
4495   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
4496     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
4497             isa<GetElementPtrInst>(V)) &&
4498            !TheLoop->isLoopInvariant(V);
4499   };
4500 
4501   // A helper that evaluates a memory access's use of a pointer. If the use will
4502   // be a scalar use and the pointer is only used by memory accesses, we place
4503   // the pointer in ScalarPtrs. Otherwise, the pointer is placed in
4504   // PossibleNonScalarPtrs.
4505   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
4506     // We only care about bitcast and getelementptr instructions contained in
4507     // the loop.
4508     if (!isLoopVaryingBitCastOrGEP(Ptr))
4509       return;
4510 
4511     // If the pointer has already been identified as scalar (e.g., if it was
4512     // also identified as uniform), there's nothing to do.
4513     auto *I = cast<Instruction>(Ptr);
4514     if (Worklist.count(I))
4515       return;
4516 
4517     // If the use of the pointer will be a scalar use, and all users of the
4518     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
4519     // place the pointer in PossibleNonScalarPtrs.
4520     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
4521           return isa<LoadInst>(U) || isa<StoreInst>(U);
4522         }))
4523       ScalarPtrs.insert(I);
4524     else
4525       PossibleNonScalarPtrs.insert(I);
4526   };
4527 
  // We seed the scalars analysis with two classes of instructions: (1)
  // instructions marked uniform-after-vectorization and (2) bitcast,
  // getelementptr and (pointer) phi instructions used by memory accesses
  // requiring a scalar use.
4532   //
4533   // (1) Add to the worklist all instructions that have been identified as
4534   // uniform-after-vectorization.
4535   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
4536 
4537   // (2) Add to the worklist all bitcast and getelementptr instructions used by
4538   // memory accesses requiring a scalar use. The pointer operands of loads and
  // stores will be scalar as long as the memory access is not a gather or
4540   // scatter operation. The value operand of a store will remain scalar if the
4541   // store is scalarized.
4542   for (auto *BB : TheLoop->blocks())
4543     for (auto &I : *BB) {
4544       if (auto *Load = dyn_cast<LoadInst>(&I)) {
4545         evaluatePtrUse(Load, Load->getPointerOperand());
4546       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
4547         evaluatePtrUse(Store, Store->getPointerOperand());
4548         evaluatePtrUse(Store, Store->getValueOperand());
4549       }
4550     }
4551   for (auto *I : ScalarPtrs)
4552     if (!PossibleNonScalarPtrs.count(I)) {
4553       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
4554       Worklist.insert(I);
4555     }
4556 
4557   // Insert the forced scalars.
4558   // FIXME: Currently widenPHIInstruction() often creates a dead vector
4559   // induction variable when the PHI user is scalarized.
4560   auto ForcedScalar = ForcedScalars.find(VF);
4561   if (ForcedScalar != ForcedScalars.end())
4562     for (auto *I : ForcedScalar->second)
4563       Worklist.insert(I);
4564 
4565   // Expand the worklist by looking through any bitcasts and getelementptr
4566   // instructions we've already identified as scalar. This is similar to the
4567   // expansion step in collectLoopUniforms(); however, here we're only
4568   // expanding to include additional bitcasts and getelementptr instructions.
4569   unsigned Idx = 0;
4570   while (Idx != Worklist.size()) {
4571     Instruction *Dst = Worklist[Idx++];
4572     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
4573       continue;
4574     auto *Src = cast<Instruction>(Dst->getOperand(0));
4575     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
4576           auto *J = cast<Instruction>(U);
4577           return !TheLoop->contains(J) || Worklist.count(J) ||
4578                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
4579                   isScalarUse(J, Src));
4580         })) {
4581       Worklist.insert(Src);
4582       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
4583     }
4584   }
4585 
4586   // An induction variable will remain scalar if all users of the induction
4587   // variable and induction variable update remain scalar.
4588   for (auto &Induction : Legal->getInductionVars()) {
4589     auto *Ind = Induction.first;
4590     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4591 
4592     // If tail-folding is applied, the primary induction variable will be used
4593     // to feed a vector compare.
4594     if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
4595       continue;
4596 
4597     // Returns true if \p Indvar is a pointer induction that is used directly by
4598     // load/store instruction \p I.
4599     auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar,
4600                                               Instruction *I) {
4601       return Induction.second.getKind() ==
4602                  InductionDescriptor::IK_PtrInduction &&
4603              (isa<LoadInst>(I) || isa<StoreInst>(I)) &&
4604              Indvar == getLoadStorePointerOperand(I) && isScalarUse(I, Indvar);
4605     };
4606 
4607     // Determine if all users of the induction variable are scalar after
4608     // vectorization.
4609     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4610       auto *I = cast<Instruction>(U);
4611       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
4612              IsDirectLoadStoreFromPtrIndvar(Ind, I);
4613     });
4614     if (!ScalarInd)
4615       continue;
4616 
4617     // Determine if all users of the induction variable update instruction are
4618     // scalar after vectorization.
4619     auto ScalarIndUpdate =
4620         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4621           auto *I = cast<Instruction>(U);
4622           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
4623                  IsDirectLoadStoreFromPtrIndvar(IndUpdate, I);
4624         });
4625     if (!ScalarIndUpdate)
4626       continue;
4627 
4628     // The induction variable and its update instruction will remain scalar.
4629     Worklist.insert(Ind);
4630     Worklist.insert(IndUpdate);
4631     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
4632     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
4633                       << "\n");
4634   }
4635 
4636   Scalars[VF].insert(Worklist.begin(), Worklist.end());
4637 }
4638 
4639 bool LoopVectorizationCostModel::isScalarWithPredication(
4640     Instruction *I, ElementCount VF) const {
4641   if (!blockNeedsPredicationForAnyReason(I->getParent()))
4642     return false;
4643   switch(I->getOpcode()) {
4644   default:
4645     break;
4646   case Instruction::Load:
4647   case Instruction::Store: {
4648     if (!Legal->isMaskRequired(I))
4649       return false;
4650     auto *Ptr = getLoadStorePointerOperand(I);
4651     auto *Ty = getLoadStoreType(I);
4652     Type *VTy = Ty;
4653     if (VF.isVector())
4654       VTy = VectorType::get(Ty, VF);
4655     const Align Alignment = getLoadStoreAlignment(I);
4656     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
4657                                 TTI.isLegalMaskedGather(VTy, Alignment))
4658                             : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
4659                                 TTI.isLegalMaskedScatter(VTy, Alignment));
4660   }
4661   case Instruction::UDiv:
4662   case Instruction::SDiv:
4663   case Instruction::SRem:
4664   case Instruction::URem:
4665     return mayDivideByZero(*I);
4666   }
4667   return false;
4668 }
4669 
4670 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
4671     Instruction *I, ElementCount VF) {
4672   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
4673   assert(getWideningDecision(I, VF) == CM_Unknown &&
4674          "Decision should not be set yet.");
4675   auto *Group = getInterleavedAccessGroup(I);
4676   assert(Group && "Must have a group.");
4677 
  // If the instruction's allocated size doesn't equal its type size, it
4679   // requires padding and will be scalarized.
4680   auto &DL = I->getModule()->getDataLayout();
4681   auto *ScalarTy = getLoadStoreType(I);
4682   if (hasIrregularType(ScalarTy, DL))
4683     return false;
4684 
4685   // Check if masking is required.
4686   // A Group may need masking for one of two reasons: it resides in a block that
4687   // needs predication, or it was decided to use masking to deal with gaps
4688   // (either a gap at the end of a load-access that may result in a speculative
4689   // load, or any gaps in a store-access).
4690   bool PredicatedAccessRequiresMasking =
4691       blockNeedsPredicationForAnyReason(I->getParent()) &&
4692       Legal->isMaskRequired(I);
4693   bool LoadAccessWithGapsRequiresEpilogMasking =
4694       isa<LoadInst>(I) && Group->requiresScalarEpilogue() &&
4695       !isScalarEpilogueAllowed();
4696   bool StoreAccessWithGapsRequiresMasking =
4697       isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor());
4698   if (!PredicatedAccessRequiresMasking &&
4699       !LoadAccessWithGapsRequiresEpilogMasking &&
4700       !StoreAccessWithGapsRequiresMasking)
4701     return true;
4702 
4703   // If masked interleaving is required, we expect that the user/target had
4704   // enabled it, because otherwise it either wouldn't have been created or
4705   // it should have been invalidated by the CostModel.
4706   assert(useMaskedInterleavedAccesses(TTI) &&
4707          "Masked interleave-groups for predicated accesses are not enabled.");
4708 
4709   if (Group->isReverse())
4710     return false;
4711 
4712   auto *Ty = getLoadStoreType(I);
4713   const Align Alignment = getLoadStoreAlignment(I);
4714   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
4715                           : TTI.isLegalMaskedStore(Ty, Alignment);
4716 }
4717 
4718 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
4719     Instruction *I, ElementCount VF) {
4720   // Get and ensure we have a valid memory instruction.
4721   assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction");
4722 
4723   auto *Ptr = getLoadStorePointerOperand(I);
4724   auto *ScalarTy = getLoadStoreType(I);
4725 
4726   // In order to be widened, the pointer should be consecutive, first of all.
4727   if (!Legal->isConsecutivePtr(ScalarTy, Ptr))
4728     return false;
4729 
4730   // If the instruction is a store located in a predicated block, it will be
4731   // scalarized.
4732   if (isScalarWithPredication(I, VF))
4733     return false;
4734 
  // If the instruction's allocated size doesn't equal its type size, it
4736   // requires padding and will be scalarized.
4737   auto &DL = I->getModule()->getDataLayout();
4738   if (hasIrregularType(ScalarTy, DL))
4739     return false;
4740 
4741   return true;
4742 }
4743 
4744 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
4745   // We should not collect Uniforms more than once per VF. Right now,
4746   // this function is called from collectUniformsAndScalars(), which
4747   // already does this check. Collecting Uniforms for VF=1 does not make any
4748   // sense.
4749 
4750   assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
4751          "This function should not be visited twice for the same VF");
4752 
4753   // Visit the list of Uniforms. Even if we find no uniform value, clearing the
4754   // entry ensures Uniforms.count(VF) returns 1 so this VF is not analyzed again.
4755   Uniforms[VF].clear();
4756 
4757   // We now know that the loop is vectorizable!
4758   // Collect instructions inside the loop that will remain uniform after
4759   // vectorization.
4760 
4761   // Global values, params and instructions outside of the current loop are out
4762   // of scope.
4763   auto isOutOfScope = [&](Value *V) -> bool {
4764     Instruction *I = dyn_cast<Instruction>(V);
4765     return (!I || !TheLoop->contains(I));
4766   };
4767 
4768   // Worklist containing uniform instructions demanding lane 0.
4769   SetVector<Instruction *> Worklist;
4770   BasicBlock *Latch = TheLoop->getLoopLatch();
4771 
4772   // Add uniform instructions demanding lane 0 to the worklist. Instructions
4773   // that are scalar with predication must not be considered uniform after
4774   // vectorization, because that would create an erroneous replicating region
4775   // where only a single instance out of VF should be formed.
4776   // TODO: optimize such seldom cases if found important, see PR40816.
4777   auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
4778     if (isOutOfScope(I)) {
4779       LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
4780                         << *I << "\n");
4781       return;
4782     }
4783     if (isScalarWithPredication(I, VF)) {
4784       LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
4785                         << *I << "\n");
4786       return;
4787     }
4788     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
4789     Worklist.insert(I);
4790   };
4791 
4792   // Start with the conditional branch. If the branch condition is an
4793   // instruction contained in the loop that is only used by the branch, it is
4794   // uniform.
4795   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
4796   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
4797     addToWorklistIfAllowed(Cmp);
4798 
4799   auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
4800     InstWidening WideningDecision = getWideningDecision(I, VF);
4801     assert(WideningDecision != CM_Unknown &&
4802            "Widening decision should be ready at this moment");
4803 
4804     // A uniform memory op is itself uniform.  We exclude uniform stores
4805     // here as they demand the last lane, not the first one.
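         // For example (hypothetical), a store to a loop-invariant address only
         // needs the value written by the final iteration, i.e. the last lane.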
4806     if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) {
4807       assert(WideningDecision == CM_Scalarize);
4808       return true;
4809     }
4810 
4811     return (WideningDecision == CM_Widen ||
4812             WideningDecision == CM_Widen_Reverse ||
4813             WideningDecision == CM_Interleave);
4814   };
4815 
4816 
4817   // Returns true if Ptr is the pointer operand of a memory access instruction
4818   // I, and I is known to not require scalarization.
4819   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
4820     return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
4821   };
4822 
4823   // Holds a list of values which are known to have at least one uniform use.
4824   // Note that there may be other uses which aren't uniform.  A "uniform use"
4825   // here is something which only demands lane 0 of the unrolled iterations;
4826   // it does not imply that all lanes produce the same value (i.e. this is not
4827   // the usual meaning of uniform).
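       // For example (hypothetical): the pointer operand of a consecutive load that
       // will be widened has a uniform use, because the wide load is emitted from
       // the lane-0 address even though the pointer advances every scalar iteration.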
4828   SetVector<Value *> HasUniformUse;
4829 
4830   // Scan the loop for instructions which are either a) known to have only
4831   // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
4832   for (auto *BB : TheLoop->blocks())
4833     for (auto &I : *BB) {
4834       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) {
4835         switch (II->getIntrinsicID()) {
4836         case Intrinsic::sideeffect:
4837         case Intrinsic::experimental_noalias_scope_decl:
4838         case Intrinsic::assume:
4839         case Intrinsic::lifetime_start:
4840         case Intrinsic::lifetime_end:
4841           if (TheLoop->hasLoopInvariantOperands(&I))
4842             addToWorklistIfAllowed(&I);
4843           break;
4844         default:
4845           break;
4846         }
4847       }
4848 
4849       // ExtractValue instructions must be uniform, because the operands are
4850       // known to be loop-invariant.
4851       if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) {
4852         assert(isOutOfScope(EVI->getAggregateOperand()) &&
4853                "Expected aggregate value to be loop invariant");
4854         addToWorklistIfAllowed(EVI);
4855         continue;
4856       }
4857 
4858       // If there's no pointer operand, there's nothing to do.
4859       auto *Ptr = getLoadStorePointerOperand(&I);
4860       if (!Ptr)
4861         continue;
4862 
4863       // A uniform memory op is itself uniform.  We exclude uniform stores
4864       // here as they demand the last lane, not the first one.
4865       if (isa<LoadInst>(I) && Legal->isUniformMemOp(I))
4866         addToWorklistIfAllowed(&I);
4867 
4868       if (isUniformDecision(&I, VF)) {
4869         assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check");
4870         HasUniformUse.insert(Ptr);
4871       }
4872     }
4873 
4874   // Add to the worklist any operands which have *only* uniform (e.g. lane 0
4875   // demanding) users.  Since loops are assumed to be in LCSSA form, this
4876   // disallows uses outside the loop as well.
4877   for (auto *V : HasUniformUse) {
4878     if (isOutOfScope(V))
4879       continue;
4880     auto *I = cast<Instruction>(V);
4881     auto UsersAreMemAccesses =
4882       llvm::all_of(I->users(), [&](User *U) -> bool {
4883         return isVectorizedMemAccessUse(cast<Instruction>(U), V);
4884       });
4885     if (UsersAreMemAccesses)
4886       addToWorklistIfAllowed(I);
4887   }
4888 
4889   // Expand Worklist in topological order: whenever a new instruction
4890   // is added, its users should already be inside Worklist. This ensures that a
4891   // uniform instruction will only be used by uniform instructions.
4892   unsigned idx = 0;
4893   while (idx != Worklist.size()) {
4894     Instruction *I = Worklist[idx++];
4895 
4896     for (auto OV : I->operand_values()) {
4897       // isOutOfScope operands cannot be uniform instructions.
4898       if (isOutOfScope(OV))
4899         continue;
4900       // First-order recurrence phis should typically be considered
4901       // non-uniform.
4902       auto *OP = dyn_cast<PHINode>(OV);
4903       if (OP && Legal->isFirstOrderRecurrence(OP))
4904         continue;
4905       // If all the users of the operand are uniform, then add the
4906       // operand into the uniform worklist.
4907       auto *OI = cast<Instruction>(OV);
4908       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
4909             auto *J = cast<Instruction>(U);
4910             return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
4911           }))
4912         addToWorklistIfAllowed(OI);
4913     }
4914   }
4915 
4916   // For an instruction to be added into Worklist above, all its users inside
4917   // the loop should also be in Worklist. However, this condition cannot be
4918   // true for phi nodes that form a cyclic dependence. We must process phi
4919   // nodes separately. An induction variable will remain uniform if all users
4920   // of the induction variable and induction variable update remain uniform.
4921   // The code below handles both pointer and non-pointer induction variables.
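       // For example (hypothetical IR):
       //   %i      = phi i64 [ 0, %preheader ], [ %i.next, %latch ]
       //   %i.next = add nuw nsw i64 %i, 1
       // The pair stays uniform only if every in-loop user of %i and %i.next is
       // already in Worklist or is a vectorized memory access that uses it purely
       // as a lane-0 address.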
4922   for (auto &Induction : Legal->getInductionVars()) {
4923     auto *Ind = Induction.first;
4924     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4925 
4926     // Determine if all users of the induction variable are uniform after
4927     // vectorization.
4928     auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4929       auto *I = cast<Instruction>(U);
4930       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
4931              isVectorizedMemAccessUse(I, Ind);
4932     });
4933     if (!UniformInd)
4934       continue;
4935 
4936     // Determine if all users of the induction variable update instruction are
4937     // uniform after vectorization.
4938     auto UniformIndUpdate =
4939         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4940           auto *I = cast<Instruction>(U);
4941           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
4942                  isVectorizedMemAccessUse(I, IndUpdate);
4943         });
4944     if (!UniformIndUpdate)
4945       continue;
4946 
4947     // The induction variable and its update instruction will remain uniform.
4948     addToWorklistIfAllowed(Ind);
4949     addToWorklistIfAllowed(IndUpdate);
4950   }
4951 
4952   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
4953 }
4954 
4955 bool LoopVectorizationCostModel::runtimeChecksRequired() {
4956   LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
4957 
4958   if (Legal->getRuntimePointerChecking()->Need) {
4959     reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
4960         "runtime pointer checks needed. Enable vectorization of this "
4961         "loop with '#pragma clang loop vectorize(enable)' when "
4962         "compiling with -Os/-Oz",
4963         "CantVersionLoopWithOptForSize", ORE, TheLoop);
4964     return true;
4965   }
4966 
4967   if (!PSE.getPredicate().isAlwaysTrue()) {
4968     reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
4969         "runtime SCEV checks needed. Enable vectorization of this "
4970         "loop with '#pragma clang loop vectorize(enable)' when "
4971         "compiling with -Os/-Oz",
4972         "CantVersionLoopWithOptForSize", ORE, TheLoop);
4973     return true;
4974   }
4975 
4976   // FIXME: Avoid specializing for stride==1 instead of bailing out.
4977   if (!Legal->getLAI()->getSymbolicStrides().empty()) {
4978     reportVectorizationFailure("Runtime stride check for small trip count",
4979         "runtime stride == 1 checks needed. Enable vectorization of "
4980         "this loop without such check by compiling with -Os/-Oz",
4981         "CantVersionLoopWithOptForSize", ORE, TheLoop);
4982     return true;
4983   }
4984 
4985   return false;
4986 }
4987 
4988 ElementCount
4989 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
4990   if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors)
4991     return ElementCount::getScalable(0);
4992 
4993   if (Hints->isScalableVectorizationDisabled()) {
4994     reportVectorizationInfo("Scalable vectorization is explicitly disabled",
4995                             "ScalableVectorizationDisabled", ORE, TheLoop);
4996     return ElementCount::getScalable(0);
4997   }
4998 
4999   LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n");
5000 
5001   auto MaxScalableVF = ElementCount::getScalable(
5002       std::numeric_limits<ElementCount::ScalarTy>::max());
5003 
5004   // Test that the loop-vectorizer can legalize all operations for this MaxVF.
5005   // FIXME: While for scalable vectors this is currently sufficient, this should
5006   // be replaced by a more detailed mechanism that filters out specific VFs,
5007   // instead of invalidating vectorization for a whole set of VFs based on the
5008   // MaxVF.
5009 
5010   // Disable scalable vectorization if the loop contains unsupported reductions.
5011   if (!canVectorizeReductions(MaxScalableVF)) {
5012     reportVectorizationInfo(
5013         "Scalable vectorization not supported for the reduction "
5014         "operations found in this loop.",
5015         "ScalableVFUnfeasible", ORE, TheLoop);
5016     return ElementCount::getScalable(0);
5017   }
5018 
5019   // Disable scalable vectorization if the loop contains any instructions
5020   // with element types not supported for scalable vectors.
5021   if (any_of(ElementTypesInLoop, [&](Type *Ty) {
5022         return !Ty->isVoidTy() &&
5023                !this->TTI.isElementTypeLegalForScalableVector(Ty);
5024       })) {
5025     reportVectorizationInfo("Scalable vectorization is not supported "
5026                             "for all element types found in this loop.",
5027                             "ScalableVFUnfeasible", ORE, TheLoop);
5028     return ElementCount::getScalable(0);
5029   }
5030 
5031   if (Legal->isSafeForAnyVectorWidth())
5032     return MaxScalableVF;
5033 
5034   // Limit MaxScalableVF by the maximum safe dependence distance.
5035   Optional<unsigned> MaxVScale = TTI.getMaxVScale();
5036   if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange))
5037     MaxVScale =
5038         TheFunction->getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax();
5039   MaxScalableVF = ElementCount::getScalable(
5040       MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);
5041   if (!MaxScalableVF)
5042     reportVectorizationInfo(
5043         "Max legal vector width too small, scalable vectorization "
5044         "unfeasible.",
5045         "ScalableVFUnfeasible", ORE, TheLoop);
5046 
5047   return MaxScalableVF;
5048 }
5049 
5050 FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF(
5051     unsigned ConstTripCount, ElementCount UserVF, bool FoldTailByMasking) {
5052   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
5053   unsigned SmallestType, WidestType;
5054   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
5055 
5056   // Get the maximum safe dependence distance in bits computed by LAA.
5057   // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
5058   // the memory access that is most restrictive (involved in the smallest
5059   // dependence distance).
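       // For example (hypothetical values): a max safe width of 256 bits with a
       // widest type of 32 bits gives MaxSafeElements = PowerOf2Floor(256 / 32) = 8.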
5060   unsigned MaxSafeElements =
5061       PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType);
5062 
5063   auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements);
5064   auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements);
5065 
5066   LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF
5067                     << ".\n");
5068   LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF
5069                     << ".\n");
5070 
5071   // First analyze the UserVF, fall back if the UserVF should be ignored.
5072   if (UserVF) {
5073     auto MaxSafeUserVF =
5074         UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;
5075 
5076     if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) {
5077       // If `VF=vscale x N` is safe, then so is `VF=N`
5078       if (UserVF.isScalable())
5079         return FixedScalableVFPair(
5080             ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF);
5081       else
5082         return UserVF;
5083     }
5084 
5085     assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));
5086 
5087     // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it
5088     // is better to ignore the hint and let the compiler choose a suitable VF.
5089     if (!UserVF.isScalable()) {
5090       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5091                         << " is unsafe, clamping to max safe VF="
5092                         << MaxSafeFixedVF << ".\n");
5093       ORE->emit([&]() {
5094         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5095                                           TheLoop->getStartLoc(),
5096                                           TheLoop->getHeader())
5097                << "User-specified vectorization factor "
5098                << ore::NV("UserVectorizationFactor", UserVF)
5099                << " is unsafe, clamping to maximum safe vectorization factor "
5100                << ore::NV("VectorizationFactor", MaxSafeFixedVF);
5101       });
5102       return MaxSafeFixedVF;
5103     }
5104 
5105     if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) {
5106       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5107                         << " is ignored because scalable vectors are not "
5108                            "available.\n");
5109       ORE->emit([&]() {
5110         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5111                                           TheLoop->getStartLoc(),
5112                                           TheLoop->getHeader())
5113                << "User-specified vectorization factor "
5114                << ore::NV("UserVectorizationFactor", UserVF)
5115                << " is ignored because the target does not support scalable "
5116                   "vectors. The compiler will pick a more suitable value.";
5117       });
5118     } else {
5119       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5120                         << " is unsafe. Ignoring scalable UserVF.\n");
5121       ORE->emit([&]() {
5122         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5123                                           TheLoop->getStartLoc(),
5124                                           TheLoop->getHeader())
5125                << "User-specified vectorization factor "
5126                << ore::NV("UserVectorizationFactor", UserVF)
5127                << " is unsafe. Ignoring the hint to let the compiler pick a "
5128                   "more suitable value.";
5129       });
5130     }
5131   }
5132 
5133   LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
5134                     << " / " << WidestType << " bits.\n");
5135 
5136   FixedScalableVFPair Result(ElementCount::getFixed(1),
5137                              ElementCount::getScalable(0));
5138   if (auto MaxVF =
5139           getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
5140                                   MaxSafeFixedVF, FoldTailByMasking))
5141     Result.FixedVF = MaxVF;
5142 
5143   if (auto MaxVF =
5144           getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
5145                                   MaxSafeScalableVF, FoldTailByMasking))
5146     if (MaxVF.isScalable()) {
5147       Result.ScalableVF = MaxVF;
5148       LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
5149                         << "\n");
5150     }
5151 
5152   return Result;
5153 }
5154 
5155 FixedScalableVFPair
5156 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
5157   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
5158     // TODO: It may be useful to do this, since the check is still likely to be
5159     // dynamically uniform if the target can skip it.
5160     reportVectorizationFailure(
5161         "Not inserting runtime ptr check for divergent target",
5162         "runtime pointer checks needed. Not enabled for divergent target",
5163         "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
5164     return FixedScalableVFPair::getNone();
5165   }
5166 
5167   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5168   LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
5169   if (TC == 1) {
5170     reportVectorizationFailure("Single iteration (non) loop",
5171         "loop trip count is one, irrelevant for vectorization",
5172         "SingleIterationLoop", ORE, TheLoop);
5173     return FixedScalableVFPair::getNone();
5174   }
5175 
5176   switch (ScalarEpilogueStatus) {
5177   case CM_ScalarEpilogueAllowed:
5178     return computeFeasibleMaxVF(TC, UserVF, false);
5179   case CM_ScalarEpilogueNotAllowedUsePredicate:
5180     LLVM_FALLTHROUGH;
5181   case CM_ScalarEpilogueNotNeededUsePredicate:
5182     LLVM_DEBUG(
5183         dbgs() << "LV: vector predicate hint/switch found.\n"
5184                << "LV: Not allowing scalar epilogue, creating predicated "
5185                << "vector loop.\n");
5186     break;
5187   case CM_ScalarEpilogueNotAllowedLowTripLoop:
5188     // fallthrough as a special case of OptForSize
5189   case CM_ScalarEpilogueNotAllowedOptSize:
5190     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
5191       LLVM_DEBUG(
5192           dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5193     else
5194       LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5195                         << "count.\n");
5196 
5197     // Bail if runtime checks are required, which are not good when optimising
5198     // for size.
5199     if (runtimeChecksRequired())
5200       return FixedScalableVFPair::getNone();
5201 
5202     break;
5203   }
5204 
5205   // The only loops we can vectorize without a scalar epilogue are loops with
5206   // a bottom-test and a single exiting block. We'd have to handle the fact
5207   // that not every instruction executes on the last iteration.  This will
5208   // require a lane mask which varies through the vector loop body.  (TODO)
5209   if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5210     // If there was a tail-folding hint/switch, but we can't fold the tail by
5211     // masking, fallback to a vectorization with a scalar epilogue.
5212     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5213       LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5214                            "scalar epilogue instead.\n");
5215       ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5216       return computeFeasibleMaxVF(TC, UserVF, false);
5217     }
5218     return FixedScalableVFPair::getNone();
5219   }
5220 
5221   // Now try to fold the tail by masking.
5222 
5223   // Invalidate interleave groups that require an epilogue if we can't mask
5224   // the interleave-group.
5225   if (!useMaskedInterleavedAccesses(TTI)) {
5226     assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5227            "No decisions should have been taken at this point");
5228     // Note: There is no need to invalidate any cost modeling decisions here, as
5229     // none were taken so far.
5230     InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5231   }
5232 
5233   FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF, true);
5234   // Avoid tail folding if the trip count is known to be a multiple of any VF
5235   // we chose.
5236   // FIXME: The condition below pessimises the case for fixed-width vectors,
5237   // when scalable VFs are also candidates for vectorization.
5238   if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) {
5239     ElementCount MaxFixedVF = MaxFactors.FixedVF;
5240     assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) &&
5241            "MaxFixedVF must be a power of 2");
5242     unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC
5243                                    : MaxFixedVF.getFixedValue();
5244     ScalarEvolution *SE = PSE.getSE();
5245     const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
5246     const SCEV *ExitCount = SE->getAddExpr(
5247         BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
5248     const SCEV *Rem = SE->getURemExpr(
5249         SE->applyLoopGuards(ExitCount, TheLoop),
5250         SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
5251     if (Rem->isZero()) {
5252       // Accept MaxFixedVF if we do not have a tail.
5253       LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
5254       return MaxFactors;
5255     }
5256   }
5257 
5258   // For scalable vectors, don't use tail folding for low trip counts or when
5259   // optimizing for code size. We only permit it in those cases if the user has
5260   // explicitly requested it.
5261   if (ScalarEpilogueStatus != CM_ScalarEpilogueNotNeededUsePredicate &&
5262       ScalarEpilogueStatus != CM_ScalarEpilogueNotAllowedUsePredicate &&
5263       MaxFactors.ScalableVF.isVector())
5264     MaxFactors.ScalableVF = ElementCount::getScalable(0);
5265 
5266   // If we don't know the precise trip count, or if the trip count that we
5267   // found modulo the vectorization factor is not zero, try to fold the tail
5268   // by masking.
5269   // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
5270   if (Legal->prepareToFoldTailByMasking()) {
5271     FoldTailByMasking = true;
5272     return MaxFactors;
5273   }
5274 
5275   // If there was a tail-folding hint/switch, but we can't fold the tail by
5276   // masking, fallback to a vectorization with a scalar epilogue.
5277   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5278     LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5279                          "scalar epilogue instead.\n");
5280     ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5281     return MaxFactors;
5282   }
5283 
5284   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
5285     LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
5286     return FixedScalableVFPair::getNone();
5287   }
5288 
5289   if (TC == 0) {
5290     reportVectorizationFailure(
5291         "Unable to calculate the loop count due to complex control flow",
5292         "unable to calculate the loop count due to complex control flow",
5293         "UnknownLoopCountComplexCFG", ORE, TheLoop);
5294     return FixedScalableVFPair::getNone();
5295   }
5296 
5297   reportVectorizationFailure(
5298       "Cannot optimize for size and vectorize at the same time.",
5299       "cannot optimize for size and vectorize at the same time. "
5300       "Enable vectorization of this loop with '#pragma clang loop "
5301       "vectorize(enable)' when compiling with -Os/-Oz",
5302       "NoTailLoopWithOptForSize", ORE, TheLoop);
5303   return FixedScalableVFPair::getNone();
5304 }
5305 
5306 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
5307     unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType,
5308     const ElementCount &MaxSafeVF, bool FoldTailByMasking) {
5309   bool ComputeScalableMaxVF = MaxSafeVF.isScalable();
5310   TypeSize WidestRegister = TTI.getRegisterBitWidth(
5311       ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
5312                            : TargetTransformInfo::RGK_FixedWidthVector);
5313 
5314   // Convenience function to return the minimum of two ElementCounts.
5315   auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) {
5316     assert((LHS.isScalable() == RHS.isScalable()) &&
5317            "Scalable flags must match");
5318     return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS;
5319   };
5320 
5321   // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
5322   // Note that both WidestRegister and WidestType may not be powers of 2.
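       // For example (hypothetical values): a 384-bit register with a widest type
       // of 64 bits gives PowerOf2Floor(384 / 64) = PowerOf2Floor(6) = 4 lanes.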
5323   auto MaxVectorElementCount = ElementCount::get(
5324       PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType),
5325       ComputeScalableMaxVF);
5326   MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
5327   LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
5328                     << (MaxVectorElementCount * WidestType) << " bits.\n");
5329 
5330   if (!MaxVectorElementCount) {
5331     LLVM_DEBUG(dbgs() << "LV: The target has no "
5332                       << (ComputeScalableMaxVF ? "scalable" : "fixed")
5333                       << " vector registers.\n");
5334     return ElementCount::getFixed(1);
5335   }
5336 
5337   const auto TripCountEC = ElementCount::getFixed(ConstTripCount);
5338   if (ConstTripCount &&
5339       ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) &&
5340       (!FoldTailByMasking || isPowerOf2_32(ConstTripCount))) {
5341     // If loop trip count (TC) is known at compile time there is no point in
5342     // choosing VF greater than TC (as done in the loop below). Select maximum
5343     // power of two which doesn't exceed TC.
5344     // If MaxVectorElementCount is scalable, we only fall back on a fixed VF
5345     // when the TC is less than or equal to the known number of lanes.
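         // For example (hypothetical values): ConstTripCount = 10 with a fixed
         // MaxVectorElementCount of 16 (and no tail folding) clamps the VF to
         // PowerOf2Floor(10) = 8 lanes rather than a 16-wide body that never fills.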
5346     auto ClampedConstTripCount = PowerOf2Floor(ConstTripCount);
5347     LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to maximum power of two not "
5348                          "exceeding the constant trip count: "
5349                       << ClampedConstTripCount << "\n");
5350     return ElementCount::getFixed(ClampedConstTripCount);
5351   }
5352 
5353   ElementCount MaxVF = MaxVectorElementCount;
5354   if (TTI.shouldMaximizeVectorBandwidth() ||
5355       (MaximizeBandwidth && isScalarEpilogueAllowed())) {
5356     auto MaxVectorElementCountMaxBW = ElementCount::get(
5357         PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType),
5358         ComputeScalableMaxVF);
5359     MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
5360 
5361     // Collect all viable vectorization factors larger than the default MaxVF
5362     // (i.e. MaxVectorElementCount).
5363     SmallVector<ElementCount, 8> VFs;
5364     for (ElementCount VS = MaxVectorElementCount * 2;
5365          ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2)
5366       VFs.push_back(VS);
5367 
5368     // For each VF calculate its register usage.
5369     auto RUs = calculateRegisterUsage(VFs);
5370 
5371     // Select the largest VF which doesn't require more registers than existing
5372     // ones.
5373     for (int i = RUs.size() - 1; i >= 0; --i) {
5374       bool Selected = true;
5375       for (auto &pair : RUs[i].MaxLocalUsers) {
5376         unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5377         if (pair.second > TargetNumRegisters)
5378           Selected = false;
5379       }
5380       if (Selected) {
5381         MaxVF = VFs[i];
5382         break;
5383       }
5384     }
5385     if (ElementCount MinVF =
5386             TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) {
5387       if (ElementCount::isKnownLT(MaxVF, MinVF)) {
5388         LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
5389                           << ") with target's minimum: " << MinVF << '\n');
5390         MaxVF = MinVF;
5391       }
5392     }
5393   }
5394   return MaxVF;
5395 }
5396 
5397 Optional<unsigned> LoopVectorizationCostModel::getVScaleForTuning() const {
5398   if (TheFunction->hasFnAttribute(Attribute::VScaleRange)) {
5399     auto Attr = TheFunction->getFnAttribute(Attribute::VScaleRange);
5400     auto Min = Attr.getVScaleRangeMin();
5401     auto Max = Attr.getVScaleRangeMax();
5402     if (Max && Min == Max)
5403       return Max;
5404   }
5405 
5406   return TTI.getVScaleForTuning();
5407 }
5408 
5409 bool LoopVectorizationCostModel::isMoreProfitable(
5410     const VectorizationFactor &A, const VectorizationFactor &B) const {
5411   InstructionCost CostA = A.Cost;
5412   InstructionCost CostB = B.Cost;
5413 
5414   unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop);
5415 
5416   if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking &&
5417       MaxTripCount) {
5418     // If we are folding the tail and the trip count is a known (possibly small)
5419     // constant, the trip count will be rounded up to an integer number of
5420     // vector iterations. The total cost is then PerIterationCost*ceil(TripCount/VF),
5421     // which we compare directly. When not folding the tail, the total cost will
5422     // be PerIterationCost*floor(TC/VF) + scalar remainder cost, and so it is
5423     // approximated with the per-lane cost below instead of using the trip count
5424     // as is done here.
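         // For example (hypothetical values): MaxTripCount = 10, A = {VF=4, Cost=8}
         // and B = {VF=8, Cost=14} give RTCostA = 8 * ceil(10/4) = 24 and
         // RTCostB = 14 * ceil(10/8) = 28, so A wins even though B has the lower
         // per-lane cost.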
5425     auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue());
5426     auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue());
5427     return RTCostA < RTCostB;
5428   }
5429 
5430   // Improve estimate for the vector width if it is scalable.
5431   unsigned EstimatedWidthA = A.Width.getKnownMinValue();
5432   unsigned EstimatedWidthB = B.Width.getKnownMinValue();
5433   if (Optional<unsigned> VScale = getVScaleForTuning()) {
5434     if (A.Width.isScalable())
5435       EstimatedWidthA *= VScale.getValue();
5436     if (B.Width.isScalable())
5437       EstimatedWidthB *= VScale.getValue();
5438   }
5439 
5440   // Assume vscale may be larger than 1 (or the value being tuned for),
5441   // so that scalable vectorization is slightly favorable over fixed-width
5442   // vectorization.
5443   if (A.Width.isScalable() && !B.Width.isScalable())
5444     return (CostA * B.Width.getFixedValue()) <= (CostB * EstimatedWidthA);
5445 
5446   // To avoid the need for FP division:
5447   //      (CostA / A.Width) < (CostB / B.Width)
5448   // <=>  (CostA * B.Width) < (CostB * A.Width)
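       // For example (hypothetical values): A = {Width=4, Cost=10} and
       // B = {Width=2, Cost=6} give 10 * 2 = 20 < 6 * 4 = 24, so A (2.5 per lane)
       // is preferred over B (3 per lane).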
5449   return (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA);
5450 }
5451 
5452 VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor(
5453     const ElementCountSet &VFCandidates) {
5454   InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first;
5455   LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
5456   assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
5457   assert(VFCandidates.count(ElementCount::getFixed(1)) &&
5458          "Expected Scalar VF to be a candidate");
5459 
5460   const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost);
5461   VectorizationFactor ChosenFactor = ScalarCost;
5462 
5463   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
5464   if (ForceVectorization && VFCandidates.size() > 1) {
5465     // Ignore scalar width, because the user explicitly wants vectorization.
5466     // Initialize cost to max so that VF = 2 is, at least, chosen during cost
5467     // evaluation.
5468     ChosenFactor.Cost = InstructionCost::getMax();
5469   }
5470 
5471   SmallVector<InstructionVFPair> InvalidCosts;
5472   for (const auto &i : VFCandidates) {
5473     // The cost for scalar VF=1 is already calculated, so ignore it.
5474     if (i.isScalar())
5475       continue;
5476 
5477     VectorizationCostTy C = expectedCost(i, &InvalidCosts);
5478     VectorizationFactor Candidate(i, C.first);
5479 
5480 #ifndef NDEBUG
5481     unsigned AssumedMinimumVscale = 1;
5482     if (Optional<unsigned> VScale = getVScaleForTuning())
5483       AssumedMinimumVscale = VScale.getValue();
5484     unsigned Width =
5485         Candidate.Width.isScalable()
5486             ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale
5487             : Candidate.Width.getFixedValue();
5488     LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
5489                       << " costs: " << (Candidate.Cost / Width));
5490     if (i.isScalable())
5491       LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of "
5492                         << AssumedMinimumVscale << ")");
5493     LLVM_DEBUG(dbgs() << ".\n");
5494 #endif
5495 
5496     if (!C.second && !ForceVectorization) {
5497       LLVM_DEBUG(
5498           dbgs() << "LV: Not considering vector loop of width " << i
5499                  << " because it will not generate any vector instructions.\n");
5500       continue;
5501     }
5502 
5503     // If profitable add it to ProfitableVF list.
5504     if (isMoreProfitable(Candidate, ScalarCost))
5505       ProfitableVFs.push_back(Candidate);
5506 
5507     if (isMoreProfitable(Candidate, ChosenFactor))
5508       ChosenFactor = Candidate;
5509   }
5510 
5511   // Emit a report of VFs with invalid costs in the loop.
5512   if (!InvalidCosts.empty()) {
5513     // Group the remarks per instruction, keeping the instruction order from
5514     // InvalidCosts.
5515     std::map<Instruction *, unsigned> Numbering;
5516     unsigned I = 0;
5517     for (auto &Pair : InvalidCosts)
5518       if (!Numbering.count(Pair.first))
5519         Numbering[Pair.first] = I++;
5520 
5521     // Sort the list, first on instruction(number) then on VF.
5522     llvm::sort(InvalidCosts,
5523                [&Numbering](InstructionVFPair &A, InstructionVFPair &B) {
5524                  if (Numbering[A.first] != Numbering[B.first])
5525                    return Numbering[A.first] < Numbering[B.first];
5526                  ElementCountComparator ECC;
5527                  return ECC(A.second, B.second);
5528                });
5529 
5530     // For a list of ordered instruction-vf pairs:
5531     //   [(load, vf1), (load, vf2), (store, vf1)]
5532     // Group the instructions together to emit separate remarks for:
5533     //   load  (vf1, vf2)
5534     //   store (vf1)
5535     auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts);
5536     auto Subset = ArrayRef<InstructionVFPair>();
5537     do {
5538       if (Subset.empty())
5539         Subset = Tail.take_front(1);
5540 
5541       Instruction *I = Subset.front().first;
5542 
5543       // If the next instruction is different, or if there are no other pairs,
5544       // emit a remark for the collated subset. e.g.
5545       //   [(load, vf1), (load, vf2))]
5546       // to emit:
5547       //  remark: invalid costs for 'load' at VF=(vf1, vf2)
5548       if (Subset == Tail || Tail[Subset.size()].first != I) {
5549         std::string OutString;
5550         raw_string_ostream OS(OutString);
5551         assert(!Subset.empty() && "Unexpected empty range");
5552         OS << "Instruction with invalid costs prevented vectorization at VF=(";
5553         for (auto &Pair : Subset)
5554           OS << (Pair.second == Subset.front().second ? "" : ", ")
5555              << Pair.second;
5556         OS << "):";
5557         if (auto *CI = dyn_cast<CallInst>(I))
5558           OS << " call to " << CI->getCalledFunction()->getName();
5559         else
5560           OS << " " << I->getOpcodeName();
5561         OS.flush();
5562         reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I);
5563         Tail = Tail.drop_front(Subset.size());
5564         Subset = {};
5565       } else
5566         // Grow the subset by one element
5567         Subset = Tail.take_front(Subset.size() + 1);
5568     } while (!Tail.empty());
5569   }
5570 
5571   if (!EnableCondStoresVectorization && NumPredStores) {
5572     reportVectorizationFailure("There are conditional stores.",
5573         "store that is conditionally executed prevents vectorization",
5574         "ConditionalStore", ORE, TheLoop);
5575     ChosenFactor = ScalarCost;
5576   }
5577 
5578   LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() &&
5579                  ChosenFactor.Cost >= ScalarCost.Cost) dbgs()
5580              << "LV: Vectorization seems to be not beneficial, "
5581              << "but was forced by a user.\n");
5582   LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n");
5583   return ChosenFactor;
5584 }
5585 
5586 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization(
5587     const Loop &L, ElementCount VF) const {
5588   // Cross iteration phis such as reductions need special handling and are
5589   // currently unsupported.
5590   if (any_of(L.getHeader()->phis(),
5591              [&](PHINode &Phi) { return Legal->isFirstOrderRecurrence(&Phi); }))
5592     return false;
5593 
5594   // Phis with uses outside of the loop require special handling and are
5595   // currently unsupported.
5596   for (auto &Entry : Legal->getInductionVars()) {
5597     // Look for uses of the value of the induction at the last iteration.
5598     Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch());
5599     for (User *U : PostInc->users())
5600       if (!L.contains(cast<Instruction>(U)))
5601         return false;
5602     // Look for uses of penultimate value of the induction.
5603     for (User *U : Entry.first->users())
5604       if (!L.contains(cast<Instruction>(U)))
5605         return false;
5606   }
5607 
5608   // Induction variables that are widened require special handling that is
5609   // currently not supported.
5610   if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
5611         return !(this->isScalarAfterVectorization(Entry.first, VF) ||
5612                  this->isProfitableToScalarize(Entry.first, VF));
5613       }))
5614     return false;
5615 
5616   // Epilogue vectorization code has not been audited to ensure it handles
5617   // non-latch exits properly. It may be fine, but it needs to be audited and
5618   // tested.
5619   if (L.getExitingBlock() != L.getLoopLatch())
5620     return false;
5621 
5622   return true;
5623 }
5624 
5625 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
5626     const ElementCount VF) const {
5627   // FIXME: We need a much better cost-model to take different parameters such
5628   // as register pressure, code size increase and cost of extra branches into
5629   // account. For now we apply a very crude heuristic and only consider loops
5630   // with vectorization factors larger than a certain value.
5631   // We also consider epilogue vectorization unprofitable for targets that don't
5632   // consider interleaving beneficial (e.g. MVE).
5633   if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
5634     return false;
5635   // FIXME: We should consider changing the threshold for scalable
5636   // vectors to take VScaleForTuning into account.
5637   if (VF.getKnownMinValue() >= EpilogueVectorizationMinVF)
5638     return true;
5639   return false;
5640 }
5641 
5642 VectorizationFactor
5643 LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
5644     const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
5645   VectorizationFactor Result = VectorizationFactor::Disabled();
5646   if (!EnableEpilogueVectorization) {
5647     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
5648     return Result;
5649   }
5650 
5651   if (!isScalarEpilogueAllowed()) {
5652     LLVM_DEBUG(
5653         dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
5654                   "allowed.\n";);
5655     return Result;
5656   }
5657 
5658   // Not really a cost consideration, but check for unsupported cases here to
5659   // simplify the logic.
5660   if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
5661     LLVM_DEBUG(
5662         dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
5663                   "not a supported candidate.\n";);
5664     return Result;
5665   }
5666 
5667   if (EpilogueVectorizationForceVF > 1) {
5668     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
5669     ElementCount ForcedEC = ElementCount::getFixed(EpilogueVectorizationForceVF);
5670     if (LVP.hasPlanWithVF(ForcedEC))
5671       return {ForcedEC, 0};
5672     else {
5673       LLVM_DEBUG(
5674           dbgs()
5675               << "LEV: Epilogue vectorization forced factor is not viable.\n";);
5676       return Result;
5677     }
5678   }
5679 
5680   if (TheLoop->getHeader()->getParent()->hasOptSize() ||
5681       TheLoop->getHeader()->getParent()->hasMinSize()) {
5682     LLVM_DEBUG(
5683         dbgs()
5684             << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
5685     return Result;
5686   }
5687 
5688   if (!isEpilogueVectorizationProfitable(MainLoopVF)) {
5689     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for "
5690                          "this loop\n");
5691     return Result;
5692   }
5693 
5694   // If MainLoopVF = vscale x 2, and vscale is expected to be 4, then we know
5695   // the main loop handles 8 lanes per iteration. We could still benefit from
5696   // vectorizing the epilogue loop with VF=4.
5697   ElementCount EstimatedRuntimeVF = MainLoopVF;
5698   if (MainLoopVF.isScalable()) {
5699     EstimatedRuntimeVF = ElementCount::getFixed(MainLoopVF.getKnownMinValue());
5700     if (Optional<unsigned> VScale = getVScaleForTuning())
5701       EstimatedRuntimeVF *= VScale.getValue();
5702   }
5703 
5704   for (auto &NextVF : ProfitableVFs)
5705     if (((!NextVF.Width.isScalable() && MainLoopVF.isScalable() &&
5706           ElementCount::isKnownLT(NextVF.Width, EstimatedRuntimeVF)) ||
5707          ElementCount::isKnownLT(NextVF.Width, MainLoopVF)) &&
5708         (Result.Width.isScalar() || isMoreProfitable(NextVF, Result)) &&
5709         LVP.hasPlanWithVF(NextVF.Width))
5710       Result = NextVF;
5711 
5712   if (Result != VectorizationFactor::Disabled())
5713     LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
5714                       << Result.Width << "\n";);
5715   return Result;
5716 }
5717 
5718 std::pair<unsigned, unsigned>
5719 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
5720   unsigned MinWidth = -1U;
5721   unsigned MaxWidth = 8;
5722   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5723   // For in-loop reductions, no element types are added to ElementTypesInLoop
5724   // if there are no loads/stores in the loop. In this case, check through the
5725   // reduction variables to determine the maximum width.
5726   if (ElementTypesInLoop.empty() && !Legal->getReductionVars().empty()) {
5727     // Reset MaxWidth so that we can find the smallest type used by recurrences
5728     // in the loop.
5729     MaxWidth = -1U;
5730     for (auto &PhiDescriptorPair : Legal->getReductionVars()) {
5731       const RecurrenceDescriptor &RdxDesc = PhiDescriptorPair.second;
5732       // When finding the min width used by the recurrence we need to account
5733       // for casts on the input operands of the recurrence.
5734       MaxWidth = std::min<unsigned>(
5735           MaxWidth, std::min<unsigned>(
5736                         RdxDesc.getMinWidthCastToRecurrenceTypeInBits(),
5737                         RdxDesc.getRecurrenceType()->getScalarSizeInBits()));
5738     }
5739   } else {
5740     for (Type *T : ElementTypesInLoop) {
5741       MinWidth = std::min<unsigned>(
5742           MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
5743       MaxWidth = std::max<unsigned>(
5744           MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
5745     }
5746   }
5747   return {MinWidth, MaxWidth};
5748 }
5749 
5750 void LoopVectorizationCostModel::collectElementTypesForWidening() {
5751   ElementTypesInLoop.clear();
5752   // For each block.
5753   for (BasicBlock *BB : TheLoop->blocks()) {
5754     // For each instruction in the loop.
5755     for (Instruction &I : BB->instructionsWithoutDebug()) {
5756       Type *T = I.getType();
5757 
5758       // Skip ignored values.
5759       if (ValuesToIgnore.count(&I))
5760         continue;
5761 
5762       // Only examine Loads, Stores and PHINodes.
5763       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
5764         continue;
5765 
5766       // Examine PHI nodes that are reduction variables. Update the type to
5767       // account for the recurrence type.
5768       if (auto *PN = dyn_cast<PHINode>(&I)) {
5769         if (!Legal->isReductionVariable(PN))
5770           continue;
5771         const RecurrenceDescriptor &RdxDesc =
5772             Legal->getReductionVars().find(PN)->second;
5773         if (PreferInLoopReductions || useOrderedReductions(RdxDesc) ||
5774             TTI.preferInLoopReduction(RdxDesc.getOpcode(),
5775                                       RdxDesc.getRecurrenceType(),
5776                                       TargetTransformInfo::ReductionFlags()))
5777           continue;
5778         T = RdxDesc.getRecurrenceType();
5779       }
5780 
5781       // Examine the stored values.
5782       if (auto *ST = dyn_cast<StoreInst>(&I))
5783         T = ST->getValueOperand()->getType();
5784 
5785       assert(T->isSized() &&
5786              "Expected the load/store/recurrence type to be sized");
5787 
5788       ElementTypesInLoop.insert(T);
5789     }
5790   }
5791 }
5792 
5793 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
5794                                                            unsigned LoopCost) {
5795   // -- The interleave heuristics --
5796   // We interleave the loop in order to expose ILP and reduce the loop overhead.
5797   // There are many micro-architectural considerations that we can't predict
5798   // at this level. For example, frontend pressure (on decode or fetch) due to
5799   // code size, or the number and capabilities of the execution ports.
5800   //
5801   // We use the following heuristics to select the interleave count:
5802   // 1. If the code has reductions, then we interleave to break the cross
5803   // iteration dependency.
5804   // 2. If the loop is really small, then we interleave to reduce the loop
5805   // overhead.
5806   // 3. We don't interleave if we think that we will spill registers to memory
5807   // due to the increased register pressure.
5808 
5809   if (!isScalarEpilogueAllowed())
5810     return 1;
5811 
5812   // The loop has a finite max safe dependence distance, so do not interleave.
5813   if (Legal->getMaxSafeDepDistBytes() != -1U)
5814     return 1;
5815 
5816   auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
5817   const bool HasReductions = !Legal->getReductionVars().empty();
5818   // Do not interleave loops with a relatively small known or estimated trip
5819   // count. But we will interleave when InterleaveSmallLoopScalarReduction is
5820   // enabled, and the code has scalar reductions (HasReductions && VF == 1),
5821   // because with the above conditions interleaving can expose ILP and break
5822   // cross-iteration dependences for reductions.
5823   if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
5824       !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
5825     return 1;
5826 
5827   RegisterUsage R = calculateRegisterUsage({VF})[0];
5828   // We divide by these constants so assume that we have at least one
5829   // instruction that uses at least one register.
5830   for (auto& pair : R.MaxLocalUsers) {
5831     pair.second = std::max(pair.second, 1U);
5832   }
5833 
5834   // We calculate the interleave count using the following formula.
5835   // Subtract the number of loop invariants from the number of available
5836   // registers. These registers are used by all of the interleaved instances.
5837   // Next, divide the remaining registers by the number of registers that is
5838   // required by the loop, in order to estimate how many parallel instances
5839   // fit without causing spills. All of this is rounded down if necessary to be
5840   // a power of two. We want a power-of-two interleave count to simplify any
5841   // addressing operations or alignment considerations.
5842   // We also want power-of-two interleave counts to ensure that the induction
5843   // variable of the vector loop wraps to zero when the tail is folded by masking;
5844   // this currently happens when OptForSize, in which case IC is set to 1 above.
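       // For example (hypothetical values): 32 registers in a class with 2 held by
       // loop invariants and 6 live per interleaved instance gives
       // IC = PowerOf2Floor((32 - 2) / 6) = PowerOf2Floor(5) = 4, before the
       // optional induction-variable adjustment below.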
5845   unsigned IC = UINT_MAX;
5846 
5847   for (auto& pair : R.MaxLocalUsers) {
5848     unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5849     LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
5850                       << " registers of "
5851                       << TTI.getRegisterClassName(pair.first) << " register class\n");
5852     if (VF.isScalar()) {
5853       if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
5854         TargetNumRegisters = ForceTargetNumScalarRegs;
5855     } else {
5856       if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
5857         TargetNumRegisters = ForceTargetNumVectorRegs;
5858     }
5859     unsigned MaxLocalUsers = pair.second;
5860     unsigned LoopInvariantRegs = 0;
5861     if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
5862       LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
5863 
5864     unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers);
5865     // Don't count the induction variable as interleaved.
5866     if (EnableIndVarRegisterHeur) {
5867       TmpIC =
5868           PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
5869                         std::max(1U, (MaxLocalUsers - 1)));
5870     }
5871 
5872     IC = std::min(IC, TmpIC);
5873   }
5874 
5875   // Clamp the interleave ranges to reasonable counts.
5876   unsigned MaxInterleaveCount =
5877       TTI.getMaxInterleaveFactor(VF.getKnownMinValue());
5878 
5879   // Check if the user has overridden the max.
5880   if (VF.isScalar()) {
5881     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
5882       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
5883   } else {
5884     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
5885       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
5886   }
5887 
5888   // If trip count is known or estimated compile time constant, limit the
5889   // interleave count to be less than the trip count divided by VF, provided it
5890   // is at least 1.
5891   //
5892   // For scalable vectors we can't know if interleaving is beneficial. It may
5893   // not be beneficial for small loops if none of the lanes in the second vector
5894   // iteration is enabled. However, for larger loops, there is likely to be a
5895   // similar benefit as for fixed-width vectors. For now, we choose to leave
5896   // the InterleaveCount as if vscale is '1', although if some information about
5897   // the vector is known (e.g. min vector size), we can make a better decision.
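       // For example (hypothetical values): BestKnownTC = 24 with VF = 4 caps
       // MaxInterleaveCount at 24 / 4 = 6, so the interleaved body cannot overrun
       // the estimated trip count.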
5898   if (BestKnownTC) {
5899     MaxInterleaveCount =
5900         std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
5901     // Make sure MaxInterleaveCount is greater than 0.
5902     MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
5903   }
5904 
5905   assert(MaxInterleaveCount > 0 &&
5906          "Maximum interleave count must be greater than 0");
5907 
5908   // Clamp the calculated IC to be between 1 and the max interleave count
5909   // that the target and trip count allow.
5910   if (IC > MaxInterleaveCount)
5911     IC = MaxInterleaveCount;
5912   else
5913     // Make sure IC is greater than 0.
5914     IC = std::max(1u, IC);
5915 
5916   assert(IC > 0 && "Interleave count must be greater than 0.");
5917 
5918   // If we did not calculate the cost for VF (because the user selected the VF)
5919   // then we calculate the cost of VF here.
5920   if (LoopCost == 0) {
5921     InstructionCost C = expectedCost(VF).first;
5922     assert(C.isValid() && "Expected to have chosen a VF with valid cost");
5923     LoopCost = *C.getValue();
5924   }
5925 
5926   assert(LoopCost && "Non-zero loop cost expected");
5927 
5928   // Interleave if we vectorized this loop and there is a reduction that could
5929   // benefit from interleaving.
5930   if (VF.isVector() && HasReductions) {
5931     LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
5932     return IC;
5933   }
5934 
5935   // For any scalar loop that either requires runtime checks or predication we
5936   // are better off leaving this to the unroller. Note that if we've already
5937   // vectorized the loop we will have done the runtime check and so interleaving
5938   // won't require further checks.
5939   bool ScalarInterleavingRequiresPredication =
5940       (VF.isScalar() && any_of(TheLoop->blocks(), [this](BasicBlock *BB) {
5941          return Legal->blockNeedsPredication(BB);
5942        }));
5943   bool ScalarInterleavingRequiresRuntimePointerCheck =
5944       (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
5945 
5946   // We want to interleave small loops in order to reduce the loop overhead and
5947   // potentially expose ILP opportunities.
5948   LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
5949                     << "LV: IC is " << IC << '\n'
5950                     << "LV: VF is " << VF << '\n');
5951   const bool AggressivelyInterleaveReductions =
5952       TTI.enableAggressiveInterleaving(HasReductions);
5953   if (!ScalarInterleavingRequiresRuntimePointerCheck &&
5954       !ScalarInterleavingRequiresPredication && LoopCost < SmallLoopCost) {
5955     // We assume that the cost overhead is 1 and we use the cost model
5956     // to estimate the cost of the loop and interleave until the cost of the
5957     // loop overhead is about 5% of the cost of the loop.
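         // For example (hypothetical values): SmallLoopCost = 20 with LoopCost = 4
         // gives SmallIC = min(IC, PowerOf2Floor(20 / 4)) = min(IC, 4).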
5958     unsigned SmallIC =
5959         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
5960 
5961     // Interleave until store/load ports (estimated by max interleave count) are
5962     // saturated.
5963     unsigned NumStores = Legal->getNumStores();
5964     unsigned NumLoads = Legal->getNumLoads();
5965     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
5966     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
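    // E.g. with IC = 8, two stores and one load in the loop, StoresIC = 4 and
    // LoadsIC = 8; if that exceeds SmallIC, the heuristic below may interleave
    // by 8 to saturate the memory ports.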
5967 
5968     // There is little point in interleaving for reductions containing selects
5969     // and compares when VF=1 since it may just create more overhead than it's
5970     // worth for loops with small trip counts. This is because we still have to
5971     // do the final reduction after the loop.
5972     bool HasSelectCmpReductions =
5973         HasReductions &&
5974         any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
5975           const RecurrenceDescriptor &RdxDesc = Reduction.second;
5976           return RecurrenceDescriptor::isSelectCmpRecurrenceKind(
5977               RdxDesc.getRecurrenceKind());
5978         });
5979     if (HasSelectCmpReductions) {
5980       LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n");
5981       return 1;
5982     }
5983 
5984     // If we have a scalar reduction (vector reductions are already dealt with
5985     // by this point), we can increase the critical path length if the loop
5986     // we're interleaving is inside another loop. For tree-wise reductions
5987     // set the limit to 2, and for ordered reductions it's best to disable
5988     // interleaving entirely.
5989     if (HasReductions && TheLoop->getLoopDepth() > 1) {
5990       bool HasOrderedReductions =
5991           any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
5992             const RecurrenceDescriptor &RdxDesc = Reduction.second;
5993             return RdxDesc.isOrdered();
5994           });
5995       if (HasOrderedReductions) {
5996         LLVM_DEBUG(
5997             dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
5998         return 1;
5999       }
6000 
6001       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
6002       SmallIC = std::min(SmallIC, F);
6003       StoresIC = std::min(StoresIC, F);
6004       LoadsIC = std::min(LoadsIC, F);
6005     }
6006 
6007     if (EnableLoadStoreRuntimeInterleave &&
6008         std::max(StoresIC, LoadsIC) > SmallIC) {
6009       LLVM_DEBUG(
6010           dbgs() << "LV: Interleaving to saturate store or load ports.\n");
6011       return std::max(StoresIC, LoadsIC);
6012     }
6013 
6014     // If there are scalar reductions and TTI has enabled aggressive
6015     // interleaving for reductions, we will interleave to expose ILP.
6016     if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
6017         AggressivelyInterleaveReductions) {
6018       LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
      // Interleave no less than SmallIC but not as aggressively as the normal
      // IC, to satisfy the rare situation when resources are too limited.
6021       return std::max(IC / 2, SmallIC);
6022     } else {
6023       LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
6024       return SmallIC;
6025     }
6026   }
6027 
6028   // Interleave if this is a large loop (small loops are already dealt with by
6029   // this point) that could benefit from interleaving.
6030   if (AggressivelyInterleaveReductions) {
6031     LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6032     return IC;
6033   }
6034 
6035   LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
6036   return 1;
6037 }
6038 
6039 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
6040 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
6041   // This function calculates the register usage by measuring the highest number
6042   // of values that are alive at a single location. Obviously, this is a very
  // rough estimation. We scan the loop in topological order and assign a
  // number to each instruction. We use RPO to ensure that defs are
6045   // met before their users. We assume that each instruction that has in-loop
6046   // users starts an interval. We record every time that an in-loop value is
6047   // used, so we have a list of the first and last occurrences of each
6048   // instruction. Next, we transpose this data structure into a multi map that
6049   // holds the list of intervals that *end* at a specific location. This multi
6050   // map allows us to perform a linear search. We scan the instructions linearly
6051   // and record each time that a new interval starts, by placing it in a set.
6052   // If we find this value in the multi-map then we remove it from the set.
6053   // The max register usage is the maximum size of the set.
6054   // We also search for instructions that are defined outside the loop, but are
6055   // used inside the loop. We need this number separately from the max-interval
6056   // usage number because when we unroll, loop-invariant values do not take
  // more registers.
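  //
  // As a rough illustration (hypothetical IR), for a loop body like
  //   %a = load ...
  //   %b = add %a, 1
  //   %c = mul %a, %b
  //   store %c, ...
  // %a stays live until the mul, so its interval overlaps those of %b and %c,
  // and the maximum number of simultaneously open intervals drives the
  // per-iteration register pressure estimate for each candidate VF.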
6058   LoopBlocksDFS DFS(TheLoop);
6059   DFS.perform(LI);
6060 
6061   RegisterUsage RU;
6062 
6063   // Each 'key' in the map opens a new interval. The values
6064   // of the map are the index of the 'last seen' usage of the
6065   // instruction that is the key.
6066   using IntervalMap = DenseMap<Instruction *, unsigned>;
6067 
6068   // Maps instruction to its index.
6069   SmallVector<Instruction *, 64> IdxToInstr;
6070   // Marks the end of each interval.
6071   IntervalMap EndPoint;
  // Saves the instructions that are used inside the loop.
6073   SmallPtrSet<Instruction *, 8> Ends;
6074   // Saves the list of values that are used in the loop but are
6075   // defined outside the loop, such as arguments and constants.
6076   SmallPtrSet<Value *, 8> LoopInvariants;
6077 
6078   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
6079     for (Instruction &I : BB->instructionsWithoutDebug()) {
6080       IdxToInstr.push_back(&I);
6081 
6082       // Save the end location of each USE.
6083       for (Value *U : I.operands()) {
6084         auto *Instr = dyn_cast<Instruction>(U);
6085 
6086         // Ignore non-instruction values such as arguments, constants, etc.
6087         if (!Instr)
6088           continue;
6089 
6090         // If this instruction is outside the loop then record it and continue.
6091         if (!TheLoop->contains(Instr)) {
6092           LoopInvariants.insert(Instr);
6093           continue;
6094         }
6095 
6096         // Overwrite previous end points.
6097         EndPoint[Instr] = IdxToInstr.size();
6098         Ends.insert(Instr);
6099       }
6100     }
6101   }
6102 
6103   // Saves the list of intervals that end with the index in 'key'.
6104   using InstrList = SmallVector<Instruction *, 2>;
6105   DenseMap<unsigned, InstrList> TransposeEnds;
6106 
6107   // Transpose the EndPoints to a list of values that end at each index.
6108   for (auto &Interval : EndPoint)
6109     TransposeEnds[Interval.second].push_back(Interval.first);
6110 
6111   SmallPtrSet<Instruction *, 8> OpenIntervals;
6112   SmallVector<RegisterUsage, 8> RUs(VFs.size());
6113   SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());
6114 
6115   LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
6116 
6117   // A lambda that gets the register usage for the given type and VF.
6118   const auto &TTICapture = TTI;
6119   auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned {
6120     if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty))
6121       return 0;
6122     InstructionCost::CostType RegUsage =
6123         *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue();
6124     assert(RegUsage >= 0 && RegUsage <= std::numeric_limits<unsigned>::max() &&
6125            "Nonsensical values for register usage.");
6126     return RegUsage;
6127   };
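  // For instance, with Ty = i32 and VF = 4 this asks the target how many
  // registers a <4 x i32> value occupies; token types and types that cannot
  // be vector elements are treated as using no registers.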
6128 
6129   for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
6130     Instruction *I = IdxToInstr[i];
6131 
6132     // Remove all of the instructions that end at this location.
6133     InstrList &List = TransposeEnds[i];
6134     for (Instruction *ToRemove : List)
6135       OpenIntervals.erase(ToRemove);
6136 
6137     // Ignore instructions that are never used within the loop.
6138     if (!Ends.count(I))
6139       continue;
6140 
6141     // Skip ignored values.
6142     if (ValuesToIgnore.count(I))
6143       continue;
6144 
6145     // For each VF find the maximum usage of registers.
6146     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
6147       // Count the number of live intervals.
6148       SmallMapVector<unsigned, unsigned, 4> RegUsage;
6149 
      if (VFs[j].isScalar()) {
        for (auto *Inst : OpenIntervals) {
          unsigned ClassID =
              TTI.getRegisterClassForType(false, Inst->getType());
          // A missing class entry default-constructs to zero, so += suffices.
          RegUsage[ClassID] += 1;
        }
      } else {
        collectUniformsAndScalars(VFs[j]);
        for (auto *Inst : OpenIntervals) {
          // Skip ignored values for VF > 1.
          if (VecValuesToIgnore.count(Inst))
            continue;
          if (isScalarAfterVectorization(Inst, VFs[j])) {
            unsigned ClassID =
                TTI.getRegisterClassForType(false, Inst->getType());
            RegUsage[ClassID] += 1;
          } else {
            unsigned ClassID =
                TTI.getRegisterClassForType(true, Inst->getType());
            RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
          }
        }
      }

      // Merge this location's counts into the per-VF maximum for each class.
      for (const auto &Pair : RegUsage)
        MaxUsages[j][Pair.first] =
            std::max(MaxUsages[j][Pair.first], Pair.second);
6186     }
6187 
6188     LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
6189                       << OpenIntervals.size() << '\n');
6190 
6191     // Add the current instruction to the list of open intervals.
6192     OpenIntervals.insert(I);
6193   }
6194 
6195   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
6196     SmallMapVector<unsigned, unsigned, 4> Invariant;
6197 
    for (auto *Inst : LoopInvariants) {
      unsigned Usage =
          VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
      unsigned ClassID =
          TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
      // A missing class entry default-constructs to zero, so += suffices.
      Invariant[ClassID] += Usage;
    }
6208 
6209     LLVM_DEBUG({
6210       dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
6211       dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
6212              << " item\n";
6213       for (const auto &pair : MaxUsages[i]) {
6214         dbgs() << "LV(REG): RegisterClass: "
6215                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6216                << " registers\n";
6217       }
6218       dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
6219              << " item\n";
6220       for (const auto &pair : Invariant) {
6221         dbgs() << "LV(REG): RegisterClass: "
6222                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6223                << " registers\n";
6224       }
6225     });
6226 
6227     RU.LoopInvariantRegs = Invariant;
6228     RU.MaxLocalUsers = MaxUsages[i];
6229     RUs[i] = RU;
6230   }
6231 
6232   return RUs;
6233 }
6234 
6235 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I,
6236                                                            ElementCount VF) {
  // TODO: Cost model for emulated masked load/store is completely
  // broken. This hack guides the cost model to use an artificially
  // high enough value to practically disable vectorization with such
  // operations, except where the previously deployed legality hack allowed
  // using very low cost values. This is to avoid regressions coming simply
  // from moving the "masked load/store" check from legality to the cost
  // model. Masked load/gather emulation was previously never allowed, while
  // a limited amount of masked store/scatter emulation was.
6245   assert(isPredicatedInst(I, VF) && "Expecting a scalar emulated instruction");
6246   return isa<LoadInst>(I) ||
6247          (isa<StoreInst>(I) &&
6248           NumPredStores > NumberOfStoresToPredicate);
6249 }
6250 
6251 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
6252   // If we aren't vectorizing the loop, or if we've already collected the
6253   // instructions to scalarize, there's nothing to do. Collection may already
6254   // have occurred if we have a user-selected VF and are now computing the
6255   // expected cost for interleaving.
6256   if (VF.isScalar() || VF.isZero() ||
6257       InstsToScalarize.find(VF) != InstsToScalarize.end())
6258     return;
6259 
  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
6261   // not profitable to scalarize any instructions, the presence of VF in the
6262   // map will indicate that we've analyzed it already.
6263   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
6264 
  // Find all the instructions that are scalar with predication in the loop and
  // determine whether it would be better not to if-convert the blocks they are
  // in. If so, we also record the instructions to scalarize.
6268   for (BasicBlock *BB : TheLoop->blocks()) {
6269     if (!blockNeedsPredicationForAnyReason(BB))
6270       continue;
6271     for (Instruction &I : *BB)
6272       if (isScalarWithPredication(&I, VF)) {
6273         ScalarCostsTy ScalarCosts;
        // Do not apply the discount if the VF is scalable, because that would
        // lead to invalid scalarization costs. Also skip the discount logic if
        // the hacked cost is needed for emulated masked memrefs.
6278         if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I, VF) &&
6279             computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
6280           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
6281         // Remember that BB will remain after vectorization.
6282         PredicatedBBsAfterVectorization.insert(BB);
6283       }
6284   }
6285 }
6286 
6287 int LoopVectorizationCostModel::computePredInstDiscount(
6288     Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
6289   assert(!isUniformAfterVectorization(PredInst, VF) &&
6290          "Instruction marked uniform-after-vectorization will be predicated");
6291 
6292   // Initialize the discount to zero, meaning that the scalar version and the
6293   // vector version cost the same.
6294   InstructionCost Discount = 0;
6295 
6296   // Holds instructions to analyze. The instructions we visit are mapped in
6297   // ScalarCosts. Those instructions are the ones that would be scalarized if
6298   // we find that the scalar version costs less.
6299   SmallVector<Instruction *, 8> Worklist;
6300 
6301   // Returns true if the given instruction can be scalarized.
6302   auto canBeScalarized = [&](Instruction *I) -> bool {
6303     // We only attempt to scalarize instructions forming a single-use chain
6304     // from the original predicated block that would otherwise be vectorized.
6305     // Although not strictly necessary, we give up on instructions we know will
6306     // already be scalar to avoid traversing chains that are unlikely to be
6307     // beneficial.
6308     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
6309         isScalarAfterVectorization(I, VF))
6310       return false;
6311 
6312     // If the instruction is scalar with predication, it will be analyzed
6313     // separately. We ignore it within the context of PredInst.
6314     if (isScalarWithPredication(I, VF))
6315       return false;
6316 
6317     // If any of the instruction's operands are uniform after vectorization,
6318     // the instruction cannot be scalarized. This prevents, for example, a
6319     // masked load from being scalarized.
6320     //
6321     // We assume we will only emit a value for lane zero of an instruction
6322     // marked uniform after vectorization, rather than VF identical values.
6323     // Thus, if we scalarize an instruction that uses a uniform, we would
6324     // create uses of values corresponding to the lanes we aren't emitting code
6325     // for. This behavior can be changed by allowing getScalarValue to clone
6326     // the lane zero values for uniforms rather than asserting.
6327     for (Use &U : I->operands())
6328       if (auto *J = dyn_cast<Instruction>(U.get()))
6329         if (isUniformAfterVectorization(J, VF))
6330           return false;
6331 
6332     // Otherwise, we can scalarize the instruction.
6333     return true;
6334   };
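  // As an illustration (hypothetical IR), in a predicated block containing
  //   %t = add i32 %x, 1
  //   %p = udiv i32 %t, %y   ; PredInst
  // the single-use %t feeding %p can be pulled into the scalarized chain,
  // whereas a value with additional users, one already scalar after
  // vectorization, or one whose operands are uniform after vectorization is
  // left alone.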
6335 
6336   // Compute the expected cost discount from scalarizing the entire expression
6337   // feeding the predicated instruction. We currently only consider expressions
6338   // that are single-use instruction chains.
6339   Worklist.push_back(PredInst);
6340   while (!Worklist.empty()) {
6341     Instruction *I = Worklist.pop_back_val();
6342 
6343     // If we've already analyzed the instruction, there's nothing to do.
6344     if (ScalarCosts.find(I) != ScalarCosts.end())
6345       continue;
6346 
6347     // Compute the cost of the vector instruction. Note that this cost already
6348     // includes the scalarization overhead of the predicated instruction.
6349     InstructionCost VectorCost = getInstructionCost(I, VF).first;
6350 
6351     // Compute the cost of the scalarized instruction. This cost is the cost of
6352     // the instruction as if it wasn't if-converted and instead remained in the
6353     // predicated block. We will scale this cost by block probability after
6354     // computing the scalarization overhead.
6355     InstructionCost ScalarCost =
6356         VF.getFixedValue() *
6357         getInstructionCost(I, ElementCount::getFixed(1)).first;
6358 
6359     // Compute the scalarization overhead of needed insertelement instructions
6360     // and phi nodes.
6361     if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) {
6362       ScalarCost += TTI.getScalarizationOverhead(
6363           cast<VectorType>(ToVectorTy(I->getType(), VF)),
6364           APInt::getAllOnes(VF.getFixedValue()), true, false);
6365       ScalarCost +=
6366           VF.getFixedValue() *
6367           TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput);
6368     }
6369 
6370     // Compute the scalarization overhead of needed extractelement
6371     // instructions. For each of the instruction's operands, if the operand can
6372     // be scalarized, add it to the worklist; otherwise, account for the
6373     // overhead.
6374     for (Use &U : I->operands())
6375       if (auto *J = dyn_cast<Instruction>(U.get())) {
6376         assert(VectorType::isValidElementType(J->getType()) &&
6377                "Instruction has non-scalar type");
6378         if (canBeScalarized(J))
6379           Worklist.push_back(J);
6380         else if (needsExtract(J, VF)) {
6381           ScalarCost += TTI.getScalarizationOverhead(
6382               cast<VectorType>(ToVectorTy(J->getType(), VF)),
6383               APInt::getAllOnes(VF.getFixedValue()), false, true);
6384         }
6385       }
6386 
6387     // Scale the total scalar cost by block probability.
6388     ScalarCost /= getReciprocalPredBlockProb();
6389 
6390     // Compute the discount. A non-negative discount means the vector version
6391     // of the instruction costs more, and scalarizing would be beneficial.
6392     Discount += VectorCost - ScalarCost;
6393     ScalarCosts[I] = ScalarCost;
6394   }
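  // As an example with made-up costs: if the vector form of an instruction in
  // the chain costs 8 and its probability-scaled scalar form costs 5, it
  // contributes a discount of 3; a non-negative total tells the caller that
  // scalarizing the chain is at least as cheap as keeping it vectorized.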
6395 
6396   return *Discount.getValue();
6397 }
6398 
6399 LoopVectorizationCostModel::VectorizationCostTy
6400 LoopVectorizationCostModel::expectedCost(
6401     ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) {
6402   VectorizationCostTy Cost;
6403 
6404   // For each block.
6405   for (BasicBlock *BB : TheLoop->blocks()) {
6406     VectorizationCostTy BlockCost;
6407 
6408     // For each instruction in the old loop.
6409     for (Instruction &I : BB->instructionsWithoutDebug()) {
6410       // Skip ignored values.
6411       if (ValuesToIgnore.count(&I) ||
6412           (VF.isVector() && VecValuesToIgnore.count(&I)))
6413         continue;
6414 
6415       VectorizationCostTy C = getInstructionCost(&I, VF);
6416 
6417       // Check if we should override the cost.
6418       if (C.first.isValid() &&
6419           ForceTargetInstructionCost.getNumOccurrences() > 0)
6420         C.first = InstructionCost(ForceTargetInstructionCost);
6421 
6422       // Keep a list of instructions with invalid costs.
6423       if (Invalid && !C.first.isValid())
6424         Invalid->emplace_back(&I, VF);
6425 
6426       BlockCost.first += C.first;
6427       BlockCost.second |= C.second;
6428       LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
6429                         << " for VF " << VF << " For instruction: " << I
6430                         << '\n');
6431     }
6432 
6433     // If we are vectorizing a predicated block, it will have been
6434     // if-converted. This means that the block's instructions (aside from
6435     // stores and instructions that may divide by zero) will now be
6436     // unconditionally executed. For the scalar case, we may not always execute
6437     // the predicated block, if it is an if-else block. Thus, scale the block's
6438     // cost by the probability of executing it. blockNeedsPredication from
6439     // Legal is used so as to not include all blocks in tail folded loops.
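    // For example, under an assumed one-in-two execution probability, an
    // if-then block in the scalar loop contributes only half of its raw cost
    // to the loop total.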
6440     if (VF.isScalar() && Legal->blockNeedsPredication(BB))
6441       BlockCost.first /= getReciprocalPredBlockProb();
6442 
6443     Cost.first += BlockCost.first;
6444     Cost.second |= BlockCost.second;
6445   }
6446 
6447   return Cost;
6448 }
6449 
/// Gets the address access SCEV after verifying that the access pattern is
/// loop invariant except for the induction variable dependence.
6452 ///
6453 /// This SCEV can be sent to the Target in order to estimate the address
6454 /// calculation cost.
static const SCEV *getAddressAccessSCEV(Value *Ptr,
                                        LoopVectorizationLegality *Legal,
                                        PredicatedScalarEvolution &PSE,
                                        const Loop *TheLoop) {
6461   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
6462   if (!Gep)
6463     return nullptr;
6464 
6465   // We are looking for a gep with all loop invariant indices except for one
6466   // which should be an induction variable.
6467   auto SE = PSE.getSE();
6468   unsigned NumOperands = Gep->getNumOperands();
6469   for (unsigned i = 1; i < NumOperands; ++i) {
6470     Value *Opd = Gep->getOperand(i);
6471     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
6472         !Legal->isInductionVariable(Opd))
6473       return nullptr;
6474   }
6475 
  // Now we know we have a GEP of the form (ptr, %inv, %ind, %inv). Return the
  // SCEV for Ptr.
6477   return PSE.getSCEV(Ptr);
6478 }
6479 
6480 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
6481   return Legal->hasStride(I->getOperand(0)) ||
6482          Legal->hasStride(I->getOperand(1));
6483 }
6484 
6485 InstructionCost
6486 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
6487                                                         ElementCount VF) {
6488   assert(VF.isVector() &&
6489          "Scalarization cost of instruction implies vectorization.");
6490   if (VF.isScalable())
6491     return InstructionCost::getInvalid();
6492 
6493   Type *ValTy = getLoadStoreType(I);
6494   auto SE = PSE.getSE();
6495 
6496   unsigned AS = getLoadStoreAddressSpace(I);
6497   Value *Ptr = getLoadStorePointerOperand(I);
6498   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
6499   // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost`
6500   //       that it is being called from this specific place.
6501 
  // Figure out whether the access is strided and get the stride value if it
  // is known at compile time.
6504   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
6505 
6506   // Get the cost of the scalar memory instruction and address computation.
6507   InstructionCost Cost =
6508       VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
6509 
6510   // Don't pass *I here, since it is scalar but will actually be part of a
6511   // vectorized loop where the user of it is a vectorized instruction.
6512   const Align Alignment = getLoadStoreAlignment(I);
6513   Cost += VF.getKnownMinValue() *
6514           TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
6515                               AS, TTI::TCK_RecipThroughput);
6516 
6517   // Get the overhead of the extractelement and insertelement instructions
6518   // we might create due to scalarization.
6519   Cost += getScalarizationOverhead(I, VF);
6520 
6521   // If we have a predicated load/store, it will need extra i1 extracts and
6522   // conditional branches, but may not be executed for each vector lane. Scale
6523   // the cost by the probability of executing the predicated block.
6524   if (isPredicatedInst(I, VF)) {
6525     Cost /= getReciprocalPredBlockProb();
6526 
6527     // Add the cost of an i1 extract and a branch
6528     auto *Vec_i1Ty =
6529         VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF);
6530     Cost += TTI.getScalarizationOverhead(
6531         Vec_i1Ty, APInt::getAllOnes(VF.getKnownMinValue()),
6532         /*Insert=*/false, /*Extract=*/true);
6533     Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput);
6534 
6535     if (useEmulatedMaskMemRefHack(I, VF))
6536       // Artificially setting to a high enough value to practically disable
6537       // vectorization with such operations.
6538       Cost = 3000000;
6539   }
6540 
6541   return Cost;
6542 }
6543 
6544 InstructionCost
6545 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
6546                                                     ElementCount VF) {
6547   Type *ValTy = getLoadStoreType(I);
6548   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6549   Value *Ptr = getLoadStorePointerOperand(I);
6550   unsigned AS = getLoadStoreAddressSpace(I);
6551   int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr);
6552   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6553 
6554   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6555          "Stride should be 1 or -1 for consecutive memory access");
6556   const Align Alignment = getLoadStoreAlignment(I);
6557   InstructionCost Cost = 0;
6558   if (Legal->isMaskRequired(I))
6559     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6560                                       CostKind);
6561   else
6562     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6563                                 CostKind, I);
6564 
6565   bool Reverse = ConsecutiveStride < 0;
6566   if (Reverse)
6567     Cost +=
6568         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
6569   return Cost;
6570 }
6571 
6572 InstructionCost
6573 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
6574                                                 ElementCount VF) {
6575   assert(Legal->isUniformMemOp(*I));
6576 
6577   Type *ValTy = getLoadStoreType(I);
6578   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6579   const Align Alignment = getLoadStoreAlignment(I);
6580   unsigned AS = getLoadStoreAddressSpace(I);
6581   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6582   if (isa<LoadInst>(I)) {
6583     return TTI.getAddressComputationCost(ValTy) +
6584            TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
6585                                CostKind) +
6586            TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
6587   }
6588   StoreInst *SI = cast<StoreInst>(I);
6589 
6590   bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
6591   return TTI.getAddressComputationCost(ValTy) +
6592          TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
6593                              CostKind) +
6594          (isLoopInvariantStoreValue
6595               ? 0
6596               : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
6597                                        VF.getKnownMinValue() - 1));
6598 }
6599 
6600 InstructionCost
6601 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
6602                                                  ElementCount VF) {
6603   Type *ValTy = getLoadStoreType(I);
6604   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6605   const Align Alignment = getLoadStoreAlignment(I);
6606   const Value *Ptr = getLoadStorePointerOperand(I);
6607 
6608   return TTI.getAddressComputationCost(VectorTy) +
6609          TTI.getGatherScatterOpCost(
6610              I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
6611              TargetTransformInfo::TCK_RecipThroughput, I);
6612 }
6613 
6614 InstructionCost
6615 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
6616                                                    ElementCount VF) {
6617   // TODO: Once we have support for interleaving with scalable vectors
6618   // we can calculate the cost properly here.
6619   if (VF.isScalable())
6620     return InstructionCost::getInvalid();
6621 
6622   Type *ValTy = getLoadStoreType(I);
6623   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6624   unsigned AS = getLoadStoreAddressSpace(I);
6625 
6626   auto Group = getInterleavedAccessGroup(I);
6627   assert(Group && "Fail to get an interleaved access group.");
6628 
6629   unsigned InterleaveFactor = Group->getFactor();
6630   auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
6631 
6632   // Holds the indices of existing members in the interleaved group.
6633   SmallVector<unsigned, 4> Indices;
6634   for (unsigned IF = 0; IF < InterleaveFactor; IF++)
6635     if (Group->getMember(IF))
6636       Indices.push_back(IF);
6637 
6638   // Calculate the cost of the whole interleaved group.
6639   bool UseMaskForGaps =
6640       (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) ||
6641       (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()));
6642   InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
6643       I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
6644       AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);
6645 
6646   if (Group->isReverse()) {
6647     // TODO: Add support for reversed masked interleaved access.
6648     assert(!Legal->isMaskRequired(I) &&
6649            "Reverse masked interleaved access not supported.");
6650     Cost +=
6651         Group->getNumMembers() *
6652         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
6653   }
6654   return Cost;
6655 }
6656 
6657 Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost(
6658     Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
6659   using namespace llvm::PatternMatch;
  // Early exit if there are no in-loop reductions.
6661   if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
6662     return None;
6663   auto *VectorTy = cast<VectorType>(Ty);
6664 
6665   // We are looking for a pattern of, and finding the minimal acceptable cost:
6666   //  reduce(mul(ext(A), ext(B))) or
6667   //  reduce(mul(A, B)) or
6668   //  reduce(ext(A)) or
6669   //  reduce(A).
  // The basic idea is that we walk down the tree from I, finding the root
  // reduction instruction in InLoopReductionImmediateChains. From there we find
  // the pattern of mul/ext and test the cost of the entire pattern vs the cost
  // of the components. If the reduction cost is lower, we return it for the
  // reduction instruction and 0 for the other instructions in the pattern. If
  // it is not, we return None so that the original cost modelling is used
  // instead.
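  //
  // For instance (illustrative source-level example), a loop computing
  //   sum += (int)a[i] * (int)b[i]   // a and b are arrays of narrow integers
  // matches reduce(mul(ext(A), ext(B))); if the target reports a cheaper
  // extended multiply-add reduction, that cost is returned for the final add
  // and the mul/ext instructions in the pattern are costed at zero.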
6677   Instruction *RetI = I;
6678   if (match(RetI, m_ZExtOrSExt(m_Value()))) {
6679     if (!RetI->hasOneUser())
6680       return None;
6681     RetI = RetI->user_back();
6682   }
6683   if (match(RetI, m_Mul(m_Value(), m_Value())) &&
6684       RetI->user_back()->getOpcode() == Instruction::Add) {
6685     if (!RetI->hasOneUser())
6686       return None;
6687     RetI = RetI->user_back();
6688   }
6689 
  // Test if the found instruction is a reduction. If it is not, return None so
  // that the caller falls back to the original cost modelling.
6692   if (!InLoopReductionImmediateChains.count(RetI))
6693     return None;
6694 
6695   // Find the reduction this chain is a part of and calculate the basic cost of
6696   // the reduction on its own.
6697   Instruction *LastChain = InLoopReductionImmediateChains[RetI];
6698   Instruction *ReductionPhi = LastChain;
6699   while (!isa<PHINode>(ReductionPhi))
6700     ReductionPhi = InLoopReductionImmediateChains[ReductionPhi];
6701 
6702   const RecurrenceDescriptor &RdxDesc =
6703       Legal->getReductionVars().find(cast<PHINode>(ReductionPhi))->second;
6704 
6705   InstructionCost BaseCost = TTI.getArithmeticReductionCost(
6706       RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind);
6707 
6708   // For a call to the llvm.fmuladd intrinsic we need to add the cost of a
6709   // normal fmul instruction to the cost of the fadd reduction.
6710   if (RdxDesc.getRecurrenceKind() == RecurKind::FMulAdd)
6711     BaseCost +=
6712         TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind);
6713 
6714   // If we're using ordered reductions then we can just return the base cost
6715   // here, since getArithmeticReductionCost calculates the full ordered
6716   // reduction cost when FP reassociation is not allowed.
6717   if (useOrderedReductions(RdxDesc))
6718     return BaseCost;
6719 
6720   // Get the operand that was not the reduction chain and match it to one of the
6721   // patterns, returning the better cost if it is found.
6722   Instruction *RedOp = RetI->getOperand(1) == LastChain
6723                            ? dyn_cast<Instruction>(RetI->getOperand(0))
6724                            : dyn_cast<Instruction>(RetI->getOperand(1));
6725 
6726   VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
6727 
6728   Instruction *Op0, *Op1;
6729   if (RedOp &&
6730       match(RedOp,
6731             m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) &&
6732       match(Op0, m_ZExtOrSExt(m_Value())) &&
6733       Op0->getOpcode() == Op1->getOpcode() &&
6734       Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
6735       !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) &&
6736       (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) {
6737 
    // Matched reduce(ext(mul(ext(A), ext(B)))).
6739     // Note that the extend opcodes need to all match, or if A==B they will have
6740     // been converted to zext(mul(sext(A), sext(A))) as it is known positive,
6741     // which is equally fine.
6742     bool IsUnsigned = isa<ZExtInst>(Op0);
6743     auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
6744     auto *MulType = VectorType::get(Op0->getType(), VectorTy);
6745 
6746     InstructionCost ExtCost =
6747         TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType,
6748                              TTI::CastContextHint::None, CostKind, Op0);
6749     InstructionCost MulCost =
6750         TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind);
6751     InstructionCost Ext2Cost =
6752         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType,
6753                              TTI::CastContextHint::None, CostKind, RedOp);
6754 
6755     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6756         /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6757         CostKind);
6758 
6759     if (RedCost.isValid() &&
6760         RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost)
6761       return I == RetI ? RedCost : 0;
6762   } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) &&
6763              !TheLoop->isLoopInvariant(RedOp)) {
6764     // Matched reduce(ext(A))
6765     bool IsUnsigned = isa<ZExtInst>(RedOp);
6766     auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
6767     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6768         /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6769         CostKind);
6770 
6771     InstructionCost ExtCost =
6772         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
6773                              TTI::CastContextHint::None, CostKind, RedOp);
6774     if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
6775       return I == RetI ? RedCost : 0;
6776   } else if (RedOp &&
6777              match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) {
6778     if (match(Op0, m_ZExtOrSExt(m_Value())) &&
6779         Op0->getOpcode() == Op1->getOpcode() &&
6780         !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
6781       bool IsUnsigned = isa<ZExtInst>(Op0);
6782       Type *Op0Ty = Op0->getOperand(0)->getType();
6783       Type *Op1Ty = Op1->getOperand(0)->getType();
6784       Type *LargestOpTy =
6785           Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty
6786                                                                     : Op0Ty;
6787       auto *ExtType = VectorType::get(LargestOpTy, VectorTy);
6788 
6789       // Matched reduce(mul(ext(A), ext(B))), where the two ext may be of
6790       // different sizes. We take the largest type as the ext to reduce, and add
6791       // the remaining cost as, for example reduce(mul(ext(ext(A)), ext(B))).
6792       InstructionCost ExtCost0 = TTI.getCastInstrCost(
6793           Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy),
6794           TTI::CastContextHint::None, CostKind, Op0);
6795       InstructionCost ExtCost1 = TTI.getCastInstrCost(
6796           Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy),
6797           TTI::CastContextHint::None, CostKind, Op1);
6798       InstructionCost MulCost =
6799           TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
6800 
6801       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6802           /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6803           CostKind);
6804       InstructionCost ExtraExtCost = 0;
6805       if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) {
6806         Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1;
6807         ExtraExtCost = TTI.getCastInstrCost(
6808             ExtraExtOp->getOpcode(), ExtType,
6809             VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy),
6810             TTI::CastContextHint::None, CostKind, ExtraExtOp);
6811       }
6812 
6813       if (RedCost.isValid() &&
6814           (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost))
6815         return I == RetI ? RedCost : 0;
6816     } else if (!match(I, m_ZExtOrSExt(m_Value()))) {
      // Matched reduce(mul(A, B)).
6818       InstructionCost MulCost =
6819           TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
6820 
6821       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6822           /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy,
6823           CostKind);
6824 
6825       if (RedCost.isValid() && RedCost < MulCost + BaseCost)
6826         return I == RetI ? RedCost : 0;
6827     }
6828   }
6829 
6830   return I == RetI ? Optional<InstructionCost>(BaseCost) : None;
6831 }
6832 
6833 InstructionCost
6834 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
6835                                                      ElementCount VF) {
  // Calculate the scalar cost only. The vectorization cost should already have
  // been computed and cached by this point.
6838   if (VF.isScalar()) {
6839     Type *ValTy = getLoadStoreType(I);
6840     const Align Alignment = getLoadStoreAlignment(I);
6841     unsigned AS = getLoadStoreAddressSpace(I);
6842 
6843     return TTI.getAddressComputationCost(ValTy) +
6844            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
6845                                TTI::TCK_RecipThroughput, I);
6846   }
6847   return getWideningCost(I, VF);
6848 }
6849 
6850 LoopVectorizationCostModel::VectorizationCostTy
6851 LoopVectorizationCostModel::getInstructionCost(Instruction *I,
6852                                                ElementCount VF) {
6853   // If we know that this instruction will remain uniform, check the cost of
6854   // the scalar version.
6855   if (isUniformAfterVectorization(I, VF))
6856     VF = ElementCount::getFixed(1);
6857 
6858   if (VF.isVector() && isProfitableToScalarize(I, VF))
6859     return VectorizationCostTy(InstsToScalarize[VF][I], false);
6860 
6861   // Forced scalars do not have any scalarization overhead.
6862   auto ForcedScalar = ForcedScalars.find(VF);
6863   if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
6864     auto InstSet = ForcedScalar->second;
6865     if (InstSet.count(I))
6866       return VectorizationCostTy(
6867           (getInstructionCost(I, ElementCount::getFixed(1)).first *
6868            VF.getKnownMinValue()),
6869           false);
6870   }
6871 
6872   Type *VectorTy;
6873   InstructionCost C = getInstructionCost(I, VF, VectorTy);
6874 
6875   bool TypeNotScalarized = false;
6876   if (VF.isVector() && VectorTy->isVectorTy()) {
6877     unsigned NumParts = TTI.getNumberOfParts(VectorTy);
6878     if (NumParts)
6879       TypeNotScalarized = NumParts < VF.getKnownMinValue();
6880     else
6881       C = InstructionCost::getInvalid();
6882   }
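  // E.g. an <8 x i64> value that the target legalizes into 8 separate parts is
  // considered scalarized for VF = 8, whereas a type split into fewer parts
  // is not.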
6883   return VectorizationCostTy(C, TypeNotScalarized);
6884 }
6885 
6886 InstructionCost
6887 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
6888                                                      ElementCount VF) const {
6889 
6890   // There is no mechanism yet to create a scalable scalarization loop,
6891   // so this is currently Invalid.
6892   if (VF.isScalable())
6893     return InstructionCost::getInvalid();
6894 
6895   if (VF.isScalar())
6896     return 0;
6897 
6898   InstructionCost Cost = 0;
6899   Type *RetTy = ToVectorTy(I->getType(), VF);
6900   if (!RetTy->isVoidTy() &&
6901       (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
6902     Cost += TTI.getScalarizationOverhead(
6903         cast<VectorType>(RetTy), APInt::getAllOnes(VF.getKnownMinValue()), true,
6904         false);
6905 
6906   // Some targets keep addresses scalar.
6907   if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
6908     return Cost;
6909 
6910   // Some targets support efficient element stores.
6911   if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
6912     return Cost;
6913 
6914   // Collect operands to consider.
6915   CallInst *CI = dyn_cast<CallInst>(I);
6916   Instruction::op_range Ops = CI ? CI->args() : I->operands();
6917 
6918   // Skip operands that do not require extraction/scalarization and do not incur
6919   // any overhead.
6920   SmallVector<Type *> Tys;
6921   for (auto *V : filterExtractingOperands(Ops, VF))
6922     Tys.push_back(MaybeVectorizeType(V->getType(), VF));
6923   return Cost + TTI.getOperandsScalarizationOverhead(
6924                     filterExtractingOperands(Ops, VF), Tys);
6925 }
6926 
6927 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
6928   if (VF.isScalar())
6929     return;
6930   NumPredStores = 0;
6931   for (BasicBlock *BB : TheLoop->blocks()) {
6932     // For each instruction in the old loop.
6933     for (Instruction &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
6935       if (!Ptr)
6936         continue;
6937 
6938       // TODO: We should generate better code and update the cost model for
6939       // predicated uniform stores. Today they are treated as any other
6940       // predicated store (see added test cases in
6941       // invariant-store-vectorization.ll).
6942       if (isa<StoreInst>(&I) && isScalarWithPredication(&I, VF))
6943         NumPredStores++;
6944 
6945       if (Legal->isUniformMemOp(I)) {
6946         // TODO: Avoid replicating loads and stores instead of
6947         // relying on instcombine to remove them.
6948         // Load: Scalar load + broadcast
6949         // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
6950         InstructionCost Cost;
6951         if (isa<StoreInst>(&I) && VF.isScalable() &&
6952             isLegalGatherOrScatter(&I, VF)) {
6953           Cost = getGatherScatterCost(&I, VF);
6954           setWideningDecision(&I, VF, CM_GatherScatter, Cost);
6955         } else {
6956           assert((isa<LoadInst>(&I) || !VF.isScalable()) &&
6957                  "Cannot yet scalarize uniform stores");
6958           Cost = getUniformMemOpCost(&I, VF);
6959           setWideningDecision(&I, VF, CM_Scalarize, Cost);
6960         }
6961         continue;
6962       }
6963 
6964       // We assume that widening is the best solution when possible.
6965       if (memoryInstructionCanBeWidened(&I, VF)) {
6966         InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
6967         int ConsecutiveStride = Legal->isConsecutivePtr(
6968             getLoadStoreType(&I), getLoadStorePointerOperand(&I));
6969         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6970                "Expected consecutive stride.");
6971         InstWidening Decision =
6972             ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
6973         setWideningDecision(&I, VF, Decision, Cost);
6974         continue;
6975       }
6976 
6977       // Choose between Interleaving, Gather/Scatter or Scalarization.
6978       InstructionCost InterleaveCost = InstructionCost::getInvalid();
6979       unsigned NumAccesses = 1;
6980       if (isAccessInterleaved(&I)) {
6981         auto Group = getInterleavedAccessGroup(&I);
6982         assert(Group && "Fail to get an interleaved access group.");
6983 
6984         // Make one decision for the whole group.
6985         if (getWideningDecision(&I, VF) != CM_Unknown)
6986           continue;
6987 
6988         NumAccesses = Group->getNumMembers();
6989         if (interleavedAccessCanBeWidened(&I, VF))
6990           InterleaveCost = getInterleaveGroupCost(&I, VF);
6991       }
6992 
6993       InstructionCost GatherScatterCost =
6994           isLegalGatherOrScatter(&I, VF)
6995               ? getGatherScatterCost(&I, VF) * NumAccesses
6996               : InstructionCost::getInvalid();
6997 
6998       InstructionCost ScalarizationCost =
6999           getMemInstScalarizationCost(&I, VF) * NumAccesses;
7000 
7001       // Choose better solution for the current VF,
7002       // write down this decision and use it during vectorization.
7003       InstructionCost Cost;
7004       InstWidening Decision;
7005       if (InterleaveCost <= GatherScatterCost &&
7006           InterleaveCost < ScalarizationCost) {
7007         Decision = CM_Interleave;
7008         Cost = InterleaveCost;
7009       } else if (GatherScatterCost < ScalarizationCost) {
7010         Decision = CM_GatherScatter;
7011         Cost = GatherScatterCost;
7012       } else {
7013         Decision = CM_Scalarize;
7014         Cost = ScalarizationCost;
7015       }
      // If the instruction belongs to an interleave group, the whole group
7017       // receives the same decision. The whole group receives the cost, but
7018       // the cost will actually be assigned to one instruction.
7019       if (auto Group = getInterleavedAccessGroup(&I))
7020         setWideningDecision(Group, VF, Decision, Cost);
7021       else
7022         setWideningDecision(&I, VF, Decision, Cost);
7023     }
7024   }
7025 
7026   // Make sure that any load of address and any other address computation
7027   // remains scalar unless there is gather/scatter support. This avoids
7028   // inevitable extracts into address registers, and also has the benefit of
7029   // activating LSR more, since that pass can't optimize vectorized
7030   // addresses.
7031   if (TTI.prefersVectorizedAddressing())
7032     return;
7033 
7034   // Start with all scalar pointer uses.
7035   SmallPtrSet<Instruction *, 8> AddrDefs;
7036   for (BasicBlock *BB : TheLoop->blocks())
7037     for (Instruction &I : *BB) {
7038       Instruction *PtrDef =
7039         dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
7040       if (PtrDef && TheLoop->contains(PtrDef) &&
7041           getWideningDecision(&I, VF) != CM_GatherScatter)
7042         AddrDefs.insert(PtrDef);
7043     }
7044 
7045   // Add all instructions used to generate the addresses.
7046   SmallVector<Instruction *, 4> Worklist;
7047   append_range(Worklist, AddrDefs);
7048   while (!Worklist.empty()) {
7049     Instruction *I = Worklist.pop_back_val();
7050     for (auto &Op : I->operands())
7051       if (auto *InstOp = dyn_cast<Instruction>(Op))
7052         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
7053             AddrDefs.insert(InstOp).second)
7054           Worklist.push_back(InstOp);
7055   }
7056 
7057   for (auto *I : AddrDefs) {
7058     if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by
      // the cost functions, but since this involves the task of finding out
      // if the loaded register is involved in an address computation, it is
      // instead changed here when we know this is the case.
7063       InstWidening Decision = getWideningDecision(I, VF);
7064       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
7065         // Scalarize a widened load of address.
7066         setWideningDecision(
7067             I, VF, CM_Scalarize,
7068             (VF.getKnownMinValue() *
7069              getMemoryInstructionCost(I, ElementCount::getFixed(1))));
7070       else if (auto Group = getInterleavedAccessGroup(I)) {
7071         // Scalarize an interleave group of address loads.
7072         for (unsigned I = 0; I < Group->getFactor(); ++I) {
7073           if (Instruction *Member = Group->getMember(I))
7074             setWideningDecision(
7075                 Member, VF, CM_Scalarize,
7076                 (VF.getKnownMinValue() *
7077                  getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
7078         }
7079       }
7080     } else
7081       // Make sure I gets scalarized and a cost estimate without
7082       // scalarization overhead.
7083       ForcedScalars[VF].insert(I);
7084   }
7085 }
7086 
7087 InstructionCost
7088 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
7089                                                Type *&VectorTy) {
7090   Type *RetTy = I->getType();
7091   if (canTruncateToMinimalBitwidth(I, VF))
7092     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
7093   auto SE = PSE.getSE();
7094   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
7095 
7096   auto hasSingleCopyAfterVectorization = [this](Instruction *I,
7097                                                 ElementCount VF) -> bool {
7098     if (VF.isScalar())
7099       return true;
7100 
7101     auto Scalarized = InstsToScalarize.find(VF);
7102     assert(Scalarized != InstsToScalarize.end() &&
7103            "VF not yet analyzed for scalarization profitability");
7104     return !Scalarized->second.count(I) &&
7105            llvm::all_of(I->users(), [&](User *U) {
7106              auto *UI = cast<Instruction>(U);
7107              return !Scalarized->second.count(UI);
7108            });
7109   };
7110   (void) hasSingleCopyAfterVectorization;
7111 
7112   if (isScalarAfterVectorization(I, VF)) {
7113     // With the exception of GEPs and PHIs, after scalarization there should
7114     // only be one copy of the instruction generated in the loop. This is
7115     // because the VF is either 1, or any instructions that need scalarizing
    // have already been dealt with by the time we get here. As a result,
7117     // it means we don't have to multiply the instruction cost by VF.
7118     assert(I->getOpcode() == Instruction::GetElementPtr ||
7119            I->getOpcode() == Instruction::PHI ||
7120            (I->getOpcode() == Instruction::BitCast &&
7121             I->getType()->isPointerTy()) ||
7122            hasSingleCopyAfterVectorization(I, VF));
7123     VectorTy = RetTy;
7124   } else
7125     VectorTy = ToVectorTy(RetTy, VF);
7126 
7127   // TODO: We need to estimate the cost of intrinsic calls.
7128   switch (I->getOpcode()) {
7129   case Instruction::GetElementPtr:
7130     // We mark this instruction as zero-cost because the cost of GEPs in
7131     // vectorized code depends on whether the corresponding memory instruction
7132     // is scalarized or not. Therefore, we handle GEPs with the memory
7133     // instruction cost.
7134     return 0;
7135   case Instruction::Br: {
7136     // In cases of scalarized and predicated instructions, there will be VF
7137     // predicated blocks in the vectorized loop. Each branch around these
    // blocks also requires an extract of its vector compare i1 element.
7139     bool ScalarPredicatedBB = false;
7140     BranchInst *BI = cast<BranchInst>(I);
7141     if (VF.isVector() && BI->isConditional() &&
7142         (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
7143          PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
7144       ScalarPredicatedBB = true;
7145 
7146     if (ScalarPredicatedBB) {
      // It is not possible to scalarize a scalable vector with predicated
      // instructions.
7148       if (VF.isScalable())
7149         return InstructionCost::getInvalid();
7150       // Return cost for branches around scalarized and predicated blocks.
7151       auto *Vec_i1Ty =
7152           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
7153       return (
7154           TTI.getScalarizationOverhead(
7155               Vec_i1Ty, APInt::getAllOnes(VF.getFixedValue()), false, true) +
7156           (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue()));
7157     } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
7158       // The back-edge branch will remain, as will all scalar branches.
7159       return TTI.getCFInstrCost(Instruction::Br, CostKind);
7160     else
7161       // This branch will be eliminated by if-conversion.
7162       return 0;
7163     // Note: We currently assume zero cost for an unconditional branch inside
7164     // a predicated block since it will become a fall-through, although we
7165     // may decide in the future to call TTI for all branches.
7166   }
7167   case Instruction::PHI: {
7168     auto *Phi = cast<PHINode>(I);
7169 
7170     // First-order recurrences are replaced by vector shuffles inside the loop.
7171     // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
7172     if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi))
7173       return TTI.getShuffleCost(
7174           TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy),
7175           None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1));
7176 
7177     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
7178     // converted into select instructions. We require N - 1 selects per phi
7179     // node, where N is the number of incoming values.
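    // For example, a phi merging three incoming values lowers to two selects.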
7180     if (VF.isVector() && Phi->getParent() != TheLoop->getHeader())
7181       return (Phi->getNumIncomingValues() - 1) *
7182              TTI.getCmpSelInstrCost(
7183                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
7184                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
7185                  CmpInst::BAD_ICMP_PREDICATE, CostKind);
7186 
7187     return TTI.getCFInstrCost(Instruction::PHI, CostKind);
7188   }
7189   case Instruction::UDiv:
7190   case Instruction::SDiv:
7191   case Instruction::URem:
7192   case Instruction::SRem:
7193     // If we have a predicated instruction, it may not be executed for each
7194     // vector lane. Get the scalarization cost and scale this amount by the
7195     // probability of executing the predicated block. If the instruction is not
7196     // predicated, we fall through to the next case.
7197     if (VF.isVector() && isScalarWithPredication(I, VF)) {
7198       InstructionCost Cost = 0;
7199 
7200       // These instructions have a non-void type, so account for the phi nodes
7201       // that we will create. This cost is likely to be zero. The phi node
7202       // cost, if any, should be scaled by the block probability because it
7203       // models a copy at the end of each predicated block.
7204       Cost += VF.getKnownMinValue() *
7205               TTI.getCFInstrCost(Instruction::PHI, CostKind);
7206 
7207       // The cost of the non-predicated instruction.
7208       Cost += VF.getKnownMinValue() *
7209               TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);
7210 
7211       // The cost of insertelement and extractelement instructions needed for
7212       // scalarization.
7213       Cost += getScalarizationOverhead(I, VF);
7214 
7215       // Scale the cost by the probability of executing the predicated blocks.
7216       // This assumes the predicated block for each vector lane is equally
7217       // likely.
7218       return Cost / getReciprocalPredBlockProb();
7219     }
7220     LLVM_FALLTHROUGH;
7221   case Instruction::Add:
7222   case Instruction::FAdd:
7223   case Instruction::Sub:
7224   case Instruction::FSub:
7225   case Instruction::Mul:
7226   case Instruction::FMul:
7227   case Instruction::FDiv:
7228   case Instruction::FRem:
7229   case Instruction::Shl:
7230   case Instruction::LShr:
7231   case Instruction::AShr:
7232   case Instruction::And:
7233   case Instruction::Or:
7234   case Instruction::Xor: {
7235     // Since we will replace the stride by 1, the multiplication should go away.
7236     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
7237       return 0;
7238 
7239     // Detect reduction patterns
7240     if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7241       return *RedCost;
7242 
7243     // Certain instructions can be cheaper to vectorize if they have a constant
7244     // second vector operand. One example of this is shifts on x86.
7245     Value *Op2 = I->getOperand(1);
7246     TargetTransformInfo::OperandValueProperties Op2VP;
7247     TargetTransformInfo::OperandValueKind Op2VK =
7248         TTI.getOperandInfo(Op2, Op2VP);
7249     if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
7250       Op2VK = TargetTransformInfo::OK_UniformValue;
7251 
7252     SmallVector<const Value *, 4> Operands(I->operand_values());
7253     return TTI.getArithmeticInstrCost(
7254         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7255         Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
7256   }
7257   case Instruction::FNeg: {
7258     return TTI.getArithmeticInstrCost(
7259         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7260         TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None,
7261         TargetTransformInfo::OP_None, I->getOperand(0), I);
7262   }
7263   case Instruction::Select: {
7264     SelectInst *SI = cast<SelectInst>(I);
7265     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
7266     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
7267 
7268     const Value *Op0, *Op1;
7269     using namespace llvm::PatternMatch;
7270     if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
7271                         match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
7272       // select x, y, false --> x & y
7273       // select x, true, y --> x | y
7274       TTI::OperandValueProperties Op1VP = TTI::OP_None;
7275       TTI::OperandValueProperties Op2VP = TTI::OP_None;
7276       TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP);
7277       TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP);
7278       assert(Op0->getType()->getScalarSizeInBits() == 1 &&
7279               Op1->getType()->getScalarSizeInBits() == 1);
7280 
7281       SmallVector<const Value *, 2> Operands{Op0, Op1};
7282       return TTI.getArithmeticInstrCost(
7283           match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And, VectorTy,
7284           CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I);
7285     }
7286 
7287     Type *CondTy = SI->getCondition()->getType();
7288     if (!ScalarCond)
7289       CondTy = VectorType::get(CondTy, VF);
7290 
7291     CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
7292     if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition()))
7293       Pred = Cmp->getPredicate();
7294     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, Pred,
7295                                   CostKind, I);
7296   }
7297   case Instruction::ICmp:
7298   case Instruction::FCmp: {
7299     Type *ValTy = I->getOperand(0)->getType();
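         // If the first operand can be truncated to a smaller bit width by the
         // minimal-bit-width analysis, cost the compare at that narrowed type.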
7300     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
7301     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
7302       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
7303     VectorTy = ToVectorTy(ValTy, VF);
7304     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr,
7305                                   cast<CmpInst>(I)->getPredicate(), CostKind,
7306                                   I);
7307   }
7308   case Instruction::Store:
7309   case Instruction::Load: {
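         // If the cost model decided to scalarize this access, use a
         // single-element type for it; otherwise use the type widened to the
         // chosen width.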
7310     ElementCount Width = VF;
7311     if (Width.isVector()) {
7312       InstWidening Decision = getWideningDecision(I, Width);
7313       assert(Decision != CM_Unknown &&
7314              "CM decision should be taken at this point");
7315       if (Decision == CM_Scalarize)
7316         Width = ElementCount::getFixed(1);
7317     }
7318     VectorTy = ToVectorTy(getLoadStoreType(I), Width);
7319     return getMemoryInstructionCost(I, VF);
7320   }
7321   case Instruction::BitCast:
7322     if (I->getType()->isPointerTy())
7323       return 0;
7324     LLVM_FALLTHROUGH;
7325   case Instruction::ZExt:
7326   case Instruction::SExt:
7327   case Instruction::FPToUI:
7328   case Instruction::FPToSI:
7329   case Instruction::FPExt:
7330   case Instruction::PtrToInt:
7331   case Instruction::IntToPtr:
7332   case Instruction::SIToFP:
7333   case Instruction::UIToFP:
7334   case Instruction::Trunc:
7335   case Instruction::FPTrunc: {
7336     // Computes the CastContextHint from a Load/Store instruction.
7337     auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
7338       assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
7339              "Expected a load or a store!");
7340 
7341       if (VF.isScalar() || !TheLoop->contains(I))
7342         return TTI::CastContextHint::Normal;
7343 
7344       switch (getWideningDecision(I, VF)) {
7345       case LoopVectorizationCostModel::CM_GatherScatter:
7346         return TTI::CastContextHint::GatherScatter;
7347       case LoopVectorizationCostModel::CM_Interleave:
7348         return TTI::CastContextHint::Interleave;
7349       case LoopVectorizationCostModel::CM_Scalarize:
7350       case LoopVectorizationCostModel::CM_Widen:
7351         return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked
7352                                         : TTI::CastContextHint::Normal;
7353       case LoopVectorizationCostModel::CM_Widen_Reverse:
7354         return TTI::CastContextHint::Reversed;
7355       case LoopVectorizationCostModel::CM_Unknown:
7356         llvm_unreachable("Instr did not go through cost modelling?");
7357       }
7358 
7359       llvm_unreachable("Unhandled case!");
7360     };
7361 
7362     unsigned Opcode = I->getOpcode();
7363     TTI::CastContextHint CCH = TTI::CastContextHint::None;
7364     // For Trunc, the context is the only user, which must be a StoreInst.
7365     if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
7366       if (I->hasOneUse())
7367         if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
7368           CCH = ComputeCCH(Store);
7369     }
7370     // For ZExt/SExt/FPExt, the context is the operand, which must be a LoadInst.
7371     else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
7372              Opcode == Instruction::FPExt) {
7373       if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
7374         CCH = ComputeCCH(Load);
7375     }
7376 
7377     // We optimize the truncation of induction variables having constant
7378     // integer steps. The cost of these truncations is the same as the scalar
7379     // operation.
7380     if (isOptimizableIVTruncate(I, VF)) {
7381       auto *Trunc = cast<TruncInst>(I);
7382       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
7383                                   Trunc->getSrcTy(), CCH, CostKind, Trunc);
7384     }
7385 
7386     // Detect reduction patterns
7387     if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7388       return *RedCost;
7389 
7390     Type *SrcScalarTy = I->getOperand(0)->getType();
7391     Type *SrcVecTy =
7392         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
7393     if (canTruncateToMinimalBitwidth(I, VF)) {
7394       // This cast is going to be shrunk. This may remove the cast or it might
7395       // turn it into a slightly different cast. For example, if MinBW == 16,
7396       // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
7397       //
7398       // Calculate the modified src and dest types.
7399       Type *MinVecTy = VectorTy;
7400       if (Opcode == Instruction::Trunc) {
7401         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
7402         VectorTy =
7403             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7404       } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
7405         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
7406         VectorTy =
7407             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7408       }
7409     }
7410 
7411     return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
7412   }
7413   case Instruction::Call: {
7414     if (RecurrenceDescriptor::isFMulAddIntrinsic(I))
7415       if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7416         return *RedCost;
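         // Compare the cost of a vectorized library call against the cost of the
         // matching vector intrinsic, if one exists, and return the cheaper one.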
7417     bool NeedToScalarize;
7418     CallInst *CI = cast<CallInst>(I);
7419     InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
7420     if (getVectorIntrinsicIDForCall(CI, TLI)) {
7421       InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF);
7422       return std::min(CallCost, IntrinsicCost);
7423     }
7424     return CallCost;
7425   }
7426   case Instruction::ExtractValue:
7427     return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput);
7428   case Instruction::Alloca:
7429     // We cannot easily widen an alloca to a scalable alloca, as
7430     // the result would need to be a vector of pointers.
7431     if (VF.isScalable())
7432       return InstructionCost::getInvalid();
7433     LLVM_FALLTHROUGH;
7434   default:
7435     // This opcode is unknown. Assume that it is the same as 'mul'.
7436     return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
7437   } // end of switch.
7438 }
7439 
7440 char LoopVectorize::ID = 0;
7441 
7442 static const char lv_name[] = "Loop Vectorization";
7443 
7444 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
7445 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
7446 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
7447 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
7448 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
7449 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
7450 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
7451 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
7452 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
7453 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
7454 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
7455 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
7456 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
7457 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
7458 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
7459 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
7460 
7461 namespace llvm {
7462 
7463 Pass *createLoopVectorizePass() { return new LoopVectorize(); }
7464 
7465 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
7466                               bool VectorizeOnlyWhenForced) {
7467   return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
7468 }
7469 
7470 } // end namespace llvm
7471 
7472 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
7473   // Check if the pointer operand of a load or store instruction is
7474   // consecutive.
7475   if (auto *Ptr = getLoadStorePointerOperand(Inst))
7476     return Legal->isConsecutivePtr(getLoadStoreType(Inst), Ptr);
7477   return false;
7478 }
7479 
7480 void LoopVectorizationCostModel::collectValuesToIgnore() {
7481   // Ignore ephemeral values.
7482   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
7483 
7484   // Ignore type-promoting instructions we identified during reduction
7485   // detection.
7486   for (auto &Reduction : Legal->getReductionVars()) {
7487     const RecurrenceDescriptor &RedDes = Reduction.second;
7488     const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
7489     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7490   }
7491   // Ignore type-casting instructions we identified during induction
7492   // detection.
7493   for (auto &Induction : Legal->getInductionVars()) {
7494     const InductionDescriptor &IndDes = Induction.second;
7495     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
7496     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7497   }
7498 }
7499 
7500 void LoopVectorizationCostModel::collectInLoopReductions() {
7501   for (auto &Reduction : Legal->getReductionVars()) {
7502     PHINode *Phi = Reduction.first;
7503     const RecurrenceDescriptor &RdxDesc = Reduction.second;
7504 
7505     // We don't collect reductions that are type promoted (yet).
7506     if (RdxDesc.getRecurrenceType() != Phi->getType())
7507       continue;
7508 
7509     // If the target would prefer this reduction to happen "in-loop", then we
7510     // want to record it as such.
7511     unsigned Opcode = RdxDesc.getOpcode();
7512     if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) &&
7513         !TTI.preferInLoopReduction(Opcode, Phi->getType(),
7514                                    TargetTransformInfo::ReductionFlags()))
7515       continue;
7516 
7517     // Check that we can correctly put the reductions into the loop, by
7518     // finding the chain of operations that leads from the phi to the loop
7519     // exit value.
7520     SmallVector<Instruction *, 4> ReductionOperations =
7521         RdxDesc.getReductionOpChain(Phi, TheLoop);
7522     bool InLoop = !ReductionOperations.empty();
7523     if (InLoop) {
7524       InLoopReductionChains[Phi] = ReductionOperations;
7525       // Add the elements to InLoopReductionImmediateChains for cost modelling.
7526       Instruction *LastChain = Phi;
7527       for (auto *I : ReductionOperations) {
7528         InLoopReductionImmediateChains[I] = LastChain;
7529         LastChain = I;
7530       }
7531     }
7532     LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
7533                       << " reduction for phi: " << *Phi << "\n");
7534   }
7535 }
7536 
7537 // TODO: we could return a pair of values that specify the max VF and
7538 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
7539 // `buildVPlans(VF, VF)`. We cannot do this yet because VPlan does not
7540 // have a cost model that can choose which plan to execute when more
7541 // than one is generated.
7542 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
7543                                  LoopVectorizationCostModel &CM) {
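       // The VF is the number of elements of the loop's widest scalar type that
       // fit in the widest vector register.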
7544   unsigned WidestType;
7545   std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
7546   return WidestVectorRegBits / WidestType;
7547 }
7548 
7549 VectorizationFactor
7550 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
7551   assert(!UserVF.isScalable() && "scalable vectors not yet supported");
7552   ElementCount VF = UserVF;
7553   // Outer loop handling: outer loops may require CFG and instruction level
7554   // transformations before even evaluating whether vectorization is profitable.
7555   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
7556   // the vectorization pipeline.
7557   if (!OrigLoop->isInnermost()) {
7558     // If the user doesn't provide a vectorization factor, determine a
7559     // reasonable one.
7560     if (UserVF.isZero()) {
7561       VF = ElementCount::getFixed(determineVPlanVF(
7562           TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
7563               .getFixedSize(),
7564           CM));
7565       LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
7566 
7567       // Make sure we have a VF > 1 for stress testing.
7568       if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
7569         LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
7570                           << "overriding computed VF.\n");
7571         VF = ElementCount::getFixed(4);
7572       }
7573     }
7574     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7575     assert(isPowerOf2_32(VF.getKnownMinValue()) &&
7576            "VF needs to be a power of two");
7577     LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
7578                       << "VF " << VF << " to build VPlans.\n");
7579     buildVPlans(VF, VF);
7580 
7581     // For VPlan build stress testing, we bail out after VPlan construction.
7582     if (VPlanBuildStressTest)
7583       return VectorizationFactor::Disabled();
7584 
7585     return {VF, 0 /*Cost*/};
7586   }
7587 
7588   LLVM_DEBUG(
7589       dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
7590                 "VPlan-native path.\n");
7591   return VectorizationFactor::Disabled();
7592 }
7593 
7594 Optional<VectorizationFactor>
7595 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
7596   assert(OrigLoop->isInnermost() && "Inner loop expected.");
7597   FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
7598   if (!MaxFactors) // Cases that should not be vectorized or interleaved.
7599     return None;
7600 
7601   // Invalidate interleave groups if all blocks of the loop will be predicated.
7602   if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) &&
7603       !useMaskedInterleavedAccesses(*TTI)) {
7604     LLVM_DEBUG(
7605         dbgs()
7606         << "LV: Invalidate all interleaved groups due to fold-tail by masking "
7607            "which requires masked-interleaved support.\n");
7608     if (CM.InterleaveInfo.invalidateGroups())
7609       // Invalidating interleave groups also requires invalidating all decisions
7610       // based on them, which includes widening decisions and uniform and scalar
7611       // values.
7612       CM.invalidateCostModelingDecisions();
7613   }
7614 
7615   ElementCount MaxUserVF =
7616       UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF;
7617   bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF);
7618   if (!UserVF.isZero() && UserVFIsLegal) {
7619     assert(isPowerOf2_32(UserVF.getKnownMinValue()) &&
7620            "VF needs to be a power of two");
7621     // Collect the instructions (and their associated costs) that will be more
7622     // profitable to scalarize.
7623     if (CM.selectUserVectorizationFactor(UserVF)) {
7624       LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
7625       CM.collectInLoopReductions();
7626       buildVPlansWithVPRecipes(UserVF, UserVF);
7627       LLVM_DEBUG(printPlans(dbgs()));
7628       return {{UserVF, 0}};
7629     } else
7630       reportVectorizationInfo("UserVF ignored because of invalid costs.",
7631                               "InvalidCost", ORE, OrigLoop);
7632   }
7633 
7634   // Populate the set of Vectorization Factor Candidates.
7635   ElementCountSet VFCandidates;
7636   for (auto VF = ElementCount::getFixed(1);
7637        ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2)
7638     VFCandidates.insert(VF);
7639   for (auto VF = ElementCount::getScalable(1);
7640        ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2)
7641     VFCandidates.insert(VF);
7642 
7643   for (const auto &VF : VFCandidates) {
7644     // Collect Uniform and Scalar instructions after vectorization with VF.
7645     CM.collectUniformsAndScalars(VF);
7646 
7647     // Collect the instructions (and their associated costs) that will be more
7648     // profitable to scalarize.
7649     if (VF.isVector())
7650       CM.collectInstsToScalarize(VF);
7651   }
7652 
7653   CM.collectInLoopReductions();
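       // Build VPlans for all candidate VFs, handling the fixed-width and
       // scalable ranges separately.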
7654   buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF);
7655   buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF);
7656 
7657   LLVM_DEBUG(printPlans(dbgs()));
7658   if (!MaxFactors.hasVector())
7659     return VectorizationFactor::Disabled();
7660 
7661   // Select the optimal vectorization factor.
7662   auto SelectedVF = CM.selectVectorizationFactor(VFCandidates);
7663 
7664   // Check if it is profitable to vectorize with runtime checks.
7665   unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks();
7666   if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) {
7667     bool PragmaThresholdReached =
7668         NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
7669     bool ThresholdReached =
7670         NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
7671     if ((ThresholdReached && !Hints.allowReordering()) ||
7672         PragmaThresholdReached) {
7673       ORE->emit([&]() {
7674         return OptimizationRemarkAnalysisAliasing(
7675                    DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(),
7676                    OrigLoop->getHeader())
7677                << "loop not vectorized: cannot prove it is safe to reorder "
7678                   "memory operations";
7679       });
7680       LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
7681       Hints.emitRemarkWithHints();
7682       return VectorizationFactor::Disabled();
7683     }
7684   }
7685   return SelectedVF;
7686 }
7687 
7688 VPlan &LoopVectorizationPlanner::getBestPlanFor(ElementCount VF) const {
7689   assert(count_if(VPlans,
7690                   [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) ==
7691              1 &&
7692          "Best VF does not have a single VPlan.");
7693 
7694   for (const VPlanPtr &Plan : VPlans) {
7695     if (Plan->hasVF(VF))
7696       return *Plan.get();
7697   }
7698   llvm_unreachable("No plan found!");
7699 }
7700 
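     // Attach llvm.loop.unroll.runtime.disable metadata to the given loop, unless
     // it already carries unroll-disable metadata.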
7701 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
7702   SmallVector<Metadata *, 4> MDs;
7703   // Reserve first location for self reference to the LoopID metadata node.
7704   MDs.push_back(nullptr);
7705   bool IsUnrollMetadata = false;
7706   MDNode *LoopID = L->getLoopID();
7707   if (LoopID) {
7708     // First find existing loop unrolling disable metadata.
7709     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
7710       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
7711       if (MD) {
7712         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
7713         IsUnrollMetadata =
7714             S && S->getString().startswith("llvm.loop.unroll.disable");
7715       }
7716       MDs.push_back(LoopID->getOperand(i));
7717     }
7718   }
7719 
7720   if (!IsUnrollMetadata) {
7721     // Add runtime unroll disable metadata.
7722     LLVMContext &Context = L->getHeader()->getContext();
7723     SmallVector<Metadata *, 1> DisableOperands;
7724     DisableOperands.push_back(
7725         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
7726     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
7727     MDs.push_back(DisableNode);
7728     MDNode *NewLoopID = MDNode::get(Context, MDs);
7729     // Set operand 0 to refer to the loop id itself.
7730     NewLoopID->replaceOperandWith(0, NewLoopID);
7731     L->setLoopID(NewLoopID);
7732   }
7733 }
7734 
7735 void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF,
7736                                            VPlan &BestVPlan,
7737                                            InnerLoopVectorizer &ILV,
7738                                            DominatorTree *DT) {
7739   LLVM_DEBUG(dbgs() << "Executing best plan with VF=" << BestVF << ", UF=" << BestUF
7740                     << '\n');
7741 
7742   // Perform the actual loop transformation.
7743 
7744   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
7745   VPTransformState State{BestVF, BestUF, LI, DT, ILV.Builder, &ILV, &BestVPlan};
7746   Value *CanonicalIVStartValue;
7747   std::tie(State.CFG.PrevBB, CanonicalIVStartValue) =
7748       ILV.createVectorizedLoopSkeleton();
7749   ILV.collectPoisonGeneratingRecipes(State);
7750 
7751   ILV.printDebugTracesAtStart();
7752 
7753   //===------------------------------------------------===//
7754   //
7755   // Notice: any optimizations or new instructions that go
7756   // into the code below should also be implemented in
7757   // the cost-model.
7758   //
7759   //===------------------------------------------------===//
7760 
7761   // 2. Copy and widen instructions from the old loop into the new loop.
7762   BestVPlan.prepareToExecute(ILV.getOrCreateTripCount(nullptr),
7763                              ILV.getOrCreateVectorTripCount(nullptr),
7764                              CanonicalIVStartValue, State);
7765   BestVPlan.execute(&State);
7766 
7767   // Keep all loop hints from the original loop on the vector loop (we'll
7768   // replace the vectorizer-specific hints below).
7769   MDNode *OrigLoopID = OrigLoop->getLoopID();
7770 
7771   Optional<MDNode *> VectorizedLoopID =
7772       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
7773                                       LLVMLoopVectorizeFollowupVectorized});
7774 
7775   Loop *L = LI->getLoopFor(State.CFG.PrevBB);
7776   if (VectorizedLoopID.hasValue())
7777     L->setLoopID(VectorizedLoopID.getValue());
7778   else {
7779     // Keep all loop hints from the original loop on the vector loop (we'll
7780     // replace the vectorizer-specific hints below).
7781     if (MDNode *LID = OrigLoop->getLoopID())
7782       L->setLoopID(LID);
7783 
7784     LoopVectorizeHints Hints(L, true, *ORE);
7785     Hints.setAlreadyVectorized();
7786   }
7787   // Disable runtime unrolling when vectorizing the epilogue loop.
7788   if (CanonicalIVStartValue)
7789     AddRuntimeUnrollDisableMetaData(L);
7790 
7791   // 3. Fix the vectorized code: take care of header phi's, live-outs,
7792   //    predication, updating analyses.
7793   ILV.fixVectorizedLoop(State);
7794 
7795   ILV.printDebugTracesAtEnd();
7796 }
7797 
7798 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
7799 void LoopVectorizationPlanner::printPlans(raw_ostream &O) {
7800   for (const auto &Plan : VPlans)
7801     if (PrintVPlansInDotFormat)
7802       Plan->printDOT(O);
7803     else
7804       Plan->print(O);
7805 }
7806 #endif
7807 
7808 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
7809     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
7810 
7811   // We create new control-flow for the vectorized loop, so the original exit
7812   // conditions will be dead after vectorization if they are only used by the
7813   // terminator.
7814   SmallVector<BasicBlock*> ExitingBlocks;
7815   OrigLoop->getExitingBlocks(ExitingBlocks);
7816   for (auto *BB : ExitingBlocks) {
7817     auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
7818     if (!Cmp || !Cmp->hasOneUse())
7819       continue;
7820 
7821     // TODO: we should introduce a getUniqueExitingBlocks on Loop
7822     if (!DeadInstructions.insert(Cmp).second)
7823       continue;
7824 
7825     // An operand of the icmp is often a dead trunc, used by IndUpdate.
7826     // TODO: can recurse through operands in general
7827     for (Value *Op : Cmp->operands()) {
7828       if (isa<TruncInst>(Op) && Op->hasOneUse())
7829           DeadInstructions.insert(cast<Instruction>(Op));
7830     }
7831   }
7832 
7833   // We create new "steps" for induction variable updates to which the original
7834   // induction variables map. An original update instruction will be dead if
7835   // all its users except the induction variable are dead.
7836   auto *Latch = OrigLoop->getLoopLatch();
7837   for (auto &Induction : Legal->getInductionVars()) {
7838     PHINode *Ind = Induction.first;
7839     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
7840 
7841     // If the tail is to be folded by masking, the primary induction variable,
7842     // if it exists, isn't dead: it will be used for masking. Don't kill it.
7843     if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction())
7844       continue;
7845 
7846     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
7847           return U == Ind || DeadInstructions.count(cast<Instruction>(U));
7848         }))
7849       DeadInstructions.insert(IndUpdate);
7850   }
7851 }
7852 
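     // When only unrolling (VF = 1), values stay scalar, so broadcasting a value
     // is a no-op.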
7853 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
7854 
7855 //===--------------------------------------------------------------------===//
7856 // EpilogueVectorizerMainLoop
7857 //===--------------------------------------------------------------------===//
7858 
7859 /// This function is partially responsible for generating the control flow
7860 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
7861 std::pair<BasicBlock *, Value *>
7862 EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
7863   MDNode *OrigLoopID = OrigLoop->getLoopID();
7864   Loop *Lp = createVectorLoopSkeleton("");
7865 
7866   // Generate the code to check the minimum iteration count of the vector
7867   // epilogue (see below).
7868   EPI.EpilogueIterationCountCheck =
7869       emitMinimumIterationCountCheck(LoopScalarPreHeader, true);
7870   EPI.EpilogueIterationCountCheck->setName("iter.check");
7871 
7872   // Generate the code to check any assumptions that we've made for SCEV
7873   // expressions.
7874   EPI.SCEVSafetyCheck = emitSCEVChecks(LoopScalarPreHeader);
7875 
7876   // Generate the code that checks at runtime if arrays overlap. We put the
7877   // checks into a separate block to make the more common case of few elements
7878   // faster.
7879   EPI.MemSafetyCheck = emitMemRuntimeChecks(LoopScalarPreHeader);
7880 
7881   // Generate the iteration count check for the main loop, *after* the check
7882   // for the epilogue loop, so that the path-length is shorter for the case
7883   // that goes directly through the vector epilogue. The longer-path length for
7884   // the main loop is compensated for by the gain from vectorizing the larger
7885   // trip count. Note: the branch will get updated later on when we vectorize
7886   // the epilogue.
7887   EPI.MainLoopIterationCountCheck =
7888       emitMinimumIterationCountCheck(LoopScalarPreHeader, false);
7889 
7890   // Generate the induction variable.
7891   Value *CountRoundDown = getOrCreateVectorTripCount(LoopVectorPreHeader);
7892   EPI.VectorTripCount = CountRoundDown;
7893   createHeaderBranch(Lp);
7894 
7895   // Skip induction resume value creation here because they will be created in
7896   // the second pass. If we created them here, they wouldn't be used anyway,
7897   // because the VPlan in the second pass still contains the inductions from the
7898   // original loop.
7899 
7900   return {completeLoopSkeleton(Lp, OrigLoopID), nullptr};
7901 }
7902 
7903 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
7904   LLVM_DEBUG({
7905     dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
7906            << "Main Loop VF:" << EPI.MainLoopVF
7907            << ", Main Loop UF:" << EPI.MainLoopUF
7908            << ", Epilogue Loop VF:" << EPI.EpilogueVF
7909            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
7910   });
7911 }
7912 
7913 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
7914   DEBUG_WITH_TYPE(VerboseDebug, {
7915     dbgs() << "intermediate fn:\n"
7916            << *OrigLoop->getHeader()->getParent() << "\n";
7917   });
7918 }
7919 
7920 BasicBlock *
7921 EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(BasicBlock *Bypass,
7922                                                            bool ForEpilogue) {
7923   assert(Bypass && "Expected valid bypass basic block.");
7924   ElementCount VFactor = ForEpilogue ? EPI.EpilogueVF : VF;
7925   unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF;
7926   Value *Count = getOrCreateTripCount(LoopVectorPreHeader);
7927   // Reuse existing vector loop preheader for TC checks.
7928   // Note that a new preheader block is generated for the vector loop.
7929   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
7930   IRBuilder<> Builder(TCCheckBlock->getTerminator());
7931 
7932   // Generate code to check if the loop's trip count is less than VF * UF of the
7933   // main vector loop.
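       // If a scalar epilogue is required, the vector loop is also skipped when
       // the trip count is exactly VF * UF (ULE), leaving at least one iteration
       // for the scalar remainder; otherwise a strict check (ULT) suffices.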
7934   auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF) ?
7935       ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
7936 
7937   Value *CheckMinIters = Builder.CreateICmp(
7938       P, Count, createStepForVF(Builder, Count->getType(), VFactor, UFactor),
7939       "min.iters.check");
7940 
7941   if (!ForEpilogue)
7942     TCCheckBlock->setName("vector.main.loop.iter.check");
7943 
7944   // Create new preheader for vector loop.
7945   LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
7946                                    DT, LI, nullptr, "vector.ph");
7947 
7948   if (ForEpilogue) {
7949     assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
7950                                  DT->getNode(Bypass)->getIDom()) &&
7951            "TC check is expected to dominate Bypass");
7952 
7953     // Update dominator for Bypass & LoopExit.
7954     DT->changeImmediateDominator(Bypass, TCCheckBlock);
7955     if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF))
7956       // For loops with multiple exits, there's no edge from the middle block
7957       // to exit blocks (as the epilogue must run) and thus no need to update
7958       // the immediate dominator of the exit blocks.
7959       DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
7960 
7961     LoopBypassBlocks.push_back(TCCheckBlock);
7962 
7963     // Save the trip count so we don't have to regenerate it in the
7964     // vec.epilog.iter.check. This is safe to do because the trip count
7965     // generated here dominates the vector epilog iter check.
7966     EPI.TripCount = Count;
7967   }
7968 
7969   ReplaceInstWithInst(
7970       TCCheckBlock->getTerminator(),
7971       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
7972 
7973   return TCCheckBlock;
7974 }
7975 
7976 //===--------------------------------------------------------------------===//
7977 // EpilogueVectorizerEpilogueLoop
7978 //===--------------------------------------------------------------------===//
7979 
7980 /// This function is partially responsible for generating the control flow
7981 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
7982 std::pair<BasicBlock *, Value *>
7983 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
7984   MDNode *OrigLoopID = OrigLoop->getLoopID();
7985   Loop *Lp = createVectorLoopSkeleton("vec.epilog.");
7986 
7987   // Now, compare the remaining count and, if there aren't enough iterations to
7988   // execute the vectorized epilogue, skip to the scalar part.
7989   BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader;
7990   VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check");
7991   LoopVectorPreHeader =
7992       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
7993                  LI, nullptr, "vec.epilog.ph");
7994   emitMinimumVectorEpilogueIterCountCheck(LoopScalarPreHeader,
7995                                           VecEpilogueIterationCountCheck);
7996 
7997   // Adjust the control flow taking the state info from the main loop
7998   // vectorization into account.
7999   assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck &&
8000          "expected this to be saved from the previous pass.");
8001   EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith(
8002       VecEpilogueIterationCountCheck, LoopVectorPreHeader);
8003 
8004   DT->changeImmediateDominator(LoopVectorPreHeader,
8005                                EPI.MainLoopIterationCountCheck);
8006 
8007   EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
8008       VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8009 
8010   if (EPI.SCEVSafetyCheck)
8011     EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith(
8012         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8013   if (EPI.MemSafetyCheck)
8014     EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith(
8015         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8016 
8017   DT->changeImmediateDominator(
8018       VecEpilogueIterationCountCheck,
8019       VecEpilogueIterationCountCheck->getSinglePredecessor());
8020 
8021   DT->changeImmediateDominator(LoopScalarPreHeader,
8022                                EPI.EpilogueIterationCountCheck);
8023   if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF))
8024     // If there is an epilogue which must run, there's no edge from the
8025     // middle block to exit blocks and thus no need to update the immediate
8026     // dominator of the exit blocks.
8027     DT->changeImmediateDominator(LoopExitBlock,
8028                                  EPI.EpilogueIterationCountCheck);
8029 
8030   // Keep track of bypass blocks, as they feed start values to the induction
8031   // phis in the scalar loop preheader.
8032   if (EPI.SCEVSafetyCheck)
8033     LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck);
8034   if (EPI.MemSafetyCheck)
8035     LoopBypassBlocks.push_back(EPI.MemSafetyCheck);
8036   LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck);
8037 
8038   // The vec.epilog.iter.check block may contain Phi nodes from reductions which
8039   // merge control-flow from the latch block and the middle block. Update the
8040   // incoming values here and move the Phi into the preheader.
8041   SmallVector<PHINode *, 4> PhisInBlock;
8042   for (PHINode &Phi : VecEpilogueIterationCountCheck->phis())
8043     PhisInBlock.push_back(&Phi);
8044 
8045   for (PHINode *Phi : PhisInBlock) {
8046     Phi->replaceIncomingBlockWith(
8047         VecEpilogueIterationCountCheck->getSinglePredecessor(),
8048         VecEpilogueIterationCountCheck);
8049     Phi->removeIncomingValue(EPI.EpilogueIterationCountCheck);
8050     if (EPI.SCEVSafetyCheck)
8051       Phi->removeIncomingValue(EPI.SCEVSafetyCheck);
8052     if (EPI.MemSafetyCheck)
8053       Phi->removeIncomingValue(EPI.MemSafetyCheck);
8054     Phi->moveBefore(LoopVectorPreHeader->getFirstNonPHI());
8055   }
8056 
8057   // Generate a resume induction for the vector epilogue and put it in the
8058   // vector epilogue preheader.
8059   Type *IdxTy = Legal->getWidestInductionType();
8060   PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
8061                                          LoopVectorPreHeader->getFirstNonPHI());
8062   EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
8063   EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
8064                            EPI.MainLoopIterationCountCheck);
8065 
8066   // Generate the induction variable.
8067   createHeaderBranch(Lp);
8068 
8069   // Generate induction resume values. These variables save the new starting
8070   // indexes for the scalar loop. They are used to test if there are any tail
8071   // iterations left once the vector loop has completed.
8072   // Note that when the vectorized epilogue is skipped due to iteration count
8073   // check, then the resume value for the induction variable comes from
8074   // the trip count of the main vector loop, hence passing the AdditionalBypass
8075   // argument.
8076   createInductionResumeValues(Lp, {VecEpilogueIterationCountCheck,
8077                                    EPI.VectorTripCount} /* AdditionalBypass */);
8078 
8079   return {completeLoopSkeleton(Lp, OrigLoopID), EPResumeVal};
8080 }
8081 
8082 BasicBlock *
8083 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
8084     BasicBlock *Bypass, BasicBlock *Insert) {
8085 
8086   assert(EPI.TripCount &&
8087          "Expected trip count to have been saved in the first pass.");
8088   assert(
8089       (!isa<Instruction>(EPI.TripCount) ||
8090        DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
8091       "saved trip count does not dominate insertion point.");
8092   Value *TC = EPI.TripCount;
8093   IRBuilder<> Builder(Insert->getTerminator());
8094   Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");
8095 
8096   // Generate code to check if the loop's trip count is less than VF * UF of the
8097   // vector epilogue loop.
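       // As with the main loop check, use ULE when a scalar epilogue must run so
       // that at least one iteration is left for it; otherwise ULT suffices.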
8098   auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ?
8099       ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
8100 
8101   Value *CheckMinIters =
8102       Builder.CreateICmp(P, Count,
8103                          createStepForVF(Builder, Count->getType(),
8104                                          EPI.EpilogueVF, EPI.EpilogueUF),
8105                          "min.epilog.iters.check");
8106 
8107   ReplaceInstWithInst(
8108       Insert->getTerminator(),
8109       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8110 
8111   LoopBypassBlocks.push_back(Insert);
8112   return Insert;
8113 }
8114 
8115 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
8116   LLVM_DEBUG({
8117     dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
8118            << "Epilogue Loop VF:" << EPI.EpilogueVF
8119            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8120   });
8121 }
8122 
8123 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
8124   DEBUG_WITH_TYPE(VerboseDebug, {
8125     dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n";
8126   });
8127 }
8128 
8129 bool LoopVectorizationPlanner::getDecisionAndClampRange(
8130     const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
8131   assert(!Range.isEmpty() && "Trying to test an empty VF range.");
8132   bool PredicateAtRangeStart = Predicate(Range.Start);
8133 
8134   for (ElementCount TmpVF = Range.Start * 2;
8135        ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
8136     if (Predicate(TmpVF) != PredicateAtRangeStart) {
8137       Range.End = TmpVF;
8138       break;
8139     }
8140 
8141   return PredicateAtRangeStart;
8142 }
8143 
8144 /// Build VPlans for the full range of feasible VFs = {\p MinVF, 2 * \p MinVF,
8145 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
8146 /// of VF's starting at a given VF and extending it as much as possible. Each
8147 /// vectorization decision can potentially shorten this sub-range during
8148 /// buildVPlan().
8149 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
8150                                            ElementCount MaxVF) {
8151   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8152   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8153     VFRange SubRange = {VF, MaxVFPlusOne};
8154     VPlans.push_back(buildVPlan(SubRange));
8155     VF = SubRange.End;
8156   }
8157 }
8158 
8159 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
8160                                          VPlanPtr &Plan) {
8161   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
8162 
8163   // Look for cached value.
8164   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
8165   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
8166   if (ECEntryIt != EdgeMaskCache.end())
8167     return ECEntryIt->second;
8168 
8169   VPValue *SrcMask = createBlockInMask(Src, Plan);
8170 
8171   // The terminator has to be a branch inst!
8172   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
8173   assert(BI && "Unexpected terminator found");
8174 
8175   if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
8176     return EdgeMaskCache[Edge] = SrcMask;
8177 
8178   // If source is an exiting block, we know the exit edge is dynamically dead
8179   // in the vector loop, and thus we don't need to restrict the mask.  Avoid
8180   // adding uses of an otherwise potentially dead instruction.
8181   if (OrigLoop->isLoopExiting(Src))
8182     return EdgeMaskCache[Edge] = SrcMask;
8183 
8184   VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition());
8185   assert(EdgeMask && "No Edge Mask found for condition");
8186 
8187   if (BI->getSuccessor(0) != Dst)
8188     EdgeMask = Builder.createNot(EdgeMask, BI->getDebugLoc());
8189 
8190   if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND.
8191     // The condition is 'SrcMask && EdgeMask', which is equivalent to
8192     // 'select i1 SrcMask, i1 EdgeMask, i1 false'.
8193     // The select version does not introduce new UB if SrcMask is false and
8194     // EdgeMask is poison. Using 'and' here introduces undefined behavior.
8195     VPValue *False = Plan->getOrAddVPValue(
8196         ConstantInt::getFalse(BI->getCondition()->getType()));
8197     EdgeMask =
8198         Builder.createSelect(SrcMask, EdgeMask, False, BI->getDebugLoc());
8199   }
8200 
8201   return EdgeMaskCache[Edge] = EdgeMask;
8202 }
8203 
8204 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
8205   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
8206 
8207   // Look for cached value.
8208   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
8209   if (BCEntryIt != BlockMaskCache.end())
8210     return BCEntryIt->second;
8211 
8212   // All-one mask is modelled as no-mask following the convention for masked
8213   // load/store/gather/scatter. Initialize BlockMask to no-mask.
8214   VPValue *BlockMask = nullptr;
8215 
8216   if (OrigLoop->getHeader() == BB) {
8217     if (!CM.blockNeedsPredicationForAnyReason(BB))
8218       return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
8219 
8220     // Introduce the early-exit compare IV <= BTC to form header block mask.
8221     // This is used instead of IV < TC because TC may wrap, unlike BTC. Start by
8222     // constructing the desired canonical IV in the header block as its first
8223     // non-phi instructions.
8224     assert(CM.foldTailByMasking() && "must fold the tail");
8225     VPBasicBlock *HeaderVPBB = Plan->getEntry()->getEntryBasicBlock();
8226     auto NewInsertionPoint = HeaderVPBB->getFirstNonPhi();
8227     auto *IV = new VPWidenCanonicalIVRecipe(Plan->getCanonicalIV());
8228     HeaderVPBB->insert(IV, HeaderVPBB->getFirstNonPhi());
8229 
8230     VPBuilder::InsertPointGuard Guard(Builder);
8231     Builder.setInsertPoint(HeaderVPBB, NewInsertionPoint);
8232     if (CM.TTI.emitGetActiveLaneMask()) {
8233       VPValue *TC = Plan->getOrCreateTripCount();
8234       BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV, TC});
8235     } else {
8236       VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
8237       BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
8238     }
8239     return BlockMaskCache[BB] = BlockMask;
8240   }
8241 
8242   // This is the block mask. We OR all incoming edges.
8243   for (auto *Predecessor : predecessors(BB)) {
8244     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
8245     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
8246       return BlockMaskCache[BB] = EdgeMask;
8247 
8248     if (!BlockMask) { // BlockMask has its initialized nullptr value.
8249       BlockMask = EdgeMask;
8250       continue;
8251     }
8252 
8253     BlockMask = Builder.createOr(BlockMask, EdgeMask, {});
8254   }
8255 
8256   return BlockMaskCache[BB] = BlockMask;
8257 }
8258 
8259 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I,
8260                                                 ArrayRef<VPValue *> Operands,
8261                                                 VFRange &Range,
8262                                                 VPlanPtr &Plan) {
8263   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
8264          "Must be called with either a load or store");
8265 
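       // The cost model's widening decision determines, per VF, whether this
       // access is widened or interleaved rather than scalarized.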
8266   auto willWiden = [&](ElementCount VF) -> bool {
8267     if (VF.isScalar())
8268       return false;
8269     LoopVectorizationCostModel::InstWidening Decision =
8270         CM.getWideningDecision(I, VF);
8271     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
8272            "CM decision should be taken at this point.");
8273     if (Decision == LoopVectorizationCostModel::CM_Interleave)
8274       return true;
8275     if (CM.isScalarAfterVectorization(I, VF) ||
8276         CM.isProfitableToScalarize(I, VF))
8277       return false;
8278     return Decision != LoopVectorizationCostModel::CM_Scalarize;
8279   };
8280 
8281   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8282     return nullptr;
8283 
8284   VPValue *Mask = nullptr;
8285   if (Legal->isMaskRequired(I))
8286     Mask = createBlockInMask(I->getParent(), Plan);
8287 
8288   // Determine if the pointer operand of the access is either consecutive or
8289   // reverse consecutive.
8290   LoopVectorizationCostModel::InstWidening Decision =
8291       CM.getWideningDecision(I, Range.Start);
8292   bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse;
8293   bool Consecutive =
8294       Reverse || Decision == LoopVectorizationCostModel::CM_Widen;
8295 
8296   if (LoadInst *Load = dyn_cast<LoadInst>(I))
8297     return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask,
8298                                               Consecutive, Reverse);
8299 
8300   StoreInst *Store = cast<StoreInst>(I);
8301   return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0],
8302                                             Mask, Consecutive, Reverse);
8303 }
8304 
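     // Create a recipe that widens the integer or floating-point induction Phi
     // (or its truncation PhiOrTrunc), deciding over the given VF range whether a
     // scalar version of the induction is also needed, or is the only one needed.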
8305 static VPWidenIntOrFpInductionRecipe *
8306 createWidenInductionRecipe(PHINode *Phi, Instruction *PhiOrTrunc,
8307                            VPValue *Start, const InductionDescriptor &IndDesc,
8308                            LoopVectorizationCostModel &CM, ScalarEvolution &SE,
8309                            Loop &OrigLoop, VFRange &Range) {
8310   // Returns true if an instruction \p I should be scalarized instead of
8311   // vectorized for the chosen vectorization factor.
8312   auto ShouldScalarizeInstruction = [&CM](Instruction *I, ElementCount VF) {
8313     return CM.isScalarAfterVectorization(I, VF) ||
8314            CM.isProfitableToScalarize(I, VF);
8315   };
8316 
8317   bool NeedsScalarIV = LoopVectorizationPlanner::getDecisionAndClampRange(
8318       [&](ElementCount VF) {
8319         // Returns true if we should generate a scalar version of \p IV.
8320         if (ShouldScalarizeInstruction(PhiOrTrunc, VF))
8321           return true;
8322         auto isScalarInst = [&](User *U) -> bool {
8323           auto *I = cast<Instruction>(U);
8324           return OrigLoop.contains(I) && ShouldScalarizeInstruction(I, VF);
8325         };
8326         return any_of(PhiOrTrunc->users(), isScalarInst);
8327       },
8328       Range);
8329   bool NeedsScalarIVOnly = LoopVectorizationPlanner::getDecisionAndClampRange(
8330       [&](ElementCount VF) {
8331         return ShouldScalarizeInstruction(PhiOrTrunc, VF);
8332       },
8333       Range);
8334   assert(IndDesc.getStartValue() ==
8335          Phi->getIncomingValueForBlock(OrigLoop.getLoopPreheader()));
8336   assert(SE.isLoopInvariant(IndDesc.getStep(), &OrigLoop) &&
8337          "step must be loop invariant");
8338   if (auto *TruncI = dyn_cast<TruncInst>(PhiOrTrunc)) {
8339     return new VPWidenIntOrFpInductionRecipe(
8340         Phi, Start, IndDesc, TruncI, NeedsScalarIV, !NeedsScalarIVOnly, SE);
8341   }
8342   assert(isa<PHINode>(PhiOrTrunc) && "must be a phi node here");
8343   return new VPWidenIntOrFpInductionRecipe(Phi, Start, IndDesc, NeedsScalarIV,
8344                                            !NeedsScalarIVOnly, SE);
8345 }
8346 
8347 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionPHI(
8348     PHINode *Phi, ArrayRef<VPValue *> Operands, VFRange &Range) const {
8349 
8350   // Check if this is an integer or fp induction. If so, build the recipe that
8351   // produces its scalar and vector values.
8352   if (auto *II = Legal->getIntOrFpInductionDescriptor(Phi))
8353     return createWidenInductionRecipe(Phi, Phi, Operands[0], *II, CM,
8354                                       *PSE.getSE(), *OrigLoop, Range);
8355 
8356   return nullptr;
8357 }
8358 
8359 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate(
8360     TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range,
8361     VPlan &Plan) const {
8362   // Optimize the special case where the source is a constant integer
8363   // induction variable. Notice that we can only optimize the 'trunc' case
8364   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
8365   // (c) other casts depend on pointer size.
8366 
8367   // Determine whether \p K is a truncation based on an induction variable that
8368   // can be optimized.
8369   auto isOptimizableIVTruncate =
8370       [&](Instruction *K) -> std::function<bool(ElementCount)> {
8371     return [=](ElementCount VF) -> bool {
8372       return CM.isOptimizableIVTruncate(K, VF);
8373     };
8374   };
8375 
8376   if (LoopVectorizationPlanner::getDecisionAndClampRange(
8377           isOptimizableIVTruncate(I), Range)) {
8378 
8379     auto *Phi = cast<PHINode>(I->getOperand(0));
8380     const InductionDescriptor &II = *Legal->getIntOrFpInductionDescriptor(Phi);
8381     VPValue *Start = Plan.getOrAddVPValue(II.getStartValue());
8382     return createWidenInductionRecipe(Phi, I, Start, II, CM, *PSE.getSE(),
8383                                       *OrigLoop, Range);
8384   }
8385   return nullptr;
8386 }
8387 
8388 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi,
8389                                                 ArrayRef<VPValue *> Operands,
8390                                                 VPlanPtr &Plan) {
8391   // If all incoming values are equal, the incoming VPValue can be used directly
8392   // instead of creating a new VPBlendRecipe.
8393   VPValue *FirstIncoming = Operands[0];
8394   if (all_of(Operands, [FirstIncoming](const VPValue *Inc) {
8395         return FirstIncoming == Inc;
8396       })) {
8397     return Operands[0];
8398   }
8399 
8400   unsigned NumIncoming = Phi->getNumIncomingValues();
8401   // For in-loop reductions, we do not need to create an additional select.
8402   VPValue *InLoopVal = nullptr;
8403   for (unsigned In = 0; In < NumIncoming; In++) {
8404     PHINode *PhiOp =
8405         dyn_cast_or_null<PHINode>(Operands[In]->getUnderlyingValue());
8406     if (PhiOp && CM.isInLoopReduction(PhiOp)) {
8407       assert(!InLoopVal && "Found more than one in-loop reduction!");
8408       InLoopVal = Operands[In];
8409     }
8410   }
8411 
8412   assert((!InLoopVal || NumIncoming == 2) &&
8413          "Found an in-loop reduction for PHI with unexpected number of "
8414          "incoming values");
8415   if (InLoopVal)
8416     return Operands[Operands[0] == InLoopVal ? 1 : 0];
8417 
8418   // We know that all PHIs in non-header blocks are converted into selects, so
8419   // we don't have to worry about the insertion order and we can just use the
8420   // builder. At this point we generate the predication tree. There may be
8421   // duplications since this is a simple recursive scan, but future
8422   // optimizations will clean it up.
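  //
  // As a sketch: a phi merging %v0 and %v1 from two predecessors becomes a
  // VPBlendRecipe with operand list (%v0, mask0, %v1, mask1), where mask0 and
  // mask1 are the edge masks computed below; the blend is later lowered to
  // selects over those masks.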
8423   SmallVector<VPValue *, 2> OperandsWithMask;
8424 
8425   for (unsigned In = 0; In < NumIncoming; In++) {
8426     VPValue *EdgeMask =
8427       createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
8428     assert((EdgeMask || NumIncoming == 1) &&
8429            "Multiple predecessors with one having a full mask");
8430     OperandsWithMask.push_back(Operands[In]);
8431     if (EdgeMask)
8432       OperandsWithMask.push_back(EdgeMask);
8433   }
8434   return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask));
8435 }
8436 
8437 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
8438                                                    ArrayRef<VPValue *> Operands,
8439                                                    VFRange &Range) const {
8440 
8441   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8442       [this, CI](ElementCount VF) {
8443         return CM.isScalarWithPredication(CI, VF);
8444       },
8445       Range);
8446 
8447   if (IsPredicated)
8448     return nullptr;
8449 
8450   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8451   if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
8452              ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
8453              ID == Intrinsic::pseudoprobe ||
8454              ID == Intrinsic::experimental_noalias_scope_decl))
8455     return nullptr;
8456 
8457   auto willWiden = [&](ElementCount VF) -> bool {
8458     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    // The following case may be scalarized depending on the VF.
    // The flag indicates whether we use an intrinsic or a regular call for
    // the vectorized version of the instruction, i.e. whether performing the
    // intrinsic call is more beneficial than calling a library function.
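    // For example, with a fixed VF of 4, a call to llvm.sqrt.f32 may either be
    // widened to the llvm.sqrt.v4f32 intrinsic or mapped to a vector library
    // routine, whichever the cost model finds cheaper (illustrative and
    // target-dependent).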
8463     bool NeedToScalarize = false;
8464     InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
8465     InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0;
8466     bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
8467     return UseVectorIntrinsic || !NeedToScalarize;
8468   };
8469 
8470   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8471     return nullptr;
8472 
8473   ArrayRef<VPValue *> Ops = Operands.take_front(CI->arg_size());
8474   return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end()));
8475 }
8476 
8477 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
8478   assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
8479          !isa<StoreInst>(I) && "Instruction should have been handled earlier");
8480   // Instruction should be widened, unless it is scalar after vectorization,
8481   // scalarization is profitable or it is predicated.
8482   auto WillScalarize = [this, I](ElementCount VF) -> bool {
8483     return CM.isScalarAfterVectorization(I, VF) ||
8484            CM.isProfitableToScalarize(I, VF) ||
8485            CM.isScalarWithPredication(I, VF);
8486   };
8487   return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
8488                                                              Range);
8489 }
8490 
8491 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
8492                                            ArrayRef<VPValue *> Operands) const {
8493   auto IsVectorizableOpcode = [](unsigned Opcode) {
8494     switch (Opcode) {
8495     case Instruction::Add:
8496     case Instruction::And:
8497     case Instruction::AShr:
8498     case Instruction::BitCast:
8499     case Instruction::FAdd:
8500     case Instruction::FCmp:
8501     case Instruction::FDiv:
8502     case Instruction::FMul:
8503     case Instruction::FNeg:
8504     case Instruction::FPExt:
8505     case Instruction::FPToSI:
8506     case Instruction::FPToUI:
8507     case Instruction::FPTrunc:
8508     case Instruction::FRem:
8509     case Instruction::FSub:
8510     case Instruction::ICmp:
8511     case Instruction::IntToPtr:
8512     case Instruction::LShr:
8513     case Instruction::Mul:
8514     case Instruction::Or:
8515     case Instruction::PtrToInt:
8516     case Instruction::SDiv:
8517     case Instruction::Select:
8518     case Instruction::SExt:
8519     case Instruction::Shl:
8520     case Instruction::SIToFP:
8521     case Instruction::SRem:
8522     case Instruction::Sub:
8523     case Instruction::Trunc:
8524     case Instruction::UDiv:
8525     case Instruction::UIToFP:
8526     case Instruction::URem:
8527     case Instruction::Xor:
8528     case Instruction::ZExt:
8529       return true;
8530     }
8531     return false;
8532   };
8533 
8534   if (!IsVectorizableOpcode(I->getOpcode()))
8535     return nullptr;
8536 
8537   // Success: widen this instruction.
8538   return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end()));
8539 }
8540 
8541 void VPRecipeBuilder::fixHeaderPhis() {
8542   BasicBlock *OrigLatch = OrigLoop->getLoopLatch();
8543   for (VPHeaderPHIRecipe *R : PhisToFix) {
8544     auto *PN = cast<PHINode>(R->getUnderlyingValue());
8545     VPRecipeBase *IncR =
8546         getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch)));
8547     R->addOperand(IncR->getVPSingleValue());
8548   }
8549 }
8550 
8551 VPBasicBlock *VPRecipeBuilder::handleReplication(
8552     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
8553     VPlanPtr &Plan) {
8554   bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
8555       [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
8556       Range);
8557 
8558   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8559       [&](ElementCount VF) { return CM.isPredicatedInst(I, VF, IsUniform); },
8560       Range);
8561 
8562   // Even if the instruction is not marked as uniform, there are certain
8563   // intrinsic calls that can be effectively treated as such, so we check for
8564   // them here. Conservatively, we only do this for scalable vectors, since
8565   // for fixed-width VFs we can always fall back on full scalarization.
8566   if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) {
8567     switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
8568     case Intrinsic::assume:
8569     case Intrinsic::lifetime_start:
8570     case Intrinsic::lifetime_end:
8571       // For scalable vectors if one of the operands is variant then we still
8572       // want to mark as uniform, which will generate one instruction for just
8573       // the first lane of the vector. We can't scalarize the call in the same
8574       // way as for fixed-width vectors because we don't know how many lanes
8575       // there are.
8576       //
8577       // The reasons for doing it this way for scalable vectors are:
      //   1. For the assume intrinsic, generating the instruction for the
      //      first lane is still better than not generating any at all. For
      //      example, the input may be a splat across all lanes.
8581       //   2. For the lifetime start/end intrinsics the pointer operand only
8582       //      does anything useful when the input comes from a stack object,
8583       //      which suggests it should always be uniform. For non-stack objects
8584       //      the effect is to poison the object, which still allows us to
8585       //      remove the call.
8586       IsUniform = true;
8587       break;
8588     default:
8589       break;
8590     }
8591   }
8592 
8593   auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
8594                                        IsUniform, IsPredicated);
8595   setRecipe(I, Recipe);
8596   Plan->addVPValue(I, Recipe);
8597 
8598   // Find if I uses a predicated instruction. If so, it will use its scalar
8599   // value. Avoid hoisting the insert-element which packs the scalar value into
8600   // a vector value, as that happens iff all users use the vector value.
8601   for (VPValue *Op : Recipe->operands()) {
8602     auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef());
8603     if (!PredR)
8604       continue;
8605     auto *RepR =
8606         cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef());
8607     assert(RepR->isPredicated() &&
8608            "expected Replicate recipe to be predicated");
8609     RepR->setAlsoPack(false);
8610   }
8611 
8612   // Finalize the recipe for Instr, first if it is not predicated.
8613   if (!IsPredicated) {
8614     LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
8615     VPBB->appendRecipe(Recipe);
8616     return VPBB;
8617   }
8618   LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
8619 
8620   VPBlockBase *SingleSucc = VPBB->getSingleSuccessor();
8621   assert(SingleSucc && "VPBB must have a single successor when handling "
8622                        "predicated replication.");
8623   VPBlockUtils::disconnectBlocks(VPBB, SingleSucc);
8624   // Record predicated instructions for above packing optimizations.
8625   VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
8626   VPBlockUtils::insertBlockAfter(Region, VPBB);
8627   auto *RegSucc = new VPBasicBlock();
8628   VPBlockUtils::insertBlockAfter(RegSucc, Region);
8629   VPBlockUtils::connectBlocks(RegSucc, SingleSucc);
8630   return RegSucc;
8631 }
8632 
8633 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
8634                                                       VPRecipeBase *PredRecipe,
8635                                                       VPlanPtr &Plan) {
8636   // Instructions marked for predication are replicated and placed under an
8637   // if-then construct to prevent side-effects.
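  //
  // The resulting single-entry, single-exit region has the shape (sketch):
  //   pred.<opcode>.entry:    BranchOnMask(BlockInMask)
  //   pred.<opcode>.if:       the replicated, predicated recipe
  //   pred.<opcode>.continue: optional PHI recipe merging the predicated
  //                           result back into the main flow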
8638 
8639   // Generate recipes to compute the block mask for this region.
8640   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
8641 
8642   // Build the triangular if-then region.
8643   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
8644   assert(Instr->getParent() && "Predicated instruction not in any basic block");
8645   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
8646   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
8647   auto *PHIRecipe = Instr->getType()->isVoidTy()
8648                         ? nullptr
8649                         : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr));
8650   if (PHIRecipe) {
8651     Plan->removeVPValueFor(Instr);
8652     Plan->addVPValue(Instr, PHIRecipe);
8653   }
8654   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
8655   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
8656   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
8657 
8658   // Note: first set Entry as region entry and then connect successors starting
8659   // from it in order, to propagate the "parent" of each VPBasicBlock.
8660   VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
8661   VPBlockUtils::connectBlocks(Pred, Exit);
8662 
8663   return Region;
8664 }
8665 
8666 VPRecipeOrVPValueTy
8667 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
8668                                         ArrayRef<VPValue *> Operands,
8669                                         VFRange &Range, VPlanPtr &Plan) {
8670   // First, check for specific widening recipes that deal with calls, memory
8671   // operations, inductions and Phi nodes.
8672   if (auto *CI = dyn_cast<CallInst>(Instr))
8673     return toVPRecipeResult(tryToWidenCall(CI, Operands, Range));
8674 
8675   if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8676     return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan));
8677 
8678   VPRecipeBase *Recipe;
8679   if (auto Phi = dyn_cast<PHINode>(Instr)) {
8680     if (Phi->getParent() != OrigLoop->getHeader())
8681       return tryToBlend(Phi, Operands, Plan);
8682     if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands, Range)))
8683       return toVPRecipeResult(Recipe);
8684 
8685     VPHeaderPHIRecipe *PhiRecipe = nullptr;
8686     if (Legal->isReductionVariable(Phi) || Legal->isFirstOrderRecurrence(Phi)) {
8687       VPValue *StartV = Operands[0];
8688       if (Legal->isReductionVariable(Phi)) {
8689         const RecurrenceDescriptor &RdxDesc =
8690             Legal->getReductionVars().find(Phi)->second;
8691         assert(RdxDesc.getRecurrenceStartValue() ==
8692                Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8693         PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV,
8694                                              CM.isInLoopReduction(Phi),
8695                                              CM.useOrderedReductions(RdxDesc));
8696       } else {
8697         PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV);
8698       }
8699 
8700       // Record the incoming value from the backedge, so we can add the incoming
8701       // value from the backedge after all recipes have been created.
8702       recordRecipeOf(cast<Instruction>(
8703           Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch())));
8704       PhisToFix.push_back(PhiRecipe);
8705     } else {
8706       // TODO: record backedge value for remaining pointer induction phis.
8707       assert(Phi->getType()->isPointerTy() &&
8708              "only pointer phis should be handled here");
8709       assert(Legal->getInductionVars().count(Phi) &&
8710              "Not an induction variable");
8711       InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
8712       VPValue *Start = Plan->getOrAddVPValue(II.getStartValue());
8713       PhiRecipe = new VPWidenPHIRecipe(Phi, Start);
8714     }
8715 
8716     return toVPRecipeResult(PhiRecipe);
8717   }
8718 
8719   if (isa<TruncInst>(Instr) &&
8720       (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands,
8721                                                Range, *Plan)))
8722     return toVPRecipeResult(Recipe);
8723 
8724   if (!shouldWiden(Instr, Range))
8725     return nullptr;
8726 
8727   if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
8728     return toVPRecipeResult(new VPWidenGEPRecipe(
8729         GEP, make_range(Operands.begin(), Operands.end()), OrigLoop));
8730 
8731   if (auto *SI = dyn_cast<SelectInst>(Instr)) {
8732     bool InvariantCond =
8733         PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
8734     return toVPRecipeResult(new VPWidenSelectRecipe(
8735         *SI, make_range(Operands.begin(), Operands.end()), InvariantCond));
8736   }
8737 
8738   return toVPRecipeResult(tryToWiden(Instr, Operands));
8739 }
8740 
8741 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8742                                                         ElementCount MaxVF) {
8743   assert(OrigLoop->isInnermost() && "Inner loop expected.");
8744 
8745   // Collect instructions from the original loop that will become trivially dead
8746   // in the vectorized loop. We don't need to vectorize these instructions. For
8747   // example, original induction update instructions can become dead because we
8748   // separately emit induction "steps" when generating code for the new loop.
8749   // Similarly, we create a new latch condition when setting up the structure
8750   // of the new loop, so the old one can become dead.
8751   SmallPtrSet<Instruction *, 4> DeadInstructions;
8752   collectTriviallyDeadInstructions(DeadInstructions);
8753 
8754   // Add assume instructions we need to drop to DeadInstructions, to prevent
8755   // them from being added to the VPlan.
  // TODO: We only need to drop assumes in blocks that get flattened. If the
8757   // control flow is preserved, we should keep them.
8758   auto &ConditionalAssumes = Legal->getConditionalAssumes();
8759   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
8760 
8761   MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
8762   // Dead instructions do not need sinking. Remove them from SinkAfter.
8763   for (Instruction *I : DeadInstructions)
8764     SinkAfter.erase(I);
8765 
8766   // Cannot sink instructions after dead instructions (there won't be any
8767   // recipes for them). Instead, find the first non-dead previous instruction.
8768   for (auto &P : Legal->getSinkAfter()) {
8769     Instruction *SinkTarget = P.second;
8770     Instruction *FirstInst = &*SinkTarget->getParent()->begin();
8771     (void)FirstInst;
8772     while (DeadInstructions.contains(SinkTarget)) {
8773       assert(
8774           SinkTarget != FirstInst &&
8775           "Must find a live instruction (at least the one feeding the "
8776           "first-order recurrence PHI) before reaching beginning of the block");
8777       SinkTarget = SinkTarget->getPrevNode();
8778       assert(SinkTarget != P.first &&
8779              "sink source equals target, no sinking required");
8780     }
8781     P.second = SinkTarget;
8782   }
8783 
8784   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8785   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8786     VFRange SubRange = {VF, MaxVFPlusOne};
8787     VPlans.push_back(
8788         buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
8789     VF = SubRange.End;
8790   }
8791 }
8792 
8793 // Add a VPCanonicalIVPHIRecipe starting at 0 to the header, a
8794 // CanonicalIVIncrement{NUW} VPInstruction to increment it by VF * UF and a
8795 // BranchOnCount VPInstruction to the latch.
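//
// A rough sketch of the result (illustrative pseudo-VPlan, not exact printer
// output):
//   header:  %canonical.iv      = phi [ 0, preheader ], [ %canonical.iv.next ]
//   latch:   %canonical.iv.next = %canonical.iv + VF * UF
//            branch-on-count %canonical.iv.next, vector-trip-count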
8796 static void addCanonicalIVRecipes(VPlan &Plan, Type *IdxTy, DebugLoc DL,
8797                                   bool HasNUW, bool IsVPlanNative) {
8798   Value *StartIdx = ConstantInt::get(IdxTy, 0);
8799   auto *StartV = Plan.getOrAddVPValue(StartIdx);
8800 
8801   auto *CanonicalIVPHI = new VPCanonicalIVPHIRecipe(StartV, DL);
8802   VPRegionBlock *TopRegion = Plan.getVectorLoopRegion();
8803   VPBasicBlock *Header = TopRegion->getEntryBasicBlock();
8804   if (IsVPlanNative)
8805     Header = cast<VPBasicBlock>(Header->getSingleSuccessor());
8806   Header->insert(CanonicalIVPHI, Header->begin());
8807 
8808   auto *CanonicalIVIncrement =
8809       new VPInstruction(HasNUW ? VPInstruction::CanonicalIVIncrementNUW
8810                                : VPInstruction::CanonicalIVIncrement,
8811                         {CanonicalIVPHI}, DL);
8812   CanonicalIVPHI->addOperand(CanonicalIVIncrement);
8813 
8814   VPBasicBlock *EB = TopRegion->getExitBasicBlock();
8815   if (IsVPlanNative) {
8816     EB = cast<VPBasicBlock>(EB->getSinglePredecessor());
8817     EB->setCondBit(nullptr);
8818   }
8819   EB->appendRecipe(CanonicalIVIncrement);
8820 
8821   auto *BranchOnCount =
8822       new VPInstruction(VPInstruction::BranchOnCount,
8823                         {CanonicalIVIncrement, &Plan.getVectorTripCount()}, DL);
8824   EB->appendRecipe(BranchOnCount);
8825 }
8826 
8827 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
8828     VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
8829     const MapVector<Instruction *, Instruction *> &SinkAfter) {
8830 
8831   SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
8832 
8833   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
8834 
8835   // ---------------------------------------------------------------------------
8836   // Pre-construction: record ingredients whose recipes we'll need to further
8837   // process after constructing the initial VPlan.
8838   // ---------------------------------------------------------------------------
8839 
8840   // Mark instructions we'll need to sink later and their targets as
8841   // ingredients whose recipe we'll need to record.
8842   for (auto &Entry : SinkAfter) {
8843     RecipeBuilder.recordRecipeOf(Entry.first);
8844     RecipeBuilder.recordRecipeOf(Entry.second);
8845   }
8846   for (auto &Reduction : CM.getInLoopReductionChains()) {
8847     PHINode *Phi = Reduction.first;
8848     RecurKind Kind =
8849         Legal->getReductionVars().find(Phi)->second.getRecurrenceKind();
8850     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
8851 
8852     RecipeBuilder.recordRecipeOf(Phi);
8853     for (auto &R : ReductionOperations) {
8854       RecipeBuilder.recordRecipeOf(R);
      // For min/max reductions, where we have a pair of icmp/select, we also
8856       // need to record the ICmp recipe, so it can be removed later.
8857       assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
8858              "Only min/max recurrences allowed for inloop reductions");
8859       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
8860         RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
8861     }
8862   }
8863 
8864   // For each interleave group which is relevant for this (possibly trimmed)
8865   // Range, add it to the set of groups to be later applied to the VPlan and add
8866   // placeholders for its members' Recipes which we'll be replacing with a
8867   // single VPInterleaveRecipe.
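  //
  // For example (sketch), adjacent loads of a[2*i] and a[2*i+1] may form a
  // factor-2 interleave group; the individual widened-load recipes of its
  // members are later replaced by one VPInterleaveRecipe at the group's
  // insertion position.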
8868   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
8869     auto applyIG = [IG, this](ElementCount VF) -> bool {
8870       return (VF.isVector() && // Query is illegal for VF == 1
8871               CM.getWideningDecision(IG->getInsertPos(), VF) ==
8872                   LoopVectorizationCostModel::CM_Interleave);
8873     };
8874     if (!getDecisionAndClampRange(applyIG, Range))
8875       continue;
8876     InterleaveGroups.insert(IG);
8877     for (unsigned i = 0; i < IG->getFactor(); i++)
8878       if (Instruction *Member = IG->getMember(i))
8879         RecipeBuilder.recordRecipeOf(Member);
  }
8881 
8882   // ---------------------------------------------------------------------------
8883   // Build initial VPlan: Scan the body of the loop in a topological order to
8884   // visit each basic block after having visited its predecessor basic blocks.
8885   // ---------------------------------------------------------------------------
8886 
8887   // Create initial VPlan skeleton, with separate header and latch blocks.
8888   VPBasicBlock *HeaderVPBB = new VPBasicBlock();
8889   VPBasicBlock *LatchVPBB = new VPBasicBlock("vector.latch");
8890   VPBlockUtils::insertBlockAfter(LatchVPBB, HeaderVPBB);
8891   auto *TopRegion = new VPRegionBlock(HeaderVPBB, LatchVPBB, "vector loop");
8892   auto Plan = std::make_unique<VPlan>(TopRegion);
8893 
8894   Instruction *DLInst =
8895       getDebugLocFromInstOrOperands(Legal->getPrimaryInduction());
8896   addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(),
8897                         DLInst ? DLInst->getDebugLoc() : DebugLoc(),
8898                         !CM.foldTailByMasking(), false);
8899 
8900   // Scan the body of the loop in a topological order to visit each basic block
8901   // after having visited its predecessor basic blocks.
8902   LoopBlocksDFS DFS(OrigLoop);
8903   DFS.perform(LI);
8904 
8905   VPBasicBlock *VPBB = HeaderVPBB;
8906   SmallVector<VPWidenIntOrFpInductionRecipe *> InductionsToMove;
8907   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
8908     // Relevant instructions from basic block BB will be grouped into VPRecipe
8909     // ingredients and fill a new VPBasicBlock.
8910     unsigned VPBBsForBB = 0;
8911     VPBB->setName(BB->getName());
8912     Builder.setInsertPoint(VPBB);
8913 
8914     // Introduce each ingredient into VPlan.
    // TODO: Model and preserve debug intrinsics in VPlan.
8916     for (Instruction &I : BB->instructionsWithoutDebug()) {
8917       Instruction *Instr = &I;
8918 
8919       // First filter out irrelevant instructions, to ensure no recipes are
8920       // built for them.
8921       if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
8922         continue;
8923 
8924       SmallVector<VPValue *, 4> Operands;
8925       auto *Phi = dyn_cast<PHINode>(Instr);
8926       if (Phi && Phi->getParent() == OrigLoop->getHeader()) {
8927         Operands.push_back(Plan->getOrAddVPValue(
8928             Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())));
8929       } else {
8930         auto OpRange = Plan->mapToVPValues(Instr->operands());
8931         Operands = {OpRange.begin(), OpRange.end()};
8932       }
8933       if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe(
8934               Instr, Operands, Range, Plan)) {
8935         // If Instr can be simplified to an existing VPValue, use it.
8936         if (RecipeOrValue.is<VPValue *>()) {
8937           auto *VPV = RecipeOrValue.get<VPValue *>();
8938           Plan->addVPValue(Instr, VPV);
8939           // If the re-used value is a recipe, register the recipe for the
8940           // instruction, in case the recipe for Instr needs to be recorded.
8941           if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef()))
8942             RecipeBuilder.setRecipe(Instr, R);
8943           continue;
8944         }
8945         // Otherwise, add the new recipe.
8946         VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>();
8947         for (auto *Def : Recipe->definedValues()) {
8948           auto *UV = Def->getUnderlyingValue();
8949           Plan->addVPValue(UV, Def);
8950         }
8951 
8952         if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) &&
8953             HeaderVPBB->getFirstNonPhi() != VPBB->end()) {
8954           // Keep track of VPWidenIntOrFpInductionRecipes not in the phi section
8955           // of the header block. That can happen for truncates of induction
8956           // variables. Those recipes are moved to the phi section of the header
8957           // block after applying SinkAfter, which relies on the original
8958           // position of the trunc.
8959           assert(isa<TruncInst>(Instr));
8960           InductionsToMove.push_back(
8961               cast<VPWidenIntOrFpInductionRecipe>(Recipe));
8962         }
8963         RecipeBuilder.setRecipe(Instr, Recipe);
8964         VPBB->appendRecipe(Recipe);
8965         continue;
8966       }
8967 
8968       // Otherwise, if all widening options failed, Instruction is to be
8969       // replicated. This may create a successor for VPBB.
8970       VPBasicBlock *NextVPBB =
8971           RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan);
8972       if (NextVPBB != VPBB) {
8973         VPBB = NextVPBB;
8974         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
8975                                     : "");
8976       }
8977     }
8978 
8979     VPBlockUtils::insertBlockAfter(new VPBasicBlock(), VPBB);
8980     VPBB = cast<VPBasicBlock>(VPBB->getSingleSuccessor());
8981   }
8982 
8983   // Fold the last, empty block into its predecessor.
8984   VPBB = VPBlockUtils::tryToMergeBlockIntoPredecessor(VPBB);
8985   assert(VPBB && "expected to fold last (empty) block");
8986   // After here, VPBB should not be used.
8987   VPBB = nullptr;
8988 
8989   assert(isa<VPRegionBlock>(Plan->getEntry()) &&
8990          !Plan->getEntry()->getEntryBasicBlock()->empty() &&
8991          "entry block must be set to a VPRegionBlock having a non-empty entry "
8992          "VPBasicBlock");
8993   RecipeBuilder.fixHeaderPhis();
8994 
8995   // ---------------------------------------------------------------------------
8996   // Transform initial VPlan: Apply previously taken decisions, in order, to
8997   // bring the VPlan to its final state.
8998   // ---------------------------------------------------------------------------
8999 
9000   // Apply Sink-After legal constraints.
9001   auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * {
9002     auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent());
9003     if (Region && Region->isReplicator()) {
9004       assert(Region->getNumSuccessors() == 1 &&
9005              Region->getNumPredecessors() == 1 && "Expected SESE region!");
9006       assert(R->getParent()->size() == 1 &&
9007              "A recipe in an original replicator region must be the only "
9008              "recipe in its block");
9009       return Region;
9010     }
9011     return nullptr;
9012   };
9013   for (auto &Entry : SinkAfter) {
9014     VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
9015     VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
9016 
9017     auto *TargetRegion = GetReplicateRegion(Target);
9018     auto *SinkRegion = GetReplicateRegion(Sink);
9019     if (!SinkRegion) {
9020       // If the sink source is not a replicate region, sink the recipe directly.
9021       if (TargetRegion) {
9022         // The target is in a replication region, make sure to move Sink to
9023         // the block after it, not into the replication region itself.
9024         VPBasicBlock *NextBlock =
9025             cast<VPBasicBlock>(TargetRegion->getSuccessors().front());
9026         Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi());
9027       } else
9028         Sink->moveAfter(Target);
9029       continue;
9030     }
9031 
9032     // The sink source is in a replicate region. Unhook the region from the CFG.
9033     auto *SinkPred = SinkRegion->getSinglePredecessor();
9034     auto *SinkSucc = SinkRegion->getSingleSuccessor();
9035     VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion);
9036     VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc);
9037     VPBlockUtils::connectBlocks(SinkPred, SinkSucc);
9038 
9039     if (TargetRegion) {
9040       // The target recipe is also in a replicate region, move the sink region
9041       // after the target region.
9042       auto *TargetSucc = TargetRegion->getSingleSuccessor();
9043       VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc);
9044       VPBlockUtils::connectBlocks(TargetRegion, SinkRegion);
9045       VPBlockUtils::connectBlocks(SinkRegion, TargetSucc);
9046     } else {
      // The sink source is in a replicate region, so we need to move the
      // whole replicate region, which should contain only a single recipe in
      // its main block.
9050       auto *SplitBlock =
9051           Target->getParent()->splitAt(std::next(Target->getIterator()));
9052 
9053       auto *SplitPred = SplitBlock->getSinglePredecessor();
9054 
9055       VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock);
9056       VPBlockUtils::connectBlocks(SplitPred, SinkRegion);
9057       VPBlockUtils::connectBlocks(SinkRegion, SplitBlock);
9058     }
9059   }
9060 
9061   VPlanTransforms::removeRedundantCanonicalIVs(*Plan);
9062   VPlanTransforms::removeRedundantInductionCasts(*Plan);
9063 
9064   // Now that sink-after is done, move induction recipes for optimized truncates
9065   // to the phi section of the header block.
9066   for (VPWidenIntOrFpInductionRecipe *Ind : InductionsToMove)
9067     Ind->moveBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi());
9068 
9069   // Adjust the recipes for any inloop reductions.
9070   adjustRecipesForReductions(cast<VPBasicBlock>(TopRegion->getExit()), Plan,
9071                              RecipeBuilder, Range.Start);
9072 
9073   // Introduce a recipe to combine the incoming and previous values of a
9074   // first-order recurrence.
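  //
  // Sketch of the splice semantics: the splice concatenates the last lane of
  // the previous vector iteration with the first VF-1 lanes of the current
  // one, e.g. for VF=4: splice(<p0,p1,p2,p3>, <c0,c1,c2,c3>) --> <p3,c0,c1,c2>.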
9075   for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) {
9076     auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R);
9077     if (!RecurPhi)
9078       continue;
9079 
9080     VPRecipeBase *PrevRecipe = RecurPhi->getBackedgeRecipe();
9081     VPBasicBlock *InsertBlock = PrevRecipe->getParent();
9082     auto *Region = GetReplicateRegion(PrevRecipe);
9083     if (Region)
9084       InsertBlock = cast<VPBasicBlock>(Region->getSingleSuccessor());
9085     if (Region || PrevRecipe->isPhi())
9086       Builder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi());
9087     else
9088       Builder.setInsertPoint(InsertBlock, std::next(PrevRecipe->getIterator()));
9089 
9090     auto *RecurSplice = cast<VPInstruction>(
9091         Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice,
9092                              {RecurPhi, RecurPhi->getBackedgeValue()}));
9093 
9094     RecurPhi->replaceAllUsesWith(RecurSplice);
9095     // Set the first operand of RecurSplice to RecurPhi again, after replacing
9096     // all users.
9097     RecurSplice->setOperand(0, RecurPhi);
9098   }
9099 
9100   // Interleave memory: for each Interleave Group we marked earlier as relevant
9101   // for this VPlan, replace the Recipes widening its memory instructions with a
9102   // single VPInterleaveRecipe at its insertion point.
9103   for (auto IG : InterleaveGroups) {
9104     auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
9105         RecipeBuilder.getRecipe(IG->getInsertPos()));
9106     SmallVector<VPValue *, 4> StoredValues;
9107     for (unsigned i = 0; i < IG->getFactor(); ++i)
9108       if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) {
9109         auto *StoreR =
9110             cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI));
9111         StoredValues.push_back(StoreR->getStoredValue());
9112       }
9113 
9114     auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues,
9115                                         Recipe->getMask());
9116     VPIG->insertBefore(Recipe);
9117     unsigned J = 0;
9118     for (unsigned i = 0; i < IG->getFactor(); ++i)
9119       if (Instruction *Member = IG->getMember(i)) {
9120         if (!Member->getType()->isVoidTy()) {
9121           VPValue *OriginalV = Plan->getVPValue(Member);
9122           Plan->removeVPValueFor(Member);
9123           Plan->addVPValue(Member, VPIG->getVPValue(J));
9124           OriginalV->replaceAllUsesWith(VPIG->getVPValue(J));
9125           J++;
9126         }
9127         RecipeBuilder.getRecipe(Member)->eraseFromParent();
9128       }
9129   }
9130 
  // From this point onwards, VPlan-to-VPlan transformations may change the
  // plan in ways that make looking up VPValues via their original IR values
  // incorrect.
9133   Plan->disableValue2VPValue();
9134 
9135   VPlanTransforms::optimizeInductions(*Plan, *PSE.getSE());
9136   VPlanTransforms::sinkScalarOperands(*Plan);
9137   VPlanTransforms::mergeReplicateRegions(*Plan);
9138   VPlanTransforms::removeDeadRecipes(*Plan, *OrigLoop);
9139 
9140   std::string PlanName;
9141   raw_string_ostream RSO(PlanName);
9142   ElementCount VF = Range.Start;
9143   Plan->addVF(VF);
9144   RSO << "Initial VPlan for VF={" << VF;
9145   for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) {
9146     Plan->addVF(VF);
9147     RSO << "," << VF;
9148   }
9149   RSO << "},UF>=1";
9150   RSO.flush();
9151   Plan->setName(PlanName);
9152 
9153   // Fold Exit block into its predecessor if possible.
9154   // TODO: Fold block earlier once all VPlan transforms properly maintain a
9155   // VPBasicBlock as exit.
9156   VPBlockUtils::tryToMergeBlockIntoPredecessor(TopRegion->getExit());
9157 
9158   assert(VPlanVerifier::verifyPlanIsValid(*Plan) && "VPlan is invalid");
9159   return Plan;
9160 }
9161 
9162 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
9163   // Outer loop handling: They may require CFG and instruction level
9164   // transformations before even evaluating whether vectorization is profitable.
9165   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
9166   // the vectorization pipeline.
9167   assert(!OrigLoop->isInnermost());
9168   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
9169 
9170   // Create new empty VPlan
9171   auto Plan = std::make_unique<VPlan>();
9172 
9173   // Build hierarchical CFG
9174   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
9175   HCFGBuilder.buildHierarchicalCFG();
9176 
9177   for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
9178        VF *= 2)
9179     Plan->addVF(VF);
9180 
9181   if (EnableVPlanPredication) {
9182     VPlanPredicator VPP(*Plan);
9183     VPP.predicate();
9184 
9185     // Avoid running transformation to recipes until masked code generation in
9186     // VPlan-native path is in place.
9187     return Plan;
9188   }
9189 
9190   SmallPtrSet<Instruction *, 1> DeadInstructions;
9191   VPlanTransforms::VPInstructionsToVPRecipes(
9192       OrigLoop, Plan,
9193       [this](PHINode *P) { return Legal->getIntOrFpInductionDescriptor(P); },
9194       DeadInstructions, *PSE.getSE());
9195 
9196   addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(), DebugLoc(),
9197                         true, true);
9198   return Plan;
9199 }
9200 
// Adjust the recipes for reductions. For in-loop reductions, the chain of
// instructions leading from the loop exit instruction to the phi needs to be
// converted to reductions, with one operand being vector and the other being
// the scalar reduction chain. For other reductions, a select is introduced
// between the phi and live-out recipes when folding the tail.
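//
// For example (sketch), an in-loop integer add reduction 'sum += a[i]' keeps
// 'sum' scalar across the vector loop: the widened add recipe is replaced by a
// VPReductionRecipe that reduces the vector operand into the scalar chain on
// every iteration.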
9206 void LoopVectorizationPlanner::adjustRecipesForReductions(
9207     VPBasicBlock *LatchVPBB, VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder,
9208     ElementCount MinVF) {
9209   for (auto &Reduction : CM.getInLoopReductionChains()) {
9210     PHINode *Phi = Reduction.first;
9211     const RecurrenceDescriptor &RdxDesc =
9212         Legal->getReductionVars().find(Phi)->second;
9213     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9214 
9215     if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc))
9216       continue;
9217 
    // ReductionOperations are ordered top-down from the phi's use to the
    // LoopExitValue. We keep track of the previous item (the Chain) to tell
    // which of the two operands will remain scalar and which will be reduced.
    // For min/max the chain will be the select instructions.
9222     Instruction *Chain = Phi;
9223     for (Instruction *R : ReductionOperations) {
9224       VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
9225       RecurKind Kind = RdxDesc.getRecurrenceKind();
9226 
9227       VPValue *ChainOp = Plan->getVPValue(Chain);
9228       unsigned FirstOpId;
9229       assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
9230              "Only min/max recurrences allowed for inloop reductions");
9231       // Recognize a call to the llvm.fmuladd intrinsic.
9232       bool IsFMulAdd = (Kind == RecurKind::FMulAdd);
9233       assert((!IsFMulAdd || RecurrenceDescriptor::isFMulAddIntrinsic(R)) &&
9234              "Expected instruction to be a call to the llvm.fmuladd intrinsic");
9235       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9236         assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
9237                "Expected to replace a VPWidenSelectSC");
9238         FirstOpId = 1;
9239       } else {
9240         assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe) ||
9241                 (IsFMulAdd && isa<VPWidenCallRecipe>(WidenRecipe))) &&
9242                "Expected to replace a VPWidenSC");
9243         FirstOpId = 0;
9244       }
9245       unsigned VecOpId =
9246           R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
9247       VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));
9248 
9249       auto *CondOp = CM.blockNeedsPredicationForAnyReason(R->getParent())
9250                          ? RecipeBuilder.createBlockInMask(R->getParent(), Plan)
9251                          : nullptr;
9252 
9253       if (IsFMulAdd) {
9254         // If the instruction is a call to the llvm.fmuladd intrinsic then we
9255         // need to create an fmul recipe to use as the vector operand for the
9256         // fadd reduction.
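        // That is, llvm.fmuladd(%a, %b, %acc) is treated as a multiply
        // %a * %b feeding an fadd reduction with %acc as the chain operand
        // (illustrative operand names).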
9257         VPInstruction *FMulRecipe = new VPInstruction(
9258             Instruction::FMul, {VecOp, Plan->getVPValue(R->getOperand(1))});
9259         FMulRecipe->setFastMathFlags(R->getFastMathFlags());
9260         WidenRecipe->getParent()->insert(FMulRecipe,
9261                                          WidenRecipe->getIterator());
9262         VecOp = FMulRecipe;
9263       }
9264       VPReductionRecipe *RedRecipe =
9265           new VPReductionRecipe(&RdxDesc, R, ChainOp, VecOp, CondOp, TTI);
9266       WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
9267       Plan->removeVPValueFor(R);
9268       Plan->addVPValue(R, RedRecipe);
9269       WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator());
9270       WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
9271       WidenRecipe->eraseFromParent();
9272 
9273       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9274         VPRecipeBase *CompareRecipe =
9275             RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
9276         assert(isa<VPWidenRecipe>(CompareRecipe) &&
9277                "Expected to replace a VPWidenSC");
9278         assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 &&
9279                "Expected no remaining users");
9280         CompareRecipe->eraseFromParent();
9281       }
9282       Chain = R;
9283     }
9284   }
9285 
9286   // If tail is folded by masking, introduce selects between the phi
9287   // and the live-out instruction of each reduction, at the beginning of the
9288   // dedicated latch block.
9289   if (CM.foldTailByMasking()) {
9290     Builder.setInsertPoint(LatchVPBB, LatchVPBB->begin());
9291     for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) {
9292       VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
9293       if (!PhiR || PhiR->isInLoop())
9294         continue;
9295       VPValue *Cond =
9296           RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
9297       VPValue *Red = PhiR->getBackedgeValue();
9298       assert(cast<VPRecipeBase>(Red->getDef())->getParent() != LatchVPBB &&
9299              "reduction recipe must be defined before latch");
9300       Builder.createNaryOp(Instruction::Select, {Cond, Red, PhiR});
9301     }
9302   }
9303 }
9304 
9305 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
9306 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
9307                                VPSlotTracker &SlotTracker) const {
9308   O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
9309   IG->getInsertPos()->printAsOperand(O, false);
9310   O << ", ";
9311   getAddr()->printAsOperand(O, SlotTracker);
9312   VPValue *Mask = getMask();
9313   if (Mask) {
9314     O << ", ";
9315     Mask->printAsOperand(O, SlotTracker);
9316   }
9317 
9318   unsigned OpIdx = 0;
9319   for (unsigned i = 0; i < IG->getFactor(); ++i) {
9320     if (!IG->getMember(i))
9321       continue;
9322     if (getNumStoreOperands() > 0) {
9323       O << "\n" << Indent << "  store ";
9324       getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker);
9325       O << " to index " << i;
9326     } else {
9327       O << "\n" << Indent << "  ";
9328       getVPValue(OpIdx)->printAsOperand(O, SlotTracker);
9329       O << " = load from index " << i;
9330     }
9331     ++OpIdx;
9332   }
9333 }
9334 #endif
9335 
9336 void VPWidenCallRecipe::execute(VPTransformState &State) {
9337   State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this,
9338                                   *this, State);
9339 }
9340 
9341 void VPWidenSelectRecipe::execute(VPTransformState &State) {
9342   auto &I = *cast<SelectInst>(getUnderlyingInstr());
9343   State.ILV->setDebugLocFromInst(&I);
9344 
  // The condition can be loop invariant but still defined inside the
9346   // loop. This means that we can't just use the original 'cond' value.
9347   // We have to take the 'vectorized' value and pick the first lane.
9348   // Instcombine will make this a no-op.
9349   auto *InvarCond =
9350       InvariantCond ? State.get(getOperand(0), VPIteration(0, 0)) : nullptr;
9351 
9352   for (unsigned Part = 0; Part < State.UF; ++Part) {
9353     Value *Cond = InvarCond ? InvarCond : State.get(getOperand(0), Part);
9354     Value *Op0 = State.get(getOperand(1), Part);
9355     Value *Op1 = State.get(getOperand(2), Part);
9356     Value *Sel = State.Builder.CreateSelect(Cond, Op0, Op1);
9357     State.set(this, Sel, Part);
9358     State.ILV->addMetadata(Sel, &I);
9359   }
9360 }
9361 
9362 void VPWidenRecipe::execute(VPTransformState &State) {
9363   auto &I = *cast<Instruction>(getUnderlyingValue());
9364   auto &Builder = State.Builder;
9365   switch (I.getOpcode()) {
9366   case Instruction::Call:
9367   case Instruction::Br:
9368   case Instruction::PHI:
9369   case Instruction::GetElementPtr:
9370   case Instruction::Select:
9371     llvm_unreachable("This instruction is handled by a different recipe.");
9372   case Instruction::UDiv:
9373   case Instruction::SDiv:
9374   case Instruction::SRem:
9375   case Instruction::URem:
9376   case Instruction::Add:
9377   case Instruction::FAdd:
9378   case Instruction::Sub:
9379   case Instruction::FSub:
9380   case Instruction::FNeg:
9381   case Instruction::Mul:
9382   case Instruction::FMul:
9383   case Instruction::FDiv:
9384   case Instruction::FRem:
9385   case Instruction::Shl:
9386   case Instruction::LShr:
9387   case Instruction::AShr:
9388   case Instruction::And:
9389   case Instruction::Or:
9390   case Instruction::Xor: {
9391     // Just widen unops and binops.
9392     State.ILV->setDebugLocFromInst(&I);
9393 
9394     for (unsigned Part = 0; Part < State.UF; ++Part) {
9395       SmallVector<Value *, 2> Ops;
9396       for (VPValue *VPOp : operands())
9397         Ops.push_back(State.get(VPOp, Part));
9398 
9399       Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
9400 
9401       if (auto *VecOp = dyn_cast<Instruction>(V)) {
9402         VecOp->copyIRFlags(&I);
9403 
        // If the instruction is vectorized and was in a basic block that
        // needed predication, we can't propagate poison-generating flags
        // (nuw/nsw, exact, etc.). The control flow has been linearized and the
        // instruction is no longer guarded by the predicate, so the flag
        // properties may no longer hold.
9409         if (State.MayGeneratePoisonRecipes.contains(this))
9410           VecOp->dropPoisonGeneratingFlags();
9411       }
9412 
9413       // Use this vector value for all users of the original instruction.
9414       State.set(this, V, Part);
9415       State.ILV->addMetadata(V, &I);
9416     }
9417 
9418     break;
9419   }
9420   case Instruction::ICmp:
9421   case Instruction::FCmp: {
9422     // Widen compares. Generate vector compares.
9423     bool FCmp = (I.getOpcode() == Instruction::FCmp);
9424     auto *Cmp = cast<CmpInst>(&I);
9425     State.ILV->setDebugLocFromInst(Cmp);
9426     for (unsigned Part = 0; Part < State.UF; ++Part) {
9427       Value *A = State.get(getOperand(0), Part);
9428       Value *B = State.get(getOperand(1), Part);
9429       Value *C = nullptr;
9430       if (FCmp) {
9431         // Propagate fast math flags.
9432         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
9433         Builder.setFastMathFlags(Cmp->getFastMathFlags());
9434         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
9435       } else {
9436         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
9437       }
9438       State.set(this, C, Part);
9439       State.ILV->addMetadata(C, &I);
9440     }
9441 
9442     break;
9443   }
9444 
9445   case Instruction::ZExt:
9446   case Instruction::SExt:
9447   case Instruction::FPToUI:
9448   case Instruction::FPToSI:
9449   case Instruction::FPExt:
9450   case Instruction::PtrToInt:
9451   case Instruction::IntToPtr:
9452   case Instruction::SIToFP:
9453   case Instruction::UIToFP:
9454   case Instruction::Trunc:
9455   case Instruction::FPTrunc:
9456   case Instruction::BitCast: {
9457     auto *CI = cast<CastInst>(&I);
9458     State.ILV->setDebugLocFromInst(CI);
9459 
    // Vectorize casts.
9461     Type *DestTy = (State.VF.isScalar())
9462                        ? CI->getType()
9463                        : VectorType::get(CI->getType(), State.VF);
9464 
9465     for (unsigned Part = 0; Part < State.UF; ++Part) {
9466       Value *A = State.get(getOperand(0), Part);
9467       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
9468       State.set(this, Cast, Part);
9469       State.ILV->addMetadata(Cast, &I);
9470     }
9471     break;
9472   }
9473   default:
9474     // This instruction is not vectorized by simple widening.
9475     LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
9476     llvm_unreachable("Unhandled instruction!");
9477   } // end of switch.
9478 }
9479 
9480 void VPWidenGEPRecipe::execute(VPTransformState &State) {
9481   auto *GEP = cast<GetElementPtrInst>(getUnderlyingInstr());
9482   // Construct a vector GEP by widening the operands of the scalar GEP as
9483   // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
9484   // results in a vector of pointers when at least one operand of the GEP
9485   // is vector-typed. Thus, to keep the representation compact, we only use
9486   // vector-typed operands for loop-varying values.
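  //
  // For example (sketch), 'getelementptr float, float* %base, i64 %idx' with a
  // widened %idx and a loop-invariant %base yields a <VF x float*> result
  // while %base itself stays scalar.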
9487 
9488   if (State.VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
9489     // If we are vectorizing, but the GEP has only loop-invariant operands,
9490     // the GEP we build (by only using vector-typed operands for
9491     // loop-varying values) would be a scalar pointer. Thus, to ensure we
9492     // produce a vector of pointers, we need to either arbitrarily pick an
9493     // operand to broadcast, or broadcast a clone of the original GEP.
9494     // Here, we broadcast a clone of the original.
9495     //
9496     // TODO: If at some point we decide to scalarize instructions having
9497     //       loop-invariant operands, this special case will no longer be
9498     //       required. We would add the scalarization decision to
9499     //       collectLoopScalars() and teach getVectorValue() to broadcast
9500     //       the lane-zero scalar value.
9501     auto *Clone = State.Builder.Insert(GEP->clone());
9502     for (unsigned Part = 0; Part < State.UF; ++Part) {
9503       Value *EntryPart = State.Builder.CreateVectorSplat(State.VF, Clone);
9504       State.set(this, EntryPart, Part);
9505       State.ILV->addMetadata(EntryPart, GEP);
9506     }
9507   } else {
9508     // If the GEP has at least one loop-varying operand, we are sure to
9509     // produce a vector of pointers. But if we are only unrolling, we want
9510     // to produce a scalar GEP for each unroll part. Thus, the GEP we
9511     // produce with the code below will be scalar (if VF == 1) or vector
9512     // (otherwise). Note that for the unroll-only case, we still maintain
9513     // values in the vector mapping with initVector, as we do for other
9514     // instructions.
9515     for (unsigned Part = 0; Part < State.UF; ++Part) {
9516       // The pointer operand of the new GEP. If it's loop-invariant, we
9517       // won't broadcast it.
9518       auto *Ptr = IsPtrLoopInvariant
9519                       ? State.get(getOperand(0), VPIteration(0, 0))
9520                       : State.get(getOperand(0), Part);
9521 
9522       // Collect all the indices for the new GEP. If any index is
9523       // loop-invariant, we won't broadcast it.
9524       SmallVector<Value *, 4> Indices;
9525       for (unsigned I = 1, E = getNumOperands(); I < E; I++) {
9526         VPValue *Operand = getOperand(I);
9527         if (IsIndexLoopInvariant[I - 1])
9528           Indices.push_back(State.get(Operand, VPIteration(0, 0)));
9529         else
9530           Indices.push_back(State.get(Operand, Part));
9531       }
9532 
      // If the GEP instruction is vectorized and was in a basic block that
      // needed predication, we can't propagate the poison-generating
      // 'inbounds' flag. The control flow has been linearized and the GEP is
      // no longer guarded by the predicate, so the 'inbounds' property may no
      // longer hold.
9538       bool IsInBounds =
9539           GEP->isInBounds() && State.MayGeneratePoisonRecipes.count(this) == 0;
9540 
9541       // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
9542       // but it should be a vector, otherwise.
9543       auto *NewGEP = IsInBounds
9544                          ? State.Builder.CreateInBoundsGEP(
9545                                GEP->getSourceElementType(), Ptr, Indices)
9546                          : State.Builder.CreateGEP(GEP->getSourceElementType(),
9547                                                    Ptr, Indices);
9548       assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
9549              "NewGEP is not a pointer vector");
9550       State.set(this, NewGEP, Part);
9551       State.ILV->addMetadata(NewGEP, GEP);
9552     }
9553   }
9554 }
9555 
9556 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
9557   assert(!State.Instance && "Int or FP induction being replicated.");
9558 
9559   Value *Start = getStartValue()->getLiveInIRValue();
9560   const InductionDescriptor &ID = getInductionDescriptor();
9561   TruncInst *Trunc = getTruncInst();
9562   IRBuilderBase &Builder = State.Builder;
9563   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
9564   assert(State.VF.isVector() && "must have vector VF");
9565 
9566   // The value from the original loop to which we are mapping the new induction
9567   // variable.
9568   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
9569 
9570   auto &DL = EntryVal->getModule()->getDataLayout();
9571 
9572   // Generate code for the induction step. Note that induction steps are
  // required to be loop-invariant.
9574   auto CreateStepValue = [&](const SCEV *Step) -> Value * {
9575     if (SE.isSCEVable(IV->getType())) {
9576       SCEVExpander Exp(SE, DL, "induction");
9577       return Exp.expandCodeFor(Step, Step->getType(),
9578                                State.CFG.VectorPreHeader->getTerminator());
9579     }
9580     return cast<SCEVUnknown>(Step)->getValue();
9581   };
9582 
9583   // Fast-math-flags propagate from the original induction instruction.
9584   IRBuilder<>::FastMathFlagGuard FMFG(Builder);
9585   if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp()))
9586     Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags());
9587 
9588   // Now do the actual transformations, and start with creating the step value.
9589   Value *Step = CreateStepValue(ID.getStep());
9590 
9591   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
9592          "Expected either an induction phi-node or a truncate of it!");
9593 
9594   // Construct the initial value of the vector IV in the vector loop preheader
9595   auto CurrIP = Builder.saveIP();
9596   Builder.SetInsertPoint(State.CFG.VectorPreHeader->getTerminator());
9597   if (isa<TruncInst>(EntryVal)) {
9598     assert(Start->getType()->isIntegerTy() &&
9599            "Truncation requires an integer type");
9600     auto *TruncType = cast<IntegerType>(EntryVal->getType());
9601     Step = Builder.CreateTrunc(Step, TruncType);
9602     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
9603   }
9604 
9605   Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0);
9606   Value *SplatStart = Builder.CreateVectorSplat(State.VF, Start);
9607   Value *SteppedStart = getStepVector(
9608       SplatStart, Zero, Step, ID.getInductionOpcode(), State.VF, State.Builder);
9609 
9610   // We create vector phi nodes for both integer and floating-point induction
9611   // variables. Here, we determine the kind of arithmetic we will perform.
9612   Instruction::BinaryOps AddOp;
9613   Instruction::BinaryOps MulOp;
9614   if (Step->getType()->isIntegerTy()) {
9615     AddOp = Instruction::Add;
9616     MulOp = Instruction::Mul;
9617   } else {
9618     AddOp = ID.getInductionOpcode();
9619     MulOp = Instruction::FMul;
9620   }
9621 
9622   // Multiply the vectorization factor by the step using integer or
9623   // floating-point arithmetic as appropriate.
9624   Type *StepType = Step->getType();
9625   Value *RuntimeVF;
9626   if (Step->getType()->isFloatingPointTy())
9627     RuntimeVF = getRuntimeVFAsFloat(Builder, StepType, State.VF);
9628   else
9629     RuntimeVF = getRuntimeVF(Builder, StepType, State.VF);
9630   Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF);
9631 
9632   // Create a vector splat to use in the induction update.
9633   //
9634   // FIXME: If the step is non-constant, we create the vector splat with
9635   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
9636   //        handle a constant vector splat.
9637   Value *SplatVF = isa<Constant>(Mul)
9638                        ? ConstantVector::getSplat(State.VF, cast<Constant>(Mul))
9639                        : Builder.CreateVectorSplat(State.VF, Mul);
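  // SplatVF is thus (a sketch) a splat of RuntimeVF * Step: the amount each
  // widened IV value advances from one part, and one vector iteration, to the
  // next.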
9640   Builder.restoreIP(CurrIP);
9641 
9642   // We may need to add the step a number of times, depending on the unroll
9643   // factor. The last of those goes into the PHI.
9644   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
9645                                     &*State.CFG.PrevBB->getFirstInsertionPt());
9646   VecInd->setDebugLoc(EntryVal->getDebugLoc());
9647   Instruction *LastInduction = VecInd;
9648   for (unsigned Part = 0; Part < State.UF; ++Part) {
9649     State.set(this, LastInduction, Part);
9650 
9651     if (isa<TruncInst>(EntryVal))
9652       State.ILV->addMetadata(LastInduction, EntryVal);
9653 
9654     LastInduction = cast<Instruction>(
9655         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"));
9656     LastInduction->setDebugLoc(EntryVal->getDebugLoc());
9657   }
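
  // A sketch with UF = 2: Part 0 uses vec.ind (the phi, starting at
  // SteppedStart) and Part 1 uses step.add = vec.ind + SplatVF; the final
  // step.add (renamed vec.ind.next below) becomes the phi's backedge value.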
9658 
9659   // Move the last step to the end of the latch block. This ensures consistent
9660   // placement of all induction updates.
9661   auto *LoopVectorLatch =
9662       State.LI->getLoopFor(State.CFG.PrevBB)->getLoopLatch();
9663   auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
9664   LastInduction->moveBefore(Br);
9665   LastInduction->setName("vec.ind.next");
9666 
9667   VecInd->addIncoming(SteppedStart, State.CFG.VectorPreHeader);
9668   VecInd->addIncoming(LastInduction, LoopVectorLatch);
9669 }
9670 
9671 void VPScalarIVStepsRecipe::execute(VPTransformState &State) {
9672   assert(!State.Instance && "VPScalarIVStepsRecipe being replicated.");
9673 
9674   // Fast-math-flags propagate from the original induction instruction.
9675   IRBuilder<>::FastMathFlagGuard FMFG(State.Builder);
9676   if (IndDesc.getInductionBinOp() &&
9677       isa<FPMathOperator>(IndDesc.getInductionBinOp()))
9678     State.Builder.setFastMathFlags(
9679         IndDesc.getInductionBinOp()->getFastMathFlags());
9680 
9681   Value *Step = State.get(getStepValue(), VPIteration(0, 0));
9682   auto CreateScalarIV = [&](Value *&Step) -> Value * {
9683     Value *ScalarIV = State.get(getCanonicalIV(), VPIteration(0, 0));
9684     auto *CanonicalIV = State.get(getParent()->getPlan()->getCanonicalIV(), 0);
9685     if (!isCanonical() || CanonicalIV->getType() != Ty) {
9686       ScalarIV =
9687           Ty->isIntegerTy()
9688               ? State.Builder.CreateSExtOrTrunc(ScalarIV, Ty)
9689               : State.Builder.CreateCast(Instruction::SIToFP, ScalarIV, Ty);
9690       ScalarIV = emitTransformedIndex(State.Builder, ScalarIV,
9691                                       getStartValue()->getLiveInIRValue(), Step,
9692                                       IndDesc);
9693       ScalarIV->setName("offset.idx");
9694     }
9695     if (TruncToTy) {
9696       assert(Step->getType()->isIntegerTy() &&
9697              "Truncation requires an integer step");
9698       ScalarIV = State.Builder.CreateTrunc(ScalarIV, TruncToTy);
9699       Step = State.Builder.CreateTrunc(Step, TruncToTy);
9700     }
9701     return ScalarIV;
9702   };
9703 
9704   Value *ScalarIV = CreateScalarIV(Step);
9705   if (State.VF.isVector()) {
9706     buildScalarSteps(ScalarIV, Step, IndDesc, this, State);
9707     return;
9708   }
9709 
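  // VF is scalar at this point, so each unrolled part gets its own scalar
  // value; a sketch of the loop below: part P computes ScalarIV + P * Step
  // (the FP case uses FMul and the induction opcode instead).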
9710   for (unsigned Part = 0; Part < State.UF; ++Part) {
9711     assert(!State.VF.isScalable() && "scalable vectors not yet supported.");
9712     Value *EntryPart;
9713     if (Step->getType()->isFloatingPointTy()) {
9714       Value *StartIdx =
9715           getRuntimeVFAsFloat(State.Builder, Step->getType(), State.VF * Part);
9716       // Floating-point operations inherit FMF via the builder's flags.
9717       Value *MulOp = State.Builder.CreateFMul(StartIdx, Step);
9718       EntryPart = State.Builder.CreateBinOp(IndDesc.getInductionOpcode(),
9719                                             ScalarIV, MulOp);
9720     } else {
9721       Value *StartIdx =
9722           getRuntimeVF(State.Builder, Step->getType(), State.VF * Part);
9723       EntryPart = State.Builder.CreateAdd(
9724           ScalarIV, State.Builder.CreateMul(StartIdx, Step), "induction");
9725     }
9726     State.set(this, EntryPart, Part);
9727   }
9728 }
9729 
9730 void VPWidenPHIRecipe::execute(VPTransformState &State) {
9731   State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), this,
9732                                  State);
9733 }
9734 
9735 void VPBlendRecipe::execute(VPTransformState &State) {
9736   State.ILV->setDebugLocFromInst(Phi, &State.Builder);
9737   // We know that all PHIs in non-header blocks are converted into
9738   // selects, so we don't have to worry about the insertion order and we
9739   // can just use the builder.
9740   // At this point we generate the predication tree. There may be
9741   // duplications since this is a simple recursive scan, but future
9742   // optimizations will clean it up.
9743 
9744   unsigned NumIncoming = getNumIncomingValues();
9745 
9746   // Generate a sequence of selects of the form:
9747   // SELECT(Mask3, In3,
9748   //        SELECT(Mask2, In2,
9749   //               SELECT(Mask1, In1,
9750   //                      In0)))
  // Note that Mask0 is never used: lanes for which no path reaches this phi
  // are essentially undef and are taken from In0.
9753   InnerLoopVectorizer::VectorParts Entry(State.UF);
9754   for (unsigned In = 0; In < NumIncoming; ++In) {
9755     for (unsigned Part = 0; Part < State.UF; ++Part) {
9756       // We might have single edge PHIs (blocks) - use an identity
9757       // 'select' for the first PHI operand.
9758       Value *In0 = State.get(getIncomingValue(In), Part);
9759       if (In == 0)
9760         Entry[Part] = In0; // Initialize with the first incoming value.
9761       else {
9762         // Select between the current value and the previous incoming edge
9763         // based on the incoming mask.
9764         Value *Cond = State.get(getMask(In), Part);
9765         Entry[Part] =
9766             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
9767       }
9768     }
9769   }
9770   for (unsigned Part = 0; Part < State.UF; ++Part)
9771     State.set(this, Entry[Part], Part);
9772 }
9773 
9774 void VPInterleaveRecipe::execute(VPTransformState &State) {
9775   assert(!State.Instance && "Interleave group being replicated.");
9776   State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(),
9777                                       getStoredValues(), getMask());
9778 }
9779 
9780 void VPReductionRecipe::execute(VPTransformState &State) {
9781   assert(!State.Instance && "Reduction being replicated.");
9782   Value *PrevInChain = State.get(getChainOp(), 0);
9783   RecurKind Kind = RdxDesc->getRecurrenceKind();
9784   bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc);
9785   // Propagate the fast-math flags carried by the underlying instruction.
9786   IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
9787   State.Builder.setFastMathFlags(RdxDesc->getFastMathFlags());
9788   for (unsigned Part = 0; Part < State.UF; ++Part) {
9789     Value *NewVecOp = State.get(getVecOp(), Part);
9790     if (VPValue *Cond = getCondOp()) {
9791       Value *NewCond = State.get(Cond, Part);
9792       VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
9793       Value *Iden = RdxDesc->getRecurrenceIdentity(
9794           Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags());
9795       Value *IdenVec =
9796           State.Builder.CreateVectorSplat(VecTy->getElementCount(), Iden);
9797       Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
9798       NewVecOp = Select;
9799     }
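    // The select above keeps masked-off lanes at the reduction identity; for
    // example, an integer add reduction uses 0, so those lanes contribute
    // nothing (a sketch of the common case).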
9800     Value *NewRed;
9801     Value *NextInChain;
9802     if (IsOrdered) {
9803       if (State.VF.isVector())
9804         NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp,
9805                                         PrevInChain);
9806       else
9807         NewRed = State.Builder.CreateBinOp(
9808             (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), PrevInChain,
9809             NewVecOp);
9810       PrevInChain = NewRed;
9811     } else {
9812       PrevInChain = State.get(getChainOp(), Part);
9813       NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
9814     }
9815     if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9816       NextInChain =
9817           createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
9818                          NewRed, PrevInChain);
9819     } else if (IsOrdered)
9820       NextInChain = NewRed;
9821     else
9822       NextInChain = State.Builder.CreateBinOp(
9823           (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), NewRed,
9824           PrevInChain);
9825     State.set(this, NextInChain, Part);
9826   }
9827 }
9828 
9829 void VPReplicateRecipe::execute(VPTransformState &State) {
9830   if (State.Instance) { // Generate a single instance.
9831     assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
9832     State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *State.Instance,
9833                                     IsPredicated, State);
9834     // Insert scalar instance packing it into a vector.
9835     if (AlsoPack && State.VF.isVector()) {
9836       // If we're constructing lane 0, initialize to start from poison.
9837       if (State.Instance->Lane.isFirstLane()) {
9838         assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
9839         Value *Poison = PoisonValue::get(
9840             VectorType::get(getUnderlyingValue()->getType(), State.VF));
9841         State.set(this, Poison, State.Instance->Part);
9842       }
9843       State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
9844     }
9845     return;
9846   }
9847 
9848   // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane for
  // each of the UF parts.
9851   unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
9852   assert((!State.VF.isScalable() || IsUniform) &&
9853          "Can't scalarize a scalable vector");
9854   for (unsigned Part = 0; Part < State.UF; ++Part)
9855     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
9856       State.ILV->scalarizeInstruction(getUnderlyingInstr(), this,
9857                                       VPIteration(Part, Lane), IsPredicated,
9858                                       State);
9859 }
9860 
9861 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
9862   assert(State.Instance && "Branch on Mask works only on single instance.");
9863 
9864   unsigned Part = State.Instance->Part;
9865   unsigned Lane = State.Instance->Lane.getKnownLane();
9866 
9867   Value *ConditionBit = nullptr;
9868   VPValue *BlockInMask = getMask();
9869   if (BlockInMask) {
9870     ConditionBit = State.get(BlockInMask, Part);
9871     if (ConditionBit->getType()->isVectorTy())
9872       ConditionBit = State.Builder.CreateExtractElement(
9873           ConditionBit, State.Builder.getInt32(Lane));
9874   } else // Block in mask is all-one.
9875     ConditionBit = State.Builder.getTrue();
9876 
9877   // Replace the temporary unreachable terminator with a new conditional branch,
9878   // whose two destinations will be set later when they are created.
9879   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
9880   assert(isa<UnreachableInst>(CurrentTerminator) &&
9881          "Expected to replace unreachable terminator with conditional branch.");
9882   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
9883   CondBr->setSuccessor(0, nullptr);
9884   ReplaceInstWithInst(CurrentTerminator, CondBr);
9885 }
9886 
9887 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
9888   assert(State.Instance && "Predicated instruction PHI works per instance.");
9889   Instruction *ScalarPredInst =
9890       cast<Instruction>(State.get(getOperand(0), *State.Instance));
9891   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
9892   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
9893   assert(PredicatingBB && "Predicated block has no single predecessor.");
9894   assert(isa<VPReplicateRecipe>(getOperand(0)) &&
9895          "operand must be VPReplicateRecipe");
9896 
9897   // By current pack/unpack logic we need to generate only a single phi node: if
9898   // a vector value for the predicated instruction exists at this point it means
9899   // the instruction has vector users only, and a phi for the vector value is
9900   // needed. In this case the recipe of the predicated instruction is marked to
9901   // also do that packing, thereby "hoisting" the insert-element sequence.
9902   // Otherwise, a phi node for the scalar value is needed.
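  // A sketch of the vector case: the phi merges the unmodified vector coming
  // from the predicating block with the insertelement result produced in the
  // predicated block, roughly
  //   %vphi = phi <VF x Ty> [ %vec, %PredicatingBB ], [ %iei, %PredicatedBB ]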
9903   unsigned Part = State.Instance->Part;
9904   if (State.hasVectorValue(getOperand(0), Part)) {
9905     Value *VectorValue = State.get(getOperand(0), Part);
9906     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
9907     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
9908     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
9909     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
9910     if (State.hasVectorValue(this, Part))
9911       State.reset(this, VPhi, Part);
9912     else
9913       State.set(this, VPhi, Part);
9914     // NOTE: Currently we need to update the value of the operand, so the next
9915     // predicated iteration inserts its generated value in the correct vector.
9916     State.reset(getOperand(0), VPhi, Part);
9917   } else {
9918     Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType();
9919     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
9920     Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
9921                      PredicatingBB);
9922     Phi->addIncoming(ScalarPredInst, PredicatedBB);
9923     if (State.hasScalarValue(this, *State.Instance))
9924       State.reset(this, Phi, *State.Instance);
9925     else
9926       State.set(this, Phi, *State.Instance);
9927     // NOTE: Currently we need to update the value of the operand, so the next
9928     // predicated iteration inserts its generated value in the correct vector.
9929     State.reset(getOperand(0), Phi, *State.Instance);
9930   }
9931 }
9932 
9933 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
9934   VPValue *StoredValue = isStore() ? getStoredValue() : nullptr;
9935 
9936   // Attempt to issue a wide load.
9937   LoadInst *LI = dyn_cast<LoadInst>(&Ingredient);
9938   StoreInst *SI = dyn_cast<StoreInst>(&Ingredient);
9939 
9940   assert((LI || SI) && "Invalid Load/Store instruction");
9941   assert((!SI || StoredValue) && "No stored value provided for widened store");
9942   assert((!LI || !StoredValue) && "Stored value provided for widened load");
9943 
9944   Type *ScalarDataTy = getLoadStoreType(&Ingredient);
9945 
9946   auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
9947   const Align Alignment = getLoadStoreAlignment(&Ingredient);
9948   bool CreateGatherScatter = !Consecutive;
9949 
9950   auto &Builder = State.Builder;
9951   InnerLoopVectorizer::VectorParts BlockInMaskParts(State.UF);
9952   bool isMaskRequired = getMask();
9953   if (isMaskRequired)
9954     for (unsigned Part = 0; Part < State.UF; ++Part)
9955       BlockInMaskParts[Part] = State.get(getMask(), Part);
9956 
9957   const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
9958     // Calculate the pointer for the specific unroll-part.
9959     GetElementPtrInst *PartPtr = nullptr;
9960 
9961     bool InBounds = false;
9962     if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
9963       InBounds = gep->isInBounds();
9964     if (Reverse) {
9965       // If the address is consecutive but reversed, then the
9966       // wide store needs to start at the last vector element.
      // RunTimeVF = VScale * VF.getKnownMinValue()
      // For fixed-width vectors, VScale is 1, so
      // RunTimeVF = VF.getKnownMinValue().
9969       Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), State.VF);
9970       // NumElt = -Part * RunTimeVF
9971       Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF);
9972       // LastLane = 1 - RunTimeVF
9973       Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF);
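      // A sketch with fixed-width VF = 4 and Part = 1: RunTimeVF = 4, so
      // NumElt = -4 and LastLane = -3; the two GEPs below place PartPtr at
      // Ptr - 7, and the wide access covers the four elements ending at
      // Ptr - 4 (the data and mask are reversed separately).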
9974       PartPtr =
9975           cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt));
9976       PartPtr->setIsInBounds(InBounds);
9977       PartPtr = cast<GetElementPtrInst>(
9978           Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane));
9979       PartPtr->setIsInBounds(InBounds);
9980       if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
9981         BlockInMaskParts[Part] =
9982             Builder.CreateVectorReverse(BlockInMaskParts[Part], "reverse");
9983     } else {
9984       Value *Increment =
9985           createStepForVF(Builder, Builder.getInt32Ty(), State.VF, Part);
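      // A sketch of the consecutive case: the increment for part P is
      // P * RunTimeVF elements, so successive parts access successive VF-wide
      // chunks of memory.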
9986       PartPtr = cast<GetElementPtrInst>(
9987           Builder.CreateGEP(ScalarDataTy, Ptr, Increment));
9988       PartPtr->setIsInBounds(InBounds);
9989     }
9990 
9991     unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
9992     return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
9993   };
9994 
9995   // Handle Stores:
9996   if (SI) {
9997     State.ILV->setDebugLocFromInst(SI);
9998 
9999     for (unsigned Part = 0; Part < State.UF; ++Part) {
10000       Instruction *NewSI = nullptr;
10001       Value *StoredVal = State.get(StoredValue, Part);
10002       if (CreateGatherScatter) {
10003         Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
10004         Value *VectorGep = State.get(getAddr(), Part);
10005         NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
10006                                             MaskPart);
10007       } else {
10008         if (Reverse) {
10009           // If we store to reverse consecutive memory locations, then we need
10010           // to reverse the order of elements in the stored value.
10011           StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse");
10012           // We don't want to update the value in the map as it might be used in
10013           // another expression. So don't call resetVectorValue(StoredVal).
10014         }
10015         auto *VecPtr =
10016             CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0)));
10017         if (isMaskRequired)
10018           NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
10019                                             BlockInMaskParts[Part]);
10020         else
10021           NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
10022       }
10023       State.ILV->addMetadata(NewSI, SI);
10024     }
10025     return;
10026   }
10027 
10028   // Handle loads.
10029   assert(LI && "Must have a load instruction");
10030   State.ILV->setDebugLocFromInst(LI);
10031   for (unsigned Part = 0; Part < State.UF; ++Part) {
10032     Value *NewLI;
10033     if (CreateGatherScatter) {
10034       Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
10035       Value *VectorGep = State.get(getAddr(), Part);
10036       NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart,
10037                                          nullptr, "wide.masked.gather");
10038       State.ILV->addMetadata(NewLI, LI);
10039     } else {
10040       auto *VecPtr =
10041           CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0)));
10042       if (isMaskRequired)
10043         NewLI = Builder.CreateMaskedLoad(
10044             DataTy, VecPtr, Alignment, BlockInMaskParts[Part],
10045             PoisonValue::get(DataTy), "wide.masked.load");
10046       else
10047         NewLI =
10048             Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
10049 
      // Add metadata to the load, but set the mapped vector value to the
      // reverse shuffle.
10051       State.ILV->addMetadata(NewLI, LI);
10052       if (Reverse)
10053         NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
10054     }
10055 
10056     State.set(this, NewLI, Part);
10057   }
10058 }
10059 
10060 // Determine how to lower the scalar epilogue, which depends on 1) optimising
10061 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
10062 // predication, and 4) a TTI hook that analyses whether the loop is suitable
10063 // for predication.
10064 static ScalarEpilogueLowering getScalarEpilogueLowering(
10065     Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
10066     BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
10067     AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
10068     LoopVectorizationLegality &LVL) {
10069   // 1) OptSize takes precedence over all other options, i.e. if this is set,
10070   // don't look at hints or options, and don't request a scalar epilogue.
10071   // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
10072   // LoopAccessInfo (due to code dependency and not being able to reliably get
10073   // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
10074   // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
10075   // versioning when the vectorization is forced, unlike hasOptSize. So revert
10076   // back to the old way and vectorize with versioning when forced. See D81345.)
10077   if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
10078                                                       PGSOQueryType::IRPass) &&
10079                           Hints.getForce() != LoopVectorizeHints::FK_Enabled))
10080     return CM_ScalarEpilogueNotAllowedOptSize;
10081 
10082   // 2) If set, obey the directives
10083   if (PreferPredicateOverEpilogue.getNumOccurrences()) {
10084     switch (PreferPredicateOverEpilogue) {
10085     case PreferPredicateTy::ScalarEpilogue:
10086       return CM_ScalarEpilogueAllowed;
10087     case PreferPredicateTy::PredicateElseScalarEpilogue:
10088       return CM_ScalarEpilogueNotNeededUsePredicate;
10089     case PreferPredicateTy::PredicateOrDontVectorize:
10090       return CM_ScalarEpilogueNotAllowedUsePredicate;
    }
10092   }
10093 
10094   // 3) If set, obey the hints
10095   switch (Hints.getPredicate()) {
10096   case LoopVectorizeHints::FK_Enabled:
10097     return CM_ScalarEpilogueNotNeededUsePredicate;
10098   case LoopVectorizeHints::FK_Disabled:
10099     return CM_ScalarEpilogueAllowed;
  }
10101 
10102   // 4) if the TTI hook indicates this is profitable, request predication.
10103   if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
10104                                        LVL.getLAI()))
10105     return CM_ScalarEpilogueNotNeededUsePredicate;
10106 
10107   return CM_ScalarEpilogueAllowed;
10108 }
10109 
10110 Value *VPTransformState::get(VPValue *Def, unsigned Part) {
  // If Values have been set for this Def, return the one relevant for \p Part.
10112   if (hasVectorValue(Def, Part))
10113     return Data.PerPartOutput[Def][Part];
10114 
10115   if (!hasScalarValue(Def, {Part, 0})) {
10116     Value *IRV = Def->getLiveInIRValue();
10117     Value *B = ILV->getBroadcastInstrs(IRV);
10118     set(Def, B, Part);
10119     return B;
10120   }
10121 
10122   Value *ScalarValue = get(Def, {Part, 0});
10123   // If we aren't vectorizing, we can just copy the scalar map values over
10124   // to the vector map.
10125   if (VF.isScalar()) {
10126     set(Def, ScalarValue, Part);
10127     return ScalarValue;
10128   }
10129 
10130   auto *RepR = dyn_cast<VPReplicateRecipe>(Def);
10131   bool IsUniform = RepR && RepR->isUniform();
10132 
10133   unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
10134   // Check if there is a scalar value for the selected lane.
10135   if (!hasScalarValue(Def, {Part, LastLane})) {
10136     // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform.
10137     assert((isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) ||
10138             isa<VPScalarIVStepsRecipe>(Def->getDef())) &&
10139            "unexpected recipe found to be invariant");
10140     IsUniform = true;
10141     LastLane = 0;
10142   }
10143 
10144   auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
10145   // Set the insert point after the last scalarized instruction or after the
10146   // last PHI, if LastInst is a PHI. This ensures the insertelement sequence
10147   // will directly follow the scalar definitions.
10148   auto OldIP = Builder.saveIP();
10149   auto NewIP =
10150       isa<PHINode>(LastInst)
10151           ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI())
10152           : std::next(BasicBlock::iterator(LastInst));
10153   Builder.SetInsertPoint(&*NewIP);
10154 
10155   // However, if we are vectorizing, we need to construct the vector values.
10156   // If the value is known to be uniform after vectorization, we can just
10157   // broadcast the scalar value corresponding to lane zero for each unroll
10158   // iteration. Otherwise, we construct the vector values using
10159   // insertelement instructions. Since the resulting vectors are stored in
10160   // State, we will only generate the insertelements once.
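  // A sketch of the non-uniform case, assembling the vector lane by lane:
  //   %v0 = insertelement <VF x Ty> poison, Ty %lane0, i32 0
  //   %v1 = insertelement <VF x Ty> %v0, Ty %lane1, i32 1
  //   ... one insertelement per lane ...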
10161   Value *VectorValue = nullptr;
10162   if (IsUniform) {
10163     VectorValue = ILV->getBroadcastInstrs(ScalarValue);
10164     set(Def, VectorValue, Part);
10165   } else {
    // Initialize packing with insertelements to start from poison.
    assert(!VF.isScalable() && "VF is assumed to be non scalable.");
    Value *Poison = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
    set(Def, Poison, Part);
10170     for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
10171       ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
10172     VectorValue = get(Def, Part);
10173   }
10174   Builder.restoreIP(OldIP);
10175   return VectorValue;
10176 }
10177 
10178 // Process the loop in the VPlan-native vectorization path. This path builds
// VPlan upfront in the vectorization pipeline, which allows applying
10180 // VPlan-to-VPlan transformations from the very beginning without modifying the
10181 // input LLVM IR.
10182 static bool processLoopInVPlanNativePath(
10183     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
10184     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
10185     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
10186     OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
10187     ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints,
10188     LoopVectorizationRequirements &Requirements) {
10189 
10190   if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
10191     LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
10192     return false;
10193   }
10194   assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
10195   Function *F = L->getHeader()->getParent();
10196   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
10197 
10198   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
10199       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
10200 
10201   LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
10202                                 &Hints, IAI);
10203   // Use the planner for outer loop vectorization.
10204   // TODO: CM is not used at this point inside the planner. Turn CM into an
10205   // optional argument if we don't need it in the future.
10206   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints,
10207                                Requirements, ORE);
10208 
10209   // Get user vectorization factor.
10210   ElementCount UserVF = Hints.getWidth();
10211 
10212   CM.collectElementTypesForWidening();
10213 
10214   // Plan how to best vectorize, return the best VF and its cost.
10215   const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
10216 
10217   // If we are stress testing VPlan builds, do not attempt to generate vector
10218   // code. Masked vector code generation support will follow soon.
10219   // Also, do not attempt to vectorize if no vector code will be produced.
10220   if (VPlanBuildStressTest || EnableVPlanPredication ||
10221       VectorizationFactor::Disabled() == VF)
10222     return false;
10223 
10224   VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10225 
10226   {
10227     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10228                              F->getParent()->getDataLayout());
10229     InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
10230                            &CM, BFI, PSI, Checks);
10231     LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
10232                       << L->getHeader()->getParent()->getName() << "\"\n");
10233     LVP.executePlan(VF.Width, 1, BestPlan, LB, DT);
10234   }
10235 
10236   // Mark the loop as already vectorized to avoid vectorizing again.
10237   Hints.setAlreadyVectorized();
10238   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10239   return true;
10240 }
10241 
10242 // Emit a remark if there are stores to floats that required a floating point
// extension. If the vectorized loop was generated with double precision there
10244 // will be a performance penalty from the conversion overhead and the change in
10245 // the vector width.
10246 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
10247   SmallVector<Instruction *, 4> Worklist;
10248   for (BasicBlock *BB : L->getBlocks()) {
10249     for (Instruction &Inst : *BB) {
10250       if (auto *S = dyn_cast<StoreInst>(&Inst)) {
10251         if (S->getValueOperand()->getType()->isFloatTy())
10252           Worklist.push_back(S);
10253       }
10254     }
10255   }
10256 
  // Traverse the floating point stores upwards, searching for floating point
10258   // conversions.
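  // For example (a sketch), a chain such as
  //   %e = fpext float %x to double
  //   %d = fadd double %e, 1.0
  //   %t = fptrunc double %d to float
  //   store float %t, float* %p
  // is found by walking up from the operands of the float store to the fpext.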
10259   SmallPtrSet<const Instruction *, 4> Visited;
10260   SmallPtrSet<const Instruction *, 4> EmittedRemark;
10261   while (!Worklist.empty()) {
10262     auto *I = Worklist.pop_back_val();
10263     if (!L->contains(I))
10264       continue;
10265     if (!Visited.insert(I).second)
10266       continue;
10267 
10268     // Emit a remark if the floating point store required a floating
10269     // point conversion.
10270     // TODO: More work could be done to identify the root cause such as a
10271     // constant or a function return type and point the user to it.
10272     if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
10273       ORE->emit([&]() {
10274         return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
10275                                           I->getDebugLoc(), L->getHeader())
10276                << "floating point conversion changes vector width. "
10277                << "Mixed floating point precision requires an up/down "
10278                << "cast that will negatively impact performance.";
10279       });
10280 
10281     for (Use &Op : I->operands())
10282       if (auto *OpI = dyn_cast<Instruction>(Op))
10283         Worklist.push_back(OpI);
10284   }
10285 }
10286 
10287 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
10288     : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
10289                                !EnableLoopInterleaving),
10290       VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
10291                               !EnableLoopVectorization) {}
10292 
10293 bool LoopVectorizePass::processLoop(Loop *L) {
10294   assert((EnableVPlanNativePath || L->isInnermost()) &&
10295          "VPlan-native path is not enabled. Only process inner loops.");
10296 
10297 #ifndef NDEBUG
10298   const std::string DebugLocStr = getDebugLocString(L);
10299 #endif /* NDEBUG */
10300 
10301   LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in '"
10302                     << L->getHeader()->getParent()->getName() << "' from "
10303                     << DebugLocStr << "\n");
10304 
10305   LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI);
10306 
10307   LLVM_DEBUG(
10308       dbgs() << "LV: Loop hints:"
10309              << " force="
10310              << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
10311                      ? "disabled"
10312                      : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
10313                             ? "enabled"
10314                             : "?"))
10315              << " width=" << Hints.getWidth()
10316              << " interleave=" << Hints.getInterleave() << "\n");
10317 
10318   // Function containing loop
10319   Function *F = L->getHeader()->getParent();
10320 
10321   // Looking at the diagnostic output is the only way to determine if a loop
10322   // was vectorized (other than looking at the IR or machine code), so it
10323   // is important to generate an optimization remark for each loop. Most of
10324   // these messages are generated as OptimizationRemarkAnalysis. Remarks
10325   // generated as OptimizationRemark and OptimizationRemarkMissed are
10326   // less verbose reporting vectorized loops and unvectorized loops that may
10327   // benefit from vectorization, respectively.
10328 
10329   if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
10330     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
10331     return false;
10332   }
10333 
10334   PredicatedScalarEvolution PSE(*SE, *L);
10335 
10336   // Check if it is legal to vectorize the loop.
10337   LoopVectorizationRequirements Requirements;
10338   LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
10339                                 &Requirements, &Hints, DB, AC, BFI, PSI);
10340   if (!LVL.canVectorize(EnableVPlanNativePath)) {
10341     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
10342     Hints.emitRemarkWithHints();
10343     return false;
10344   }
10345 
10346   // Check the function attributes and profiles to find out if this function
10347   // should be optimized for size.
10348   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
10349       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);
10350 
10351   // Entrance to the VPlan-native vectorization path. Outer loops are processed
10352   // here. They may require CFG and instruction level transformations before
10353   // even evaluating whether vectorization is profitable. Since we cannot modify
10354   // the incoming IR, we need to build VPlan upfront in the vectorization
10355   // pipeline.
10356   if (!L->isInnermost())
10357     return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
10358                                         ORE, BFI, PSI, Hints, Requirements);
10359 
10360   assert(L->isInnermost() && "Inner loop expected.");
10361 
10362   // Check the loop for a trip count threshold: vectorize loops with a tiny trip
10363   // count by optimizing for size, to minimize overheads.
10364   auto ExpectedTC = getSmallBestKnownTC(*SE, L);
10365   if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
10366     LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
10367                       << "This loop is worth vectorizing only if no scalar "
10368                       << "iteration overheads are incurred.");
10369     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
10370       LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
10371     else {
10372       LLVM_DEBUG(dbgs() << "\n");
10373       SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
10374     }
10375   }
10376 
10377   // Check the function attributes to see if implicit floats are allowed.
10378   // FIXME: This check doesn't seem possibly correct -- what if the loop is
10379   // an integer loop and the vector instructions selected are purely integer
10380   // vector instructions?
10381   if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
10382     reportVectorizationFailure(
10383         "Can't vectorize when the NoImplicitFloat attribute is used",
10384         "loop not vectorized due to NoImplicitFloat attribute",
10385         "NoImplicitFloat", ORE, L);
10386     Hints.emitRemarkWithHints();
10387     return false;
10388   }
10389 
10390   // Check if the target supports potentially unsafe FP vectorization.
10391   // FIXME: Add a check for the type of safety issue (denormal, signaling)
10392   // for the target we're vectorizing for, to make sure none of the
10393   // additional fp-math flags can help.
10394   if (Hints.isPotentiallyUnsafe() &&
10395       TTI->isFPVectorizationPotentiallyUnsafe()) {
10396     reportVectorizationFailure(
10397         "Potentially unsafe FP op prevents vectorization",
10398         "loop not vectorized due to unsafe FP support.",
10399         "UnsafeFP", ORE, L);
10400     Hints.emitRemarkWithHints();
10401     return false;
10402   }
10403 
10404   bool AllowOrderedReductions;
10405   // If the flag is set, use that instead and override the TTI behaviour.
10406   if (ForceOrderedReductions.getNumOccurrences() > 0)
10407     AllowOrderedReductions = ForceOrderedReductions;
10408   else
10409     AllowOrderedReductions = TTI->enableOrderedReductions();
10410   if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) {
10411     ORE->emit([&]() {
10412       auto *ExactFPMathInst = Requirements.getExactFPInst();
10413       return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
10414                                                  ExactFPMathInst->getDebugLoc(),
10415                                                  ExactFPMathInst->getParent())
10416              << "loop not vectorized: cannot prove it is safe to reorder "
10417                 "floating-point operations";
10418     });
10419     LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
10420                          "reorder floating-point operations\n");
10421     Hints.emitRemarkWithHints();
10422     return false;
10423   }
10424 
10425   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
10426   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
10427 
10428   // If an override option has been passed in for interleaved accesses, use it.
10429   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
10430     UseInterleaved = EnableInterleavedMemAccesses;
10431 
10432   // Analyze interleaved memory accesses.
10433   if (UseInterleaved) {
10434     IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
10435   }
10436 
10437   // Use the cost model.
10438   LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
10439                                 F, &Hints, IAI);
10440   CM.collectValuesToIgnore();
10441   CM.collectElementTypesForWidening();
10442 
10443   // Use the planner for vectorization.
10444   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints,
10445                                Requirements, ORE);
10446 
10447   // Get user vectorization factor and interleave count.
10448   ElementCount UserVF = Hints.getWidth();
10449   unsigned UserIC = Hints.getInterleave();
10450 
10451   // Plan how to best vectorize, return the best VF and its cost.
10452   Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
10453 
10454   VectorizationFactor VF = VectorizationFactor::Disabled();
10455   unsigned IC = 1;
10456 
10457   if (MaybeVF) {
10458     VF = *MaybeVF;
10459     // Select the interleave count.
10460     IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue());
10461   }
10462 
10463   // Identify the diagnostic messages that should be produced.
10464   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
10465   bool VectorizeLoop = true, InterleaveLoop = true;
10466   if (VF.Width.isScalar()) {
10467     LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
10468     VecDiagMsg = std::make_pair(
10469         "VectorizationNotBeneficial",
10470         "the cost-model indicates that vectorization is not beneficial");
10471     VectorizeLoop = false;
10472   }
10473 
10474   if (!MaybeVF && UserIC > 1) {
10475     // Tell the user interleaving was avoided up-front, despite being explicitly
10476     // requested.
10477     LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
10478                          "interleaving should be avoided up front\n");
10479     IntDiagMsg = std::make_pair(
10480         "InterleavingAvoided",
10481         "Ignoring UserIC, because interleaving was avoided up front");
10482     InterleaveLoop = false;
10483   } else if (IC == 1 && UserIC <= 1) {
10484     // Tell the user interleaving is not beneficial.
10485     LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
10486     IntDiagMsg = std::make_pair(
10487         "InterleavingNotBeneficial",
10488         "the cost-model indicates that interleaving is not beneficial");
10489     InterleaveLoop = false;
10490     if (UserIC == 1) {
10491       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
10492       IntDiagMsg.second +=
10493           " and is explicitly disabled or interleave count is set to 1";
10494     }
10495   } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
10497     LLVM_DEBUG(
10498         dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
10499     IntDiagMsg = std::make_pair(
10500         "InterleavingBeneficialButDisabled",
10501         "the cost-model indicates that interleaving is beneficial "
10502         "but is explicitly disabled or interleave count is set to 1");
10503     InterleaveLoop = false;
10504   }
10505 
10506   // Override IC if user provided an interleave count.
10507   IC = UserIC > 0 ? UserIC : IC;
10508 
10509   // Emit diagnostic messages, if any.
10510   const char *VAPassName = Hints.vectorizeAnalysisPassName();
10511   if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
10513     ORE->emit([&]() {
10514       return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
10515                                       L->getStartLoc(), L->getHeader())
10516              << VecDiagMsg.second;
10517     });
10518     ORE->emit([&]() {
10519       return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
10520                                       L->getStartLoc(), L->getHeader())
10521              << IntDiagMsg.second;
10522     });
10523     return false;
10524   } else if (!VectorizeLoop && InterleaveLoop) {
10525     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10526     ORE->emit([&]() {
10527       return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
10528                                         L->getStartLoc(), L->getHeader())
10529              << VecDiagMsg.second;
10530     });
10531   } else if (VectorizeLoop && !InterleaveLoop) {
10532     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10533                       << ") in " << DebugLocStr << '\n');
10534     ORE->emit([&]() {
10535       return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
10536                                         L->getStartLoc(), L->getHeader())
10537              << IntDiagMsg.second;
10538     });
10539   } else if (VectorizeLoop && InterleaveLoop) {
10540     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10541                       << ") in " << DebugLocStr << '\n');
10542     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10543   }
10544 
10545   bool DisableRuntimeUnroll = false;
10546   MDNode *OrigLoopID = L->getLoopID();
10547   {
10548     // Optimistically generate runtime checks. Drop them if they turn out to not
10549     // be profitable. Limit the scope of Checks, so the cleanup happens
    // immediately after vector code generation is done.
10551     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10552                              F->getParent()->getDataLayout());
10553     if (!VF.Width.isScalar() || IC > 1)
10554       Checks.Create(L, *LVL.getLAI(), PSE.getPredicate());
10555 
10556     using namespace ore;
10557     if (!VectorizeLoop) {
10558       assert(IC > 1 && "interleave count should not be 1 or 0");
10559       // If we decided that it is not legal to vectorize the loop, then
10560       // interleave it.
10561       InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
10562                                  &CM, BFI, PSI, Checks);
10563 
10564       VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10565       LVP.executePlan(VF.Width, IC, BestPlan, Unroller, DT);
10566 
10567       ORE->emit([&]() {
10568         return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
10569                                   L->getHeader())
10570                << "interleaved loop (interleaved count: "
10571                << NV("InterleaveCount", IC) << ")";
10572       });
10573     } else {
10574       // If we decided that it is *legal* to vectorize the loop, then do it.
10575 
10576       // Consider vectorizing the epilogue too if it's profitable.
10577       VectorizationFactor EpilogueVF =
10578           CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
10579       if (EpilogueVF.Width.isVector()) {
10580 
10581         // The first pass vectorizes the main loop and creates a scalar epilogue
10582         // to be vectorized by executing the plan (potentially with a different
10583         // factor) again shortly afterwards.
10584         EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1);
10585         EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE,
10586                                            EPI, &LVL, &CM, BFI, PSI, Checks);
10587 
10588         VPlan &BestMainPlan = LVP.getBestPlanFor(EPI.MainLoopVF);
10589         LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF, BestMainPlan, MainILV,
10590                         DT);
10591         ++LoopsVectorized;
10592 
10593         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10594         formLCSSARecursively(*L, *DT, LI, SE);
10595 
10596         // Second pass vectorizes the epilogue and adjusts the control flow
10597         // edges from the first pass.
10598         EPI.MainLoopVF = EPI.EpilogueVF;
10599         EPI.MainLoopUF = EPI.EpilogueUF;
10600         EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
10601                                                  ORE, EPI, &LVL, &CM, BFI, PSI,
10602                                                  Checks);
10603 
10604         VPlan &BestEpiPlan = LVP.getBestPlanFor(EPI.EpilogueVF);
10605 
10606         // Ensure that the start values for any VPReductionPHIRecipes are
10607         // updated before vectorising the epilogue loop.
10608         VPBasicBlock *Header = BestEpiPlan.getEntry()->getEntryBasicBlock();
10609         for (VPRecipeBase &R : Header->phis()) {
10610           if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) {
10611             if (auto *Resume = MainILV.getReductionResumeValue(
10612                     ReductionPhi->getRecurrenceDescriptor())) {
10613               VPValue *StartVal = new VPValue(Resume);
10614               BestEpiPlan.addExternalDef(StartVal);
10615               ReductionPhi->setOperand(0, StartVal);
10616             }
10617           }
10618         }
10619 
10620         LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV,
10621                         DT);
10622         ++LoopsEpilogueVectorized;
10623 
10624         if (!MainILV.areSafetyChecksAdded())
10625           DisableRuntimeUnroll = true;
10626       } else {
10627         InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
10628                                &LVL, &CM, BFI, PSI, Checks);
10629 
10630         VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10631         LVP.executePlan(VF.Width, IC, BestPlan, LB, DT);
10632         ++LoopsVectorized;
10633 
10634         // Add metadata to disable runtime unrolling a scalar loop when there
10635         // are no runtime checks about strides and memory. A scalar loop that is
10636         // rarely used is not worth unrolling.
10637         if (!LB.areSafetyChecksAdded())
10638           DisableRuntimeUnroll = true;
10639       }
10640       // Report the vectorization decision.
10641       ORE->emit([&]() {
10642         return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
10643                                   L->getHeader())
10644                << "vectorized loop (vectorization width: "
10645                << NV("VectorizationFactor", VF.Width)
10646                << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
10647       });
10648     }
10649 
10650     if (ORE->allowExtraAnalysis(LV_NAME))
10651       checkMixedPrecision(L, ORE);
10652   }
10653 
10654   Optional<MDNode *> RemainderLoopID =
10655       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
10656                                       LLVMLoopVectorizeFollowupEpilogue});
10657   if (RemainderLoopID.hasValue()) {
10658     L->setLoopID(RemainderLoopID.getValue());
10659   } else {
10660     if (DisableRuntimeUnroll)
10661       AddRuntimeUnrollDisableMetaData(L);
10662 
10663     // Mark the loop as already vectorized to avoid vectorizing again.
10664     Hints.setAlreadyVectorized();
10665   }
10666 
10667   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10668   return true;
10669 }
10670 
10671 LoopVectorizeResult LoopVectorizePass::runImpl(
10672     Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
10673     DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
10674     DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
10675     std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
10676     OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
10677   SE = &SE_;
10678   LI = &LI_;
10679   TTI = &TTI_;
10680   DT = &DT_;
10681   BFI = &BFI_;
10682   TLI = TLI_;
10683   AA = &AA_;
10684   AC = &AC_;
10685   GetLAA = &GetLAA_;
10686   DB = &DB_;
10687   ORE = &ORE_;
10688   PSI = PSI_;
10689 
10690   // Don't attempt if
10691   // 1. the target claims to have no vector registers, and
10692   // 2. interleaving won't help ILP.
10693   //
10694   // The second condition is necessary because, even if the target has no
10695   // vector registers, loop vectorization may still enable scalar
10696   // interleaving.
10697   if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
10698       TTI->getMaxInterleaveFactor(1) < 2)
10699     return LoopVectorizeResult(false, false);
10700 
10701   bool Changed = false, CFGChanged = false;
10702 
10703   // The vectorizer requires loops to be in simplified form.
10704   // Since simplification may add new inner loops, it has to run before the
10705   // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
10707   // vectorized.
10708   for (auto &L : *LI)
10709     Changed |= CFGChanged |=
10710         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10711 
10712   // Build up a worklist of inner-loops to vectorize. This is necessary as
10713   // the act of vectorizing or partially unrolling a loop creates new loops
10714   // and can invalidate iterators across the loops.
10715   SmallVector<Loop *, 8> Worklist;
10716 
10717   for (Loop *L : *LI)
10718     collectSupportedLoops(*L, LI, ORE, Worklist);
10719 
10720   LoopsAnalyzed += Worklist.size();
10721 
10722   // Now walk the identified inner loops.
10723   while (!Worklist.empty()) {
10724     Loop *L = Worklist.pop_back_val();
10725 
10726     // For the inner loops we actually process, form LCSSA to simplify the
10727     // transform.
10728     Changed |= formLCSSARecursively(*L, *DT, LI, SE);
10729 
10730     Changed |= CFGChanged |= processLoop(L);
10731   }
10732 
10733   // Process each loop nest in the function.
10734   return LoopVectorizeResult(Changed, CFGChanged);
10735 }
10736 
10737 PreservedAnalyses LoopVectorizePass::run(Function &F,
10738                                          FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,      SE,
                                      TLI, TTI, nullptr, nullptr, nullptr};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve loopinfo/dominator analyses with outer loop
  // vectorization. Until this is addressed, mark these analyses as preserved
  // only for the non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }

  if (Result.MadeCFGChange) {
    // Making CFG changes likely means a loop got vectorized. Indicate that
    // extra simplification passes should be run.
    // TODO: MadeCFGChange is not a perfect proxy. Extra passes should only
    // be run if runtime checks have been added.
    AM.getResult<ShouldRunExtraVectorPasses>(F);
    PA.preserve<ShouldRunExtraVectorPasses>();
  } else {
    PA.preserveSet<CFGAnalyses>();
  }
  return PA;
10786 }
10787 
10788 void LoopVectorizePass::printPipeline(
10789     raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
10790   static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
10791       OS, MapClassName2PassName);
10792 
10793   OS << "<";
10794   OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
10795   OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
10796   OS << ">";
10797 }
10798