//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
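//
// For example (an illustrative sketch, not code taken from this pass), a
// scalar loop such as
//
//   for (i = 0; i < n; i++)
//     a[i] = b[i] + c[i];
//
// is conceptually rewritten, for a vectorization factor of 4, into
//
//   for (i = 0; i + 3 < n; i += 4)
//     a[i:i+3] = b[i:i+3] + c[i:i+3]; // one 'wide' SIMD iteration
//
// with any remaining iterations handled by a scalar epilogue loop.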
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

#ifndef NDEBUG
const char VerboseDebug[] = DEBUG_TYPE "-verbose";
#endif

/// @{
/// Metadata attribute names
const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
const char LLVMLoopVectorizeFollowupVectorized[] =
    "llvm.loop.vectorize.followup_vectorized";
const char LLVMLoopVectorizeFollowupEpilogue[] =
    "llvm.loop.vectorize.followup_epilogue";
/// @}

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");

static cl::opt<bool> EnableEpilogueVectorization(
    "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
    cl::desc("Enable vectorization of epilogue loops."));

static cl::opt<unsigned> EpilogueVectorizationForceVF(
    "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
    cl::desc("When epilogue vectorization is enabled, and a value greater than "
             "1 is specified, forces the given VF for all applicable epilogue "
             "loops."));

static cl::opt<unsigned> EpilogueVectorizationMinVF(
    "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
    cl::desc("Only loops with vectorization factor equal to or larger than "
             "the specified value are considered for epilogue vectorization."));

/// Loops with a known constant trip count below this number are vectorized
/// only if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

// Option prefer-predicate-over-epilogue indicates that an epilogue is
// undesired, that predication is preferred, and this lists all options. I.e.,
// the vectorizer will try to fold the tail-loop (epilogue) into the vector
// body and predicate the instructions accordingly. If tail-folding fails,
// there are different fallback strategies depending on these values:
namespace PreferPredicateTy {
  enum Option {
    ScalarEpilogue = 0,
    PredicateElseScalarEpilogue,
    PredicateOrDontVectorize
  };
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "Prefer tail-folding, create scalar epilogue if "
                          "tail-folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "Prefer tail-folding, don't attempt vectorization "
                          "if tail-folding fails.")));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting the vectorization factor, "
             "which will be determined by the smallest type in the loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in "
             "a loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<bool> ForceTargetSupportsScalableVectors(
    "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
    cl::desc(
        "Pretend that scalable vectors are supported, even if the target does "
        "not support them. This flag should only be used for testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// Interleave small loops with scalar reductions.
static cl::opt<bool> InterleaveSmallLoopScalarReduction(
    "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
    cl::desc("Enable interleaving for loops with small iteration counts that "
             "contain scalar reductions to expose ILP."));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<bool>
    PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
                           cl::Hidden,
                           cl::desc("Prefer in-loop vector reductions, "
                                    "overriding the target's preference."));

static cl::opt<bool> ForceOrderedReductions(
    "force-ordered-reductions", cl::init(false), cl::Hidden,
    cl::desc("Enable the vectorization of loops with in-order (strict) "
             "FP reductions"));

static cl::opt<bool> PreferPredicatedReductionSelect(
    "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
    cl::desc(
        "Prefer predicating a reduction operation over an after-loop select."));

cl::opt<bool> EnableVPlanNativePath(
    "enable-vplan-native-path", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

// FIXME: Remove this switch once we have divergence analysis. Currently we
// assume divergent non-backedge branches when this switch is true.
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

cl::opt<bool> PrintVPlansInDotFormat(
    "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
    cl::desc("Use dot format instead of plain text when dumping VPlans"));

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type.
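///
/// For example, on x86 the x86_fp80 type stores 80 bits but is allocated
/// with padding, so an array of x86_fp80 values is not laid out like a
/// vector of them; such a type is "irregular" and consecutive accesses to
/// it cannot simply be treated as a single wide load or store.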
static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
  // Determine if an array of N elements of type Ty is "bitcast compatible"
  // with a <N x Ty> vector.
  // This is only true if there is no padding between the array elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
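///
/// For example, with the value 2 returned below, the cost model charges an
/// instruction of cost C that resides in a predicated block as C / 2, since
/// the block is assumed to execute on every other iteration of the header.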
///
/// TODO: We should use actual block probability here, if available. Currently,
///       we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

// Forward declare GeneratedRTChecks.
class GeneratedRTChecks;

namespace llvm {

AnalysisKey ShouldRunExtraVectorPasses::Key;

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found to a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
        PSI(PSI), RTChecks(RTChecks) {
    // Query this against the original loop and save it here because the
    // profile of the original loop header may change as the transformation
    // happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop and the start value for the canonical induction, if it is != 0. The
  /// latter is the case when vectorizing the epilogue loop. In the case of
  /// epilogue vectorization, this function is overridden to handle the more
  /// complex control flow around the loops.
  virtual std::pair<BasicBlock *, Value *> createVectorizedLoopSkeleton();

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
  void fixVectorizedLoop(VPTransformState &State);

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
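  /// For example, with VF = 4 and UF = 2, an i32 value from the original
  /// loop is represented by two <4 x i32> values in the new loop.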
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single first-order recurrence or pointer induction PHINode in
  /// a block. This method handles the induction variable canonicalization. It
  /// supports both VF = 1 for unrolled loops and arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR,
                           VPTransformState &State);

  /// A helper function to scalarize a single Instruction in the innermost loop.
  /// Generates a sequence of scalar instances for each lane between \p MinLane
  /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
  /// inclusive. Uses the VPValue operands from \p RepRecipe instead of \p
  /// Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPReplicateRecipe *RepRecipe,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
                                 VPTransformState &State);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                ArrayRef<VPValue *> VPDefs,
                                VPTransformState &State, VPValue *Addr,
                                ArrayRef<VPValue *> StoredValues,
                                VPValue *BlockInMask = nullptr);

  /// Set the debug location in the builder \p CustomBuilder using the debug
  /// location in \p V. If \p CustomBuilder is None, the class member's
  /// Builder is used instead.
  void setDebugLocFromInst(const Value *V,
                           Optional<IRBuilderBase *> CustomBuilder = None);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(VPTransformState &State);

  /// Returns true if the reordering of FP operations is not allowed, but we are
  /// able to vectorize with strict in-order reductions for the given RdxDesc.
  bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata).  Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  // Returns the resume value (bc.merge.rdx) for a reduction as
  // generated by fixReduction.
  PHINode *getReductionResumeValue(const RecurrenceDescriptor &RdxDesc);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
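  /// For example, with UF = 2 and VF = 4, one scalarized value is kept as
  /// two groups of four scalar Values, one per (part, lane) pair.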
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock, BasicBlock *VectorHeader);

  /// Introduce a conditional branch (on true, condition to be set later) at the
  /// end of the header (which is also the latch), connecting it to itself
  /// across the backedge and to the exit block of \p L.
  void createHeaderBranch(Loop *L);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs(VPTransformState &State);

  /// Create the exit value of first order recurrences in the middle block and
  /// update their users.
  void fixFirstOrderRecurrence(VPFirstOrderRecurrencePHIRecipe *PhiR,
                               VPTransformState &State);

  /// Create code for the loop exit value of the reduction.
  void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
                               VPTransformState &State);

  /// Fixup the LCSSA phi nodes in the unique exit block.  This simply
  /// means we need to add the appropriate incoming value from the middle
  /// block as exiting edges from the scalar epilogue loop (if present) are
  /// already in place, and we exit the vector loop exclusively to the middle
  /// block.
  void fixLCSSAPHIs(VPTransformState &State);

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
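  /// For example, if DemandedBits shows that a chain of i32 operations only
  /// needs its low 8 bits, the chain can be narrowed to operate on
  /// <VF x i8> vectors, fitting more lanes into each vector register.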
  void truncateToMinimalBitwidths(VPTransformState &State);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct. Returns the block containing the checks or
  /// nullptr if no checks have been added.
  BasicBlock *emitSCEVChecks(BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  /// Returns the block containing the checks or nullptr if no checks have been
  /// added.
  BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  /// vector loop preheader, middle block and scalar preheader. Also
  /// allocate a loop object for the new vector loop and return it.
  Loop *createVectorLoopSkeleton(StringRef Prefix);

  /// Create new phi nodes for the induction variables to resume iteration
  /// count in the scalar epilogue, from where the vectorized loop left off.
  /// In cases where the loop skeleton is more complicated (e.g., epilogue
  /// vectorization) and the resume values can come from an additional bypass
  /// block, the \p AdditionalBypass pair provides information about the bypass
  /// block and the end value on the edge from bypass to this loop.
  void createInductionResumeValues(
      Loop *L,
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Take in the vector loop \p L as argument, and return
  /// the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks.  Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Collect poison-generating recipes that may generate a poison value that is
  /// used after vectorization, even when their operands are not poison. Those
  /// recipes meet the following conditions:
  ///  * Contribute to the address computation of a recipe generating a widen
  ///    memory load/store (VPWidenMemoryInstructionRecipe or
  ///    VPInterleaveRecipe).
  ///  * Such a widen memory load/store has at least one underlying Instruction
  ///    that is in a basic block that needs predication and after vectorization
  ///    the generated instruction won't be predicated.
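  ///
  /// For example, an 'inbounds' GEP that only executes under a condition in
  /// the scalar loop may become an unconditional wide GEP feeding a masked
  /// load after vectorization; collecting its recipe here lets codegen drop
  /// the poison-generating flag so masked-off lanes cannot become poison.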
  void collectPoisonGeneratingRecipes(VPTransformState &State);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart() {}
  virtual void printDebugTracesAtEnd() {}

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning.  It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks.  The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The unique ExitBlock of the scalar loop if one exists.  Note that
  /// there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
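  /// (e.g., TripCount = 103 with VF = 4 and UF = 2 yields
  /// VectorTripCount = 96, leaving 7 iterations to the scalar epilogue).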
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;

  /// BFI and PSI are used to check for profile-guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile-guided
  // size optimizations.
  bool OptForSizeBasedOnProfile;

  /// Structure to hold information about generated runtime checks, responsible
  /// for cleaning the checks, if vectorization turns out unprofitable.
  GeneratedRTChecks &RTChecks;

  // Holds the resume values for reductions in the loops, used to set the
  // correct start value of reduction PHIs when vectorizing the epilogue.
  SmallMapVector<const RecurrenceDescriptor *, PHINode *, 4>
      ReductionResumeValues;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI, Check) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
};

/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
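///
/// For example (hypothetical values), a main loop vectorized at VF 16 with
/// UF 2 and an epilogue vectorized at VF 8 would be described by
/// EpilogueLoopVectorizationInfo(ElementCount::getFixed(16), 2,
///                               ElementCount::getFixed(8), 1).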
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  BasicBlock *SCEVSafetyCheck = nullptr;
  BasicBlock *MemSafetyCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;

  EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF,
                                ElementCount EVF, unsigned EUF)
      : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};

/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, first to set up the
/// skeleton and vectorize the main loop, and second to complete the skeleton
/// from the first step and vectorize the epilogue.  This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
                            Checks),
        EPI(EPI) {}

  // Override this function to handle the more complex control flow around the
  // three loops.
  std::pair<BasicBlock *, Value *>
  createVectorizedLoopSkeleton() final override {
    return createEpilogueVectorizedLoopSkeleton();
  }

  /// The interface for creating a vectorized skeleton using one of two
  /// different strategies, each corresponding to one execution of the vplan
  /// as described above.
  virtual std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() = 0;

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and their
/// epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Check)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Check) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e., the first pass of vplan execution).
  std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
                                             bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *epilogue* loops in the process of vectorizing loops and
/// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Checks) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e., the second pass of vplan execution).
  std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(BasicBlock *Bypass,
                                                      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (Use &Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(
    const Value *V, Optional<IRBuilderBase *> CustomBuilder) {
  IRBuilderBase *B = (CustomBuilder == None) ? &Builder : *CustomBuilder;
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(V)) {
    const DILocation *DIL = Inst->getDebugLoc();

    // When an FSDiscriminator is enabled, we don't need to add the multiply
    // factors to the discriminators.
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
      // FIXME: For scalable vectors, assume vscale=1.
      auto NewDIL =
          DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
      if (NewDIL)
        B->SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs()
                   << "Failed to create new discriminator: "
                   << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B->SetCurrentDebugLocation(DIL);
  } else
    B->SetCurrentDebugLocation(DebugLoc());
}

/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
/// is passed, the message relates to that particular instruction.
#ifndef NDEBUG
static void debugVectorizationMessage(const StringRef Prefix,
                                      const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: " << Prefix << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed.
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint).  \p
/// RemarkName is the identifier for the remark.  If \p I is passed it is an
/// instruction that prevents vectorization.  Otherwise \p TheLoop is used for
/// the location of the remark.  \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
                                                   StringRef RemarkName,
                                                   Loop *TheLoop,
                                                   Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If the instruction has a debug location attached, prefer it; otherwise
    // fall back to using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
}

namespace llvm {

/// Return a value for Step multiplied by VF.
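/// For example, with a scalable VF = <vscale x 4> and Step = 2 this yields
/// the runtime value vscale * 8, while a fixed VF = 4 folds to the
/// constant 8.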
Value *createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF,
                       int64_t Step) {
  assert(Ty->isIntegerTy() && "Expected an integer step");
  Constant *StepVal = ConstantInt::get(Ty, Step * VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
}

/// Return the runtime value for VF.
Value *getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF) {
  Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(EC) : EC;
}

static Value *getRuntimeVFAsFloat(IRBuilderBase &B, Type *FTy,
                                  ElementCount VF) {
  assert(FTy->isFloatingPointTy() && "Expected floating point type!");
  Type *IntTy = IntegerType::get(FTy->getContext(), FTy->getScalarSizeInBits());
  Value *RuntimeVF = getRuntimeVF(B, IntTy, VF);
  return B.CreateUIToFP(RuntimeVF, FTy);
}

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << "loop not vectorized: " << OREMsg);
}

void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
                             OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                             Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << Msg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::collectPoisonGeneratingRecipes(
    VPTransformState &State) {

  // Collect recipes in the backward slice of `Root` that may generate a poison
  // value that is used after vectorization.
  SmallPtrSet<VPRecipeBase *, 16> Visited;
  auto collectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
    SmallVector<VPRecipeBase *, 16> Worklist;
    Worklist.push_back(Root);

    // Traverse the backward slice of Root through its use-def chain.
    while (!Worklist.empty()) {
      VPRecipeBase *CurRec = Worklist.back();
      Worklist.pop_back();

      if (!Visited.insert(CurRec).second)
        continue;

      // Prune search if we find another recipe generating a widen memory
      // instruction. Widen memory instructions involved in address computation
      // will lead to gather/scatter instructions, which don't need to be
      // handled.
      if (isa<VPWidenMemoryInstructionRecipe>(CurRec) ||
          isa<VPInterleaveRecipe>(CurRec) ||
          isa<VPScalarIVStepsRecipe>(CurRec) ||
          isa<VPCanonicalIVPHIRecipe>(CurRec))
        continue;

      // This recipe contributes to the address computation of a widen
      // load/store. Collect recipe if its underlying instruction has
      // poison-generating flags.
      Instruction *Instr = CurRec->getUnderlyingInstr();
      if (Instr && Instr->hasPoisonGeneratingFlags())
        State.MayGeneratePoisonRecipes.insert(CurRec);

      // Add new definitions to the worklist.
      for (VPValue *Operand : CurRec->operands())
        if (VPDef *OpDef = Operand->getDef())
          Worklist.push_back(cast<VPRecipeBase>(OpDef));
    }
  });

  // Traverse all the recipes in the VPlan and collect the poison-generating
  // recipes in the backward slice starting at the address of a widen memory
  // recipe (VPWidenMemoryInstructionRecipe) or a VPInterleaveRecipe.
  auto Iter = depth_first(
      VPBlockRecursiveTraversalWrapper<VPBlockBase *>(State.Plan->getEntry()));
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
    for (VPRecipeBase &Recipe : *VPBB) {
      if (auto *WidenRec = dyn_cast<VPWidenMemoryInstructionRecipe>(&Recipe)) {
        Instruction *UnderlyingInstr = WidenRec->getUnderlyingInstr();
        VPDef *AddrDef = WidenRec->getAddr()->getDef();
        if (AddrDef && WidenRec->isConsecutive() && UnderlyingInstr &&
            Legal->blockNeedsPredication(UnderlyingInstr->getParent()))
          collectPoisonGeneratingInstrsInBackwardSlice(
              cast<VPRecipeBase>(AddrDef));
      } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
        VPDef *AddrDef = InterleaveRec->getAddr()->getDef();
        if (AddrDef) {
          // Check if any member of the interleave group needs predication.
          const InterleaveGroup<Instruction> *InterGroup =
              InterleaveRec->getInterleaveGroup();
          bool NeedPredication = false;
          for (int I = 0, NumMembers = InterGroup->getNumMembers();
               I < NumMembers; ++I) {
            Instruction *Member = InterGroup->getMember(I);
            if (Member)
              NeedPredication |=
                  Legal->blockNeedsPredication(Member->getParent());
          }

          if (NeedPredication)
            collectPoisonGeneratingInstrsInBackwardSlice(
                cast<VPRecipeBase>(AddrDef));
        }
      }
    }
  }
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

PHINode *InnerLoopVectorizer::getReductionResumeValue(
    const RecurrenceDescriptor &RdxDesc) {
  auto It = ReductionResumeValues.find(&RdxDesc);
  assert(It != ReductionResumeValues.end() &&
         "Expected to find a resume value for the reduction.");
  return It->second;
}

1191 namespace llvm {
1192 
1193 // Loop vectorization cost-model hints how the scalar epilogue loop should be
1194 // lowered.
1195 enum ScalarEpilogueLowering {
1196 
1197   // The default: allowing scalar epilogues.
1198   CM_ScalarEpilogueAllowed,
1199 
1200   // Vectorization with OptForSize: don't allow epilogues.
1201   CM_ScalarEpilogueNotAllowedOptSize,
1202 
1203   // A special case of vectorisation with OptForSize: loops with a very small
1204   // trip count are considered for vectorization under OptForSize, thereby
1205   // making sure the cost of their loop body is dominant, free of runtime
1206   // guards and scalar iteration overheads.
1207   CM_ScalarEpilogueNotAllowedLowTripLoop,
1208 
1209   // Loop hint predicate indicating an epilogue is undesired.
1210   CM_ScalarEpilogueNotNeededUsePredicate,
1211 
  // Directive indicating we must either tail fold or not vectorize.
1213   CM_ScalarEpilogueNotAllowedUsePredicate
1214 };
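// For illustration, a sketch of how these values are typically reached (the
// exact selection logic lives elsewhere): a function with the 'optsize'
// attribute generally yields CM_ScalarEpilogueNotAllowedOptSize, a loop
// annotated with '#pragma clang loop vectorize_predicate(enable)' yields
// CM_ScalarEpilogueNotNeededUsePredicate, and an unannotated loop in a
// normally optimized function yields CM_ScalarEpilogueAllowed.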
1215 
1216 /// ElementCountComparator creates a total ordering for ElementCount
1217 /// for the purposes of using it in a set structure.
1218 struct ElementCountComparator {
1219   bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
1220     return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
1221            std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
1222   }
1223 };
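// For example, the comparator orders all fixed element counts before all
// scalable ones (isScalable() is false < true), and within each group by the
// known minimum value, so a set of candidate VFs iterates as:
//   { 2, 4, 8, vscale x 2, vscale x 4 }
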
1224 using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;
1225 
1226 /// LoopVectorizationCostModel - estimates the expected speedups due to
1227 /// vectorization.
/// In many cases vectorization is not profitable, for a number of reasons. In
/// this class we mainly attempt to predict the expected speedups/slowdowns
/// due to the supported instruction set. We use the
1231 /// TargetTransformInfo to query the different backends for the cost of
1232 /// different operations.
1233 class LoopVectorizationCostModel {
1234 public:
1235   LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
1236                              PredicatedScalarEvolution &PSE, LoopInfo *LI,
1237                              LoopVectorizationLegality *Legal,
1238                              const TargetTransformInfo &TTI,
1239                              const TargetLibraryInfo *TLI, DemandedBits *DB,
1240                              AssumptionCache *AC,
1241                              OptimizationRemarkEmitter *ORE, const Function *F,
1242                              const LoopVectorizeHints *Hints,
1243                              InterleavedAccessInfo &IAI)
1244       : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
1245         TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
1246         Hints(Hints), InterleaveInfo(IAI) {}
1247 
1248   /// \return An upper bound for the vectorization factors (both fixed and
1249   /// scalable). If the factors are 0, vectorization and interleaving should be
1250   /// avoided up front.
1251   FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);
1252 
1253   /// \return True if runtime checks are required for vectorization, and false
1254   /// otherwise.
1255   bool runtimeChecksRequired();
1256 
1257   /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every VF in \p CandidateVFs. If UserVF is not zero
  /// then that vectorization factor will be selected if vectorization is
  /// possible.
1261   VectorizationFactor
1262   selectVectorizationFactor(const ElementCountSet &CandidateVFs);
1263 
1264   VectorizationFactor
1265   selectEpilogueVectorizationFactor(const ElementCount MaxVF,
1266                                     const LoopVectorizationPlanner &LVP);
1267 
1268   /// Setup cost-based decisions for user vectorization factor.
1269   /// \return true if the UserVF is a feasible VF to be chosen.
1270   bool selectUserVectorizationFactor(ElementCount UserVF) {
1271     collectUniformsAndScalars(UserVF);
1272     collectInstsToScalarize(UserVF);
1273     return expectedCost(UserVF).first.isValid();
1274   }
1275 
1276   /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar, such as
  /// 64-bit loop indices.
1279   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1280 
1281   /// \return The desired interleave count.
1282   /// If interleave count has been specified by metadata it will be returned.
1283   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1284   /// are the selected vectorization factor and the cost of the selected VF.
1285   unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);
1286 
  /// A memory access instruction may be vectorized in more than one way; the
  /// form it takes after vectorization depends on cost. This function makes
  /// cost-based decisions for Load/Store instructions and collects them in a
  /// map. This decision map is used for building the lists of loop-uniform
  /// and loop-scalar instructions. The calculated cost is saved with the
  /// widening decision in order to avoid redundant calculations.
1294   void setCostBasedWideningDecision(ElementCount VF);
1295 
1296   /// A struct that represents some properties of the register usage
1297   /// of a loop.
1298   struct RegisterUsage {
1299     /// Holds the number of loop invariant values that are used in the loop.
1300     /// The key is ClassID of target-provided register class.
1301     SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
1302     /// Holds the maximum number of concurrent live intervals in the loop.
1303     /// The key is ClassID of target-provided register class.
1304     SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
1305   };
1306 
  /// \return Information about the register usage of the loop for the given
  /// vectorization factors.
1309   SmallVector<RegisterUsage, 8>
1310   calculateRegisterUsage(ArrayRef<ElementCount> VFs);
1311 
1312   /// Collect values we want to ignore in the cost model.
1313   void collectValuesToIgnore();
1314 
1315   /// Collect all element types in the loop for which widening is needed.
1316   void collectElementTypesForWidening();
1317 
  /// Split reductions into those that happen in the loop, and those that
  /// happen outside. In-loop reductions are collected into
  /// InLoopReductionChains.
1320   void collectInLoopReductions();
1321 
1322   /// Returns true if we should use strict in-order reductions for the given
1323   /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
  /// the IsOrdered flag of RdxDesc is set, and we do not allow reordering
1325   /// of FP operations.
1326   bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) {
1327     return !Hints->allowReordering() && RdxDesc.isOrdered();
1328   }
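  // For example, an in-order floating-point reduction such as
  //   for (i = 0; i < n; ++i) Sum += A[i];
  // must preserve the sequential association (((Sum + A[0]) + A[1]) + ...),
  // so it cannot be split into independent partial sums unless reordering of
  // FP operations is allowed.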
1329 
1330   /// \returns The smallest bitwidth each instruction can be represented with.
1331   /// The vector equivalents of these instructions should be truncated to this
1332   /// type.
1333   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1334     return MinBWs;
1335   }
1336 
1337   /// \returns True if it is more profitable to scalarize instruction \p I for
1338   /// vectorization factor \p VF.
1339   bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
1340     assert(VF.isVector() &&
1341            "Profitable to scalarize relevant only for VF > 1.");
1342 
1343     // Cost model is not run in the VPlan-native path - return conservative
1344     // result until this changes.
1345     if (EnableVPlanNativePath)
1346       return false;
1347 
1348     auto Scalars = InstsToScalarize.find(VF);
1349     assert(Scalars != InstsToScalarize.end() &&
1350            "VF not yet analyzed for scalarization profitability");
1351     return Scalars->second.find(I) != Scalars->second.end();
1352   }
1353 
1354   /// Returns true if \p I is known to be uniform after vectorization.
1355   bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
1356     if (VF.isScalar())
1357       return true;
1358 
1359     // Cost model is not run in the VPlan-native path - return conservative
1360     // result until this changes.
1361     if (EnableVPlanNativePath)
1362       return false;
1363 
1364     auto UniformsPerVF = Uniforms.find(VF);
1365     assert(UniformsPerVF != Uniforms.end() &&
1366            "VF not yet analyzed for uniformity");
1367     return UniformsPerVF->second.count(I);
1368   }
1369 
1370   /// Returns true if \p I is known to be scalar after vectorization.
1371   bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
1372     if (VF.isScalar())
1373       return true;
1374 
1375     // Cost model is not run in the VPlan-native path - return conservative
1376     // result until this changes.
1377     if (EnableVPlanNativePath)
1378       return false;
1379 
1380     auto ScalarsPerVF = Scalars.find(VF);
1381     assert(ScalarsPerVF != Scalars.end() &&
1382            "Scalar values are not calculated for VF");
1383     return ScalarsPerVF->second.count(I);
1384   }
1385 
1386   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1387   /// for vectorization factor \p VF.
1388   bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
1389     return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
1390            !isProfitableToScalarize(I, VF) &&
1391            !isScalarAfterVectorization(I, VF);
1392   }
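  // For example, if an i32 operation is known (via demanded-bits analysis) to
  // only ever produce results that fit in 8 bits, MinBWs may record a minimal
  // bitwidth of 8 for it, and its widened form can then operate on
  // <VF x i8> instead of <VF x i32>.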
1393 
1394   /// Decision that was taken during cost calculation for memory instruction.
1395   enum InstWidening {
1396     CM_Unknown,
1397     CM_Widen,         // For consecutive accesses with stride +1.
1398     CM_Widen_Reverse, // For consecutive accesses with stride -1.
1399     CM_Interleave,
1400     CM_GatherScatter,
1401     CM_Scalarize
1402   };
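  // As a rough illustration of how these decisions map onto access patterns
  // (the real choice is cost-based, see setCostBasedWideningDecision):
  //   A[i]    with i += 1  -> CM_Widen          (consecutive, stride +1)
  //   A[i]    with i -= 1  -> CM_Widen_Reverse  (consecutive, stride -1)
  //   A[2*i], A[2*i + 1]   -> CM_Interleave     (interleave group of factor 2)
  //   A[B[i]]              -> CM_GatherScatter or CM_Scalarize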
1403 
1404   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1405   /// instruction \p I and vector width \p VF.
1406   void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
1407                            InstructionCost Cost) {
1408     assert(VF.isVector() && "Expected VF >=2");
1409     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1410   }
1411 
1412   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1413   /// interleaving group \p Grp and vector width \p VF.
1414   void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
1415                            ElementCount VF, InstWidening W,
1416                            InstructionCost Cost) {
1417     assert(VF.isVector() && "Expected VF >=2");
    // Broadcast this decision to all instructions inside the group. But the
    // cost will be assigned to one instruction only.
1420     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1421       if (auto *I = Grp->getMember(i)) {
1422         if (Grp->getInsertPos() == I)
1423           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1424         else
1425           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1426       }
1427     }
1428   }
1429 
1430   /// Return the cost model decision for the given instruction \p I and vector
1431   /// width \p VF. Return CM_Unknown if this instruction did not pass
1432   /// through the cost modeling.
1433   InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
1434     assert(VF.isVector() && "Expected VF to be a vector VF");
1435     // Cost model is not run in the VPlan-native path - return conservative
1436     // result until this changes.
1437     if (EnableVPlanNativePath)
1438       return CM_GatherScatter;
1439 
1440     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1441     auto Itr = WideningDecisions.find(InstOnVF);
1442     if (Itr == WideningDecisions.end())
1443       return CM_Unknown;
1444     return Itr->second.first;
1445   }
1446 
1447   /// Return the vectorization cost for the given instruction \p I and vector
1448   /// width \p VF.
1449   InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
1450     assert(VF.isVector() && "Expected VF >=2");
1451     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1452     assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1453            "The cost is not calculated");
1454     return WideningDecisions[InstOnVF].second;
1455   }
1456 
  /// Return true if instruction \p I is an optimizable truncate whose operand
1458   /// is an induction variable. Such a truncate will be removed by adding a new
1459   /// induction variable with the destination type.
1460   bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
1461     // If the instruction is not a truncate, return false.
1462     auto *Trunc = dyn_cast<TruncInst>(I);
1463     if (!Trunc)
1464       return false;
1465 
1466     // Get the source and destination types of the truncate.
1467     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1468     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1469 
1470     // If the truncate is free for the given types, return false. Replacing a
1471     // free truncate with an induction variable would add an induction variable
1472     // update instruction to each iteration of the loop. We exclude from this
1473     // check the primary induction variable since it will need an update
1474     // instruction regardless.
1475     Value *Op = Trunc->getOperand(0);
1476     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1477       return false;
1478 
1479     // If the truncated value is not an induction variable, return false.
1480     return Legal->isInductionPhi(Op);
1481   }
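  // For example, given a 64-bit primary induction variable:
  //   for (i64 i = 0; i < n; ++i)
  //     use((i32)i);
  // the (i32)i truncate is optimizable: it can be replaced by a new 32-bit
  // induction variable that is updated in lockstep, removing the truncate
  // from the loop body.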
1482 
1483   /// Collects the instructions to scalarize for each predicated instruction in
1484   /// the loop.
1485   void collectInstsToScalarize(ElementCount VF);
1486 
1487   /// Collect Uniform and Scalar values for the given \p VF.
1488   /// The sets depend on CM decision for Load/Store instructions
1489   /// that may be vectorized as interleave, gather-scatter or scalarized.
1490   void collectUniformsAndScalars(ElementCount VF) {
1491     // Do the analysis once.
1492     if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
1493       return;
1494     setCostBasedWideningDecision(VF);
1495     collectLoopUniforms(VF);
1496     collectLoopScalars(VF);
1497   }
1498 
1499   /// Returns true if the target machine supports masked store operation
1500   /// for the given \p DataType and kind of access to \p Ptr.
1501   bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const {
1502     return Legal->isConsecutivePtr(DataType, Ptr) &&
1503            TTI.isLegalMaskedStore(DataType, Alignment);
1504   }
1505 
1506   /// Returns true if the target machine supports masked load operation
1507   /// for the given \p DataType and kind of access to \p Ptr.
1508   bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const {
1509     return Legal->isConsecutivePtr(DataType, Ptr) &&
1510            TTI.isLegalMaskedLoad(DataType, Alignment);
1511   }
1512 
1513   /// Returns true if the target machine can represent \p V as a masked gather
1514   /// or scatter operation.
1515   bool isLegalGatherOrScatter(Value *V,
1516                               ElementCount VF = ElementCount::getFixed(1)) {
1517     bool LI = isa<LoadInst>(V);
1518     bool SI = isa<StoreInst>(V);
1519     if (!LI && !SI)
1520       return false;
1521     auto *Ty = getLoadStoreType(V);
1522     Align Align = getLoadStoreAlignment(V);
1523     if (VF.isVector())
1524       Ty = VectorType::get(Ty, VF);
1525     return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
1526            (SI && TTI.isLegalMaskedScatter(Ty, Align));
1527   }
1528 
1529   /// Returns true if the target machine supports all of the reduction
1530   /// variables found for the given VF.
1531   bool canVectorizeReductions(ElementCount VF) const {
1532     return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
1533       const RecurrenceDescriptor &RdxDesc = Reduction.second;
1534       return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
1535     }));
1536   }
1537 
1538   /// Returns true if \p I is an instruction that will be scalarized with
1539   /// predication when vectorizing \p I with vectorization factor \p VF. Such
1540   /// instructions include conditional stores and instructions that may divide
1541   /// by zero.
1542   bool isScalarWithPredication(Instruction *I, ElementCount VF) const;
1543 
  /// Returns true if \p I is an instruction that will be predicated either
  /// through scalar predication or masked load/store or masked gather/scatter.
  /// \p VF is the vectorization factor that will be used to vectorize \p I.
  /// Superset of instructions that return true for isScalarWithPredication.
1548   bool isPredicatedInst(Instruction *I, ElementCount VF,
1549                         bool IsKnownUniform = false) {
1550     // When we know the load is uniform and the original scalar loop was not
1551     // predicated we don't need to mark it as a predicated instruction. Any
1552     // vectorised blocks created when tail-folding are something artificial we
1553     // have introduced and we know there is always at least one active lane.
1554     // That's why we call Legal->blockNeedsPredication here because it doesn't
1555     // query tail-folding.
1556     if (IsKnownUniform && isa<LoadInst>(I) &&
1557         !Legal->blockNeedsPredication(I->getParent()))
1558       return false;
1559     if (!blockNeedsPredicationForAnyReason(I->getParent()))
1560       return false;
1561     // Loads and stores that need some form of masked operation are predicated
1562     // instructions.
1563     if (isa<LoadInst>(I) || isa<StoreInst>(I))
1564       return Legal->isMaskRequired(I);
1565     return isScalarWithPredication(I, VF);
1566   }
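  // For example, the conditional store in
  //   for (i = 0; i < n; ++i)
  //     if (C[i]) A[i] = X;
  // is a predicated instruction: it is either scalarized behind a per-lane
  // branch or emitted as a masked store, depending on target support.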
1567 
1568   /// Returns true if \p I is a memory instruction with consecutive memory
1569   /// access that can be widened.
1570   bool
1571   memoryInstructionCanBeWidened(Instruction *I,
1572                                 ElementCount VF = ElementCount::getFixed(1));
1573 
1574   /// Returns true if \p I is a memory instruction in an interleaved-group
1575   /// of memory accesses that can be vectorized with wide vector loads/stores
1576   /// and shuffles.
1577   bool
1578   interleavedAccessCanBeWidened(Instruction *I,
1579                                 ElementCount VF = ElementCount::getFixed(1));
1580 
1581   /// Check if \p Instr belongs to any interleaved access group.
1582   bool isAccessInterleaved(Instruction *Instr) {
1583     return InterleaveInfo.isInterleaved(Instr);
1584   }
1585 
1586   /// Get the interleaved access group that \p Instr belongs to.
1587   const InterleaveGroup<Instruction> *
1588   getInterleavedAccessGroup(Instruction *Instr) {
1589     return InterleaveInfo.getInterleaveGroup(Instr);
1590   }
1591 
1592   /// Returns true if we're required to use a scalar epilogue for at least
1593   /// the final iteration of the original loop.
1594   bool requiresScalarEpilogue(ElementCount VF) const {
1595     if (!isScalarEpilogueAllowed())
1596       return false;
    // If we might exit from anywhere but the latch, we must run the exiting
    // iteration in scalar form.
1599     if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
1600       return true;
1601     return VF.isVector() && InterleaveInfo.requiresScalarEpilogue();
1602   }
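  // For example, an interleave group with a gap, such as
  //   for (i = 0; i < n; i += 2)
  //     use(A[i]); // A[i+1] is never read
  // may require a scalar epilogue: the widened loads read both members of
  // each group, so the last vector iteration could otherwise access memory
  // past the end of A.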
1603 
1604   /// Returns true if a scalar epilogue is not allowed due to optsize or a
1605   /// loop hint annotation.
1606   bool isScalarEpilogueAllowed() const {
1607     return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1608   }
1609 
  /// Returns true if all loop blocks should be masked to fold the loop tail.
1611   bool foldTailByMasking() const { return FoldTailByMasking; }
1612 
  /// Returns true if the instructions in this block require predication
1614   /// for any reason, e.g. because tail folding now requires a predicate
1615   /// or because the block in the original loop was predicated.
1616   bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const {
1617     return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1618   }
1619 
1620   /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
1621   /// nodes to the chain of instructions representing the reductions. Uses a
1622   /// MapVector to ensure deterministic iteration order.
1623   using ReductionChainMap =
1624       SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;
1625 
1626   /// Return the chain of instructions representing an inloop reduction.
1627   const ReductionChainMap &getInLoopReductionChains() const {
1628     return InLoopReductionChains;
1629   }
1630 
1631   /// Returns true if the Phi is part of an inloop reduction.
1632   bool isInLoopReduction(PHINode *Phi) const {
1633     return InLoopReductionChains.count(Phi);
1634   }
1635 
1636   /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1637   /// with factor VF.  Return the cost of the instruction, including
1638   /// scalarization overhead if it's needed.
1639   InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;
1640 
1641   /// Estimate cost of a call instruction CI if it were vectorized with factor
1642   /// VF. Return the cost of the instruction, including scalarization overhead
  /// if it's needed. The flag NeedToScalarize shows if the call needs to be
  /// scalarized, i.e. either a vector version isn't available or it is too
  /// expensive.
1646   InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
1647                                     bool &NeedToScalarize) const;
1648 
1649   /// Returns true if the per-lane cost of VectorizationFactor A is lower than
1650   /// that of B.
1651   bool isMoreProfitable(const VectorizationFactor &A,
1652                         const VectorizationFactor &B) const;
1653 
1654   /// Invalidates decisions already taken by the cost model.
1655   void invalidateCostModelingDecisions() {
1656     WideningDecisions.clear();
1657     Uniforms.clear();
1658     Scalars.clear();
1659   }
1660 
1661 private:
1662   unsigned NumPredStores = 0;
1663 
  /// Convenience function that returns the value of vscale_range if
  /// vscale_range.min == vscale_range.max; otherwise it returns the value
  /// returned by the corresponding TTI method.
1667   Optional<unsigned> getVScaleForTuning() const;
1668 
1669   /// \return An upper bound for the vectorization factors for both
1670   /// fixed and scalable vectorization, where the minimum-known number of
1671   /// elements is a power-of-2 larger than zero. If scalable vectorization is
1672   /// disabled or unsupported, then the scalable part will be equal to
1673   /// ElementCount::getScalable(0).
1674   FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
1675                                            ElementCount UserVF,
1676                                            bool FoldTailByMasking);
1677 
  /// \return the maximized element count based on the target's vector
  /// registers and the loop trip count, but limited to a maximum safe VF.
1680   /// This is a helper function of computeFeasibleMaxVF.
1681   /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
1682   /// issue that occurred on one of the buildbots which cannot be reproduced
  /// without having access to the proprietary compiler (see comments on
1684   /// D98509). The issue is currently under investigation and this workaround
1685   /// will be removed as soon as possible.
1686   ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
1687                                        unsigned SmallestType,
1688                                        unsigned WidestType,
1689                                        const ElementCount &MaxSafeVF,
1690                                        bool FoldTailByMasking);
1691 
1692   /// \return the maximum legal scalable VF, based on the safe max number
1693   /// of elements.
1694   ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1695 
1696   /// The vectorization cost is a combination of the cost itself and a boolean
1697   /// indicating whether any of the contributing operations will actually
1698   /// operate on vector values after type legalization in the backend. If this
1699   /// latter value is false, then all operations will be scalarized (i.e. no
1700   /// vectorization has actually taken place).
1701   using VectorizationCostTy = std::pair<InstructionCost, bool>;
1702 
1703   /// Returns the expected execution cost. The unit of the cost does
1704   /// not matter because we use the 'cost' units to compare different
1705   /// vector widths. The cost that is returned is *not* normalized by
1706   /// the factor width. If \p Invalid is not nullptr, this function
1707   /// will add a pair(Instruction*, ElementCount) to \p Invalid for
1708   /// each instruction that has an Invalid cost for the given VF.
1709   using InstructionVFPair = std::pair<Instruction *, ElementCount>;
1710   VectorizationCostTy
1711   expectedCost(ElementCount VF,
1712                SmallVectorImpl<InstructionVFPair> *Invalid = nullptr);
1713 
1714   /// Returns the execution time cost of an instruction for a given vector
1715   /// width. Vector width of one means scalar.
1716   VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1717 
1718   /// The cost-computation logic from getInstructionCost which provides
1719   /// the vector type as an output parameter.
1720   InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
1721                                      Type *&VectorTy);
1722 
1723   /// Return the cost of instructions in an inloop reduction pattern, if I is
1724   /// part of that pattern.
1725   Optional<InstructionCost>
1726   getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy,
1727                           TTI::TargetCostKind CostKind);
1728 
1729   /// Calculate vectorization cost of memory instruction \p I.
1730   InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1731 
1732   /// The cost computation for scalarized memory instruction.
1733   InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1734 
1735   /// The cost computation for interleaving group of memory instructions.
1736   InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1737 
1738   /// The cost computation for Gather/Scatter instruction.
1739   InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1740 
1741   /// The cost computation for widening instruction \p I with consecutive
1742   /// memory access.
1743   InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1744 
  /// The cost calculation for Load/Store instruction \p I with a uniform
  /// pointer:
  /// Load: scalar load + broadcast.
  /// Store: scalar store + (loop-invariant value stored ? 0 : extract of last
  /// element).
1749   InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1750 
1751   /// Estimate the overhead of scalarizing an instruction. This is a
1752   /// convenience wrapper for the type-based getScalarizationOverhead API.
1753   InstructionCost getScalarizationOverhead(Instruction *I,
1754                                            ElementCount VF) const;
1755 
  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
1758   bool isConsecutiveLoadOrStore(Instruction *I);
1759 
1760   /// Returns true if an artificially high cost for emulated masked memrefs
1761   /// should be used.
1762   bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF);
1763 
1764   /// Map of scalar integer values to the smallest bitwidth they can be legally
1765   /// represented as. The vector equivalents of these values should be truncated
1766   /// to this type.
1767   MapVector<Instruction *, uint64_t> MinBWs;
1768 
1769   /// A type representing the costs for instructions if they were to be
1770   /// scalarized rather than vectorized. The entries are Instruction-Cost
1771   /// pairs.
1772   using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1773 
  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
1776   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1777 
1778   /// Records whether it is allowed to have the original scalar loop execute at
1779   /// least once. This may be needed as a fallback loop in case runtime
1780   /// aliasing/dependence checks fail, or to handle the tail/remainder
1781   /// iterations when the trip count is unknown or doesn't divide by the VF,
1782   /// or as a peel-loop to handle gaps in interleave-groups.
1783   /// Under optsize and when the trip count is very small we don't allow any
1784   /// iterations to execute in the scalar loop.
1785   ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1786 
  /// All blocks of the loop are to be masked to fold the tail of scalar
  /// iterations.
1788   bool FoldTailByMasking = false;
1789 
1790   /// A map holding scalar costs for different vectorization factors. The
1791   /// presence of a cost for an instruction in the mapping indicates that the
1792   /// instruction will be scalarized when vectorizing with the associated
1793   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1794   DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1795 
1796   /// Holds the instructions known to be uniform after vectorization.
1797   /// The data is collected per VF.
1798   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1799 
1800   /// Holds the instructions known to be scalar after vectorization.
1801   /// The data is collected per VF.
1802   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1803 
1804   /// Holds the instructions (address computations) that are forced to be
1805   /// scalarized.
1806   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1807 
  /// PHINodes of the reductions that should be expanded in-loop along with
  /// their associated chains of reduction operations, in program order from
  /// top (PHI) to bottom.
1811   ReductionChainMap InLoopReductionChains;
1812 
1813   /// A Map of inloop reduction operations and their immediate chain operand.
1814   /// FIXME: This can be removed once reductions can be costed correctly in
1815   /// vplan. This was added to allow quick lookup to the inloop operations,
1816   /// without having to loop through InLoopReductionChains.
1817   DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1818 
1819   /// Returns the expected difference in cost from scalarizing the expression
1820   /// feeding a predicated instruction \p PredInst. The instructions to
1821   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1822   /// non-negative return value implies the expression will be scalarized.
1823   /// Currently, only single-use chains are considered for scalarization.
1824   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1825                               ElementCount VF);
1826 
1827   /// Collect the instructions that are uniform after vectorization. An
1828   /// instruction is uniform if we represent it with a single scalar value in
1829   /// the vectorized loop corresponding to each vector iteration. Examples of
1830   /// uniform instructions include pointer operands of consecutive or
1831   /// interleaved memory accesses. Note that although uniformity implies an
1832   /// instruction will be scalar, the reverse is not true. In general, a
1833   /// scalarized instruction will be represented by VF scalar values in the
1834   /// vectorized loop, each corresponding to an iteration of the original
1835   /// scalar loop.
1836   void collectLoopUniforms(ElementCount VF);
1837 
1838   /// Collect the instructions that are scalar after vectorization. An
1839   /// instruction is scalar if it is known to be uniform or will be scalarized
1840   /// during vectorization. collectLoopScalars should only add non-uniform nodes
1841   /// to the list if they are used by a load/store instruction that is marked as
1842   /// CM_Scalarize. Non-uniform scalarized instructions will be represented by
1843   /// VF values in the vectorized loop, each corresponding to an iteration of
1844   /// the original scalar loop.
1845   void collectLoopScalars(ElementCount VF);
1846 
1847   /// Keeps cost model vectorization decision and cost for instructions.
1848   /// Right now it is used for memory instructions only.
1849   using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1850                                 std::pair<InstWidening, InstructionCost>>;
1851 
1852   DecisionList WideningDecisions;
1853 
1854   /// Returns true if \p V is expected to be vectorized and it needs to be
1855   /// extracted.
1856   bool needsExtract(Value *V, ElementCount VF) const {
1857     Instruction *I = dyn_cast<Instruction>(V);
1858     if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1859         TheLoop->isLoopInvariant(I))
1860       return false;
1861 
1862     // Assume we can vectorize V (and hence we need extraction) if the
1863     // scalars are not computed yet. This can happen, because it is called
1864     // via getScalarizationOverhead from setCostBasedWideningDecision, before
1865     // the scalars are collected. That should be a safe assumption in most
1866     // cases, because we check if the operands have vectorizable types
1867     // beforehand in LoopVectorizationLegality.
1868     return Scalars.find(VF) == Scalars.end() ||
1869            !isScalarAfterVectorization(I, VF);
  }
1871 
1872   /// Returns a range containing only operands needing to be extracted.
1873   SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1874                                                    ElementCount VF) const {
1875     return SmallVector<Value *, 4>(make_filter_range(
1876         Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
1877   }
1878 
1879   /// Determines if we have the infrastructure to vectorize loop \p L and its
1880   /// epilogue, assuming the main loop is vectorized by \p VF.
1881   bool isCandidateForEpilogueVectorization(const Loop &L,
1882                                            const ElementCount VF) const;
1883 
1884   /// Returns true if epilogue vectorization is considered profitable, and
1885   /// false otherwise.
1886   /// \p VF is the vectorization factor chosen for the original loop.
1887   bool isEpilogueVectorizationProfitable(const ElementCount VF) const;
1888 
1889 public:
1890   /// The loop that we evaluate.
1891   Loop *TheLoop;
1892 
1893   /// Predicated scalar evolution analysis.
1894   PredicatedScalarEvolution &PSE;
1895 
1896   /// Loop Info analysis.
1897   LoopInfo *LI;
1898 
1899   /// Vectorization legality.
1900   LoopVectorizationLegality *Legal;
1901 
1902   /// Vector target information.
1903   const TargetTransformInfo &TTI;
1904 
1905   /// Target Library Info.
1906   const TargetLibraryInfo *TLI;
1907 
1908   /// Demanded bits analysis.
1909   DemandedBits *DB;
1910 
1911   /// Assumption cache.
1912   AssumptionCache *AC;
1913 
1914   /// Interface to emit optimization remarks.
1915   OptimizationRemarkEmitter *ORE;
1916 
1917   const Function *TheFunction;
1918 
1919   /// Loop Vectorize Hint.
1920   const LoopVectorizeHints *Hints;
1921 
  /// The interleave access information contains groups of interleaved accesses
  /// with the same stride that are close to each other.
1924   InterleavedAccessInfo &InterleaveInfo;
1925 
1926   /// Values to ignore in the cost model.
1927   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1928 
1929   /// Values to ignore in the cost model when VF > 1.
1930   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1931 
1932   /// All element types found in the loop.
1933   SmallPtrSet<Type *, 16> ElementTypesInLoop;
1934 
1935   /// Profitable vector factors.
1936   SmallVector<VectorizationFactor, 8> ProfitableVFs;
1937 };
1938 } // end namespace llvm
1939 
1940 /// Helper struct to manage generating runtime checks for vectorization.
1941 ///
/// The runtime checks are created up-front in temporary blocks, un-linked from
/// the existing IR, to allow a more accurate cost estimate. After deciding to
/// vectorize, the checks are moved back. If we decide not to vectorize, the
/// temporary blocks are completely removed.
1946 class GeneratedRTChecks {
1947   /// Basic block which contains the generated SCEV checks, if any.
1948   BasicBlock *SCEVCheckBlock = nullptr;
1949 
1950   /// The value representing the result of the generated SCEV checks. If it is
1951   /// nullptr, either no SCEV checks have been generated or they have been used.
1952   Value *SCEVCheckCond = nullptr;
1953 
1954   /// Basic block which contains the generated memory runtime checks, if any.
1955   BasicBlock *MemCheckBlock = nullptr;
1956 
1957   /// The value representing the result of the generated memory runtime checks.
1958   /// If it is nullptr, either no memory runtime checks have been generated or
1959   /// they have been used.
1960   Value *MemRuntimeCheckCond = nullptr;
1961 
1962   DominatorTree *DT;
1963   LoopInfo *LI;
1964 
1965   SCEVExpander SCEVExp;
1966   SCEVExpander MemCheckExp;
1967 
1968 public:
1969   GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI,
1970                     const DataLayout &DL)
1971       : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"),
1972         MemCheckExp(SE, DL, "scev.check") {}
1973 
1974   /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
  /// accurately estimate the cost of the runtime checks. The blocks are
  /// un-linked from the IR and are added back during vector code generation.
  /// If there is no vector code generation, the check blocks are removed
  /// completely.
1979   void Create(Loop *L, const LoopAccessInfo &LAI,
1980               const SCEVPredicate &Pred) {
1981 
1982     BasicBlock *LoopHeader = L->getHeader();
1983     BasicBlock *Preheader = L->getLoopPreheader();
1984 
1985     // Use SplitBlock to create blocks for SCEV & memory runtime checks to
1986     // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
1987     // may be used by SCEVExpander. The blocks will be un-linked from their
1988     // predecessors and removed from LI & DT at the end of the function.
1989     if (!Pred.isAlwaysTrue()) {
1990       SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
1991                                   nullptr, "vector.scevcheck");
1992 
1993       SCEVCheckCond = SCEVExp.expandCodeForPredicate(
1994           &Pred, SCEVCheckBlock->getTerminator());
1995     }
1996 
1997     const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
1998     if (RtPtrChecking.Need) {
1999       auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
2000       MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
2001                                  "vector.memcheck");
2002 
2003       MemRuntimeCheckCond =
2004           addRuntimeChecks(MemCheckBlock->getTerminator(), L,
2005                            RtPtrChecking.getChecks(), MemCheckExp);
2006       assert(MemRuntimeCheckCond &&
2007              "no RT checks generated although RtPtrChecking "
2008              "claimed checks are required");
2009     }
2010 
2011     if (!MemCheckBlock && !SCEVCheckBlock)
2012       return;
2013 
    // Unhook the temporary blocks containing the checks and update various
    // places accordingly.
2016     if (SCEVCheckBlock)
2017       SCEVCheckBlock->replaceAllUsesWith(Preheader);
2018     if (MemCheckBlock)
2019       MemCheckBlock->replaceAllUsesWith(Preheader);
2020 
2021     if (SCEVCheckBlock) {
2022       SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
2023       new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
2024       Preheader->getTerminator()->eraseFromParent();
2025     }
2026     if (MemCheckBlock) {
2027       MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
2028       new UnreachableInst(Preheader->getContext(), MemCheckBlock);
2029       Preheader->getTerminator()->eraseFromParent();
2030     }
2031 
2032     DT->changeImmediateDominator(LoopHeader, Preheader);
2033     if (MemCheckBlock) {
2034       DT->eraseNode(MemCheckBlock);
2035       LI->removeBlock(MemCheckBlock);
2036     }
2037     if (SCEVCheckBlock) {
2038       DT->eraseNode(SCEVCheckBlock);
2039       LI->removeBlock(SCEVCheckBlock);
2040     }
2041   }
2042 
2043   /// Remove the created SCEV & memory runtime check blocks & instructions, if
2044   /// unused.
2045   ~GeneratedRTChecks() {
2046     SCEVExpanderCleaner SCEVCleaner(SCEVExp);
2047     SCEVExpanderCleaner MemCheckCleaner(MemCheckExp);
2048     if (!SCEVCheckCond)
2049       SCEVCleaner.markResultUsed();
2050 
2051     if (!MemRuntimeCheckCond)
2052       MemCheckCleaner.markResultUsed();
2053 
2054     if (MemRuntimeCheckCond) {
2055       auto &SE = *MemCheckExp.getSE();
2056       // Memory runtime check generation creates compares that use expanded
2057       // values. Remove them before running the SCEVExpanderCleaners.
2058       for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
2059         if (MemCheckExp.isInsertedInstruction(&I))
2060           continue;
2061         SE.forgetValue(&I);
2062         I.eraseFromParent();
2063       }
2064     }
2065     MemCheckCleaner.cleanup();
2066     SCEVCleaner.cleanup();
2067 
2068     if (SCEVCheckCond)
2069       SCEVCheckBlock->eraseFromParent();
2070     if (MemRuntimeCheckCond)
2071       MemCheckBlock->eraseFromParent();
2072   }
2073 
2074   /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and
2075   /// adjusts the branches to branch to the vector preheader or \p Bypass,
2076   /// depending on the generated condition.
2077   BasicBlock *emitSCEVChecks(BasicBlock *Bypass,
2078                              BasicBlock *LoopVectorPreHeader,
2079                              BasicBlock *LoopExitBlock) {
2080     if (!SCEVCheckCond)
2081       return nullptr;
2082     if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond))
2083       if (C->isZero())
2084         return nullptr;
2085 
2086     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2087 
2088     BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock);
    // If the vector loop preheader is nested inside a loop, the SCEV check
    // block becomes part of that loop as well.
2090     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2091       PL->addBasicBlockToLoop(SCEVCheckBlock, *LI);
2092 
2093     SCEVCheckBlock->getTerminator()->eraseFromParent();
2094     SCEVCheckBlock->moveBefore(LoopVectorPreHeader);
2095     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2096                                                 SCEVCheckBlock);
2097 
2098     DT->addNewBlock(SCEVCheckBlock, Pred);
2099     DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock);
2100 
2101     ReplaceInstWithInst(
2102         SCEVCheckBlock->getTerminator(),
2103         BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond));
2104     // Mark the check as used, to prevent it from being removed during cleanup.
2105     SCEVCheckCond = nullptr;
2106     return SCEVCheckBlock;
2107   }
2108 
2109   /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts
2110   /// the branches to branch to the vector preheader or \p Bypass, depending on
2111   /// the generated condition.
2112   BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass,
2113                                    BasicBlock *LoopVectorPreHeader) {
2114     // Check if we generated code that checks in runtime if arrays overlap.
2115     if (!MemRuntimeCheckCond)
2116       return nullptr;
2117 
2118     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2119     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2120                                                 MemCheckBlock);
2121 
2122     DT->addNewBlock(MemCheckBlock, Pred);
2123     DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock);
2124     MemCheckBlock->moveBefore(LoopVectorPreHeader);
2125 
2126     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2127       PL->addBasicBlockToLoop(MemCheckBlock, *LI);
2128 
2129     ReplaceInstWithInst(
2130         MemCheckBlock->getTerminator(),
2131         BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond));
2132     MemCheckBlock->getTerminator()->setDebugLoc(
2133         Pred->getTerminator()->getDebugLoc());
2134 
2135     // Mark the check as used, to prevent it from being removed during cleanup.
2136     MemRuntimeCheckCond = nullptr;
2137     return MemCheckBlock;
2138   }
2139 };
2140 
2141 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
2142 // vectorization. The loop needs to be annotated with #pragma omp simd
// simdlen(#) or #pragma clang loop vectorize(enable) vectorize_width(#). If the
2144 // vector length information is not provided, vectorization is not considered
2145 // explicit. Interleave hints are not allowed either. These limitations will be
2146 // relaxed in the future.
// Please note that we are currently forced to abuse the pragma 'clang
2148 // vectorize' semantics. This pragma provides *auto-vectorization hints*
2149 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
2150 // provides *explicit vectorization hints* (LV can bypass legal checks and
2151 // assume that vectorization is legal). However, both hints are implemented
2152 // using the same metadata (llvm.loop.vectorize, processed by
2153 // LoopVectorizeHints). This will be fixed in the future when the native IR
2154 // representation for pragma 'omp simd' is introduced.
2155 static bool isExplicitVecOuterLoop(Loop *OuterLp,
2156                                    OptimizationRemarkEmitter *ORE) {
2157   assert(!OuterLp->isInnermost() && "This is not an outer loop");
2158   LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
2159 
2160   // Only outer loops with an explicit vectorization hint are supported.
2161   // Unannotated outer loops are ignored.
2162   if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
2163     return false;
2164 
2165   Function *Fn = OuterLp->getHeader()->getParent();
2166   if (!Hints.allowVectorization(Fn, OuterLp,
2167                                 true /*VectorizeOnlyWhenForced*/)) {
2168     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
2169     return false;
2170   }
2171 
2172   if (Hints.getInterleave() > 1) {
2173     // TODO: Interleave support is future work.
2174     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
2175                          "outer loops.\n");
2176     Hints.emitRemarkWithHints();
2177     return false;
2178   }
2179 
2180   return true;
2181 }
2182 
2183 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
2184                                   OptimizationRemarkEmitter *ORE,
2185                                   SmallVectorImpl<Loop *> &V) {
2186   // Collect inner loops and outer loops without irreducible control flow. For
2187   // now, only collect outer loops that have explicit vectorization hints. If we
2188   // are stress testing the VPlan H-CFG construction, we collect the outermost
2189   // loop of every loop nest.
2190   if (L.isInnermost() || VPlanBuildStressTest ||
2191       (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
2192     LoopBlocksRPO RPOT(&L);
2193     RPOT.perform(LI);
2194     if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
2195       V.push_back(&L);
2196       // TODO: Collect inner loops inside marked outer loops in case
2197       // vectorization fails for the outer loop. Do not invoke
2198       // 'containsIrreducibleCFG' again for inner loops when the outer loop is
2199       // already known to be reducible. We can use an inherited attribute for
2200       // that.
2201       return;
2202     }
2203   }
2204   for (Loop *InnerL : L)
2205     collectSupportedLoops(*InnerL, LI, ORE, V);
2206 }
2207 
2208 namespace {
2209 
2210 /// The LoopVectorize Pass.
2211 struct LoopVectorize : public FunctionPass {
2212   /// Pass identification, replacement for typeid
2213   static char ID;
2214 
2215   LoopVectorizePass Impl;
2216 
2217   explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
2218                          bool VectorizeOnlyWhenForced = false)
2219       : FunctionPass(ID),
2220         Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
2221     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
2222   }
2223 
2224   bool runOnFunction(Function &F) override {
2225     if (skipFunction(F))
2226       return false;
2227 
2228     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2229     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2230     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
2231     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2232     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
2233     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
2234     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
2235     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2236     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2237     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
2238     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
2239     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2240     auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
2241 
2242     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
2243         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
2244 
2245     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
2246                         GetLAA, *ORE, PSI).MadeAnyChange;
2247   }
2248 
2249   void getAnalysisUsage(AnalysisUsage &AU) const override {
2250     AU.addRequired<AssumptionCacheTracker>();
2251     AU.addRequired<BlockFrequencyInfoWrapperPass>();
2252     AU.addRequired<DominatorTreeWrapperPass>();
2253     AU.addRequired<LoopInfoWrapperPass>();
2254     AU.addRequired<ScalarEvolutionWrapperPass>();
2255     AU.addRequired<TargetTransformInfoWrapperPass>();
2256     AU.addRequired<AAResultsWrapperPass>();
2257     AU.addRequired<LoopAccessLegacyAnalysis>();
2258     AU.addRequired<DemandedBitsWrapperPass>();
2259     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2260     AU.addRequired<InjectTLIMappingsLegacy>();
2261 
2262     // We currently do not preserve loopinfo/dominator analyses with outer loop
2263     // vectorization. Until this is addressed, mark these analyses as preserved
2264     // only for non-VPlan-native path.
2265     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
2266     if (!EnableVPlanNativePath) {
2267       AU.addPreserved<LoopInfoWrapperPass>();
2268       AU.addPreserved<DominatorTreeWrapperPass>();
2269     }
2270 
2271     AU.addPreserved<BasicAAWrapperPass>();
2272     AU.addPreserved<GlobalsAAWrapperPass>();
2273     AU.addRequired<ProfileSummaryInfoWrapperPass>();
2274   }
2275 };
2276 
2277 } // end anonymous namespace
2278 
2279 //===----------------------------------------------------------------------===//
2280 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
2281 // LoopVectorizationCostModel and LoopVectorizationPlanner.
2282 //===----------------------------------------------------------------------===//
2283 
2284 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
2285   // We need to place the broadcast of invariant variables outside the loop,
  // but only if it's proven safe to do so. Otherwise, the broadcast will be
  // placed inside the vector loop body.
2288   Instruction *Instr = dyn_cast<Instruction>(V);
2289   bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
2290                      (!Instr ||
2291                       DT->dominates(Instr->getParent(), LoopVectorPreHeader));
2292   // Place the code for broadcasting invariant variables in the new preheader.
2293   IRBuilder<>::InsertPointGuard Guard(Builder);
2294   if (SafeToHoist)
2295     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2296 
2297   // Broadcast the scalar into all locations in the vector.
2298   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
2299 
2300   return Shuf;
2301 }
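// For VF = 4 and a scalar %v of type T, the splat created above expands to IR
// of roughly this shape (a sketch; the exact value names differ):
//   %broadcast.splatinsert = insertelement <4 x T> poison, T %v, i64 0
//   %broadcast.splat = shufflevector <4 x T> %broadcast.splatinsert,
//                                    <4 x T> poison, <4 x i32> zeroinitializer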
2302 
2303 /// This function adds
2304 /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
/// to each vector element of Val. The sequence starts at StartIdx.
/// \p BinOp is relevant for FP induction variables.
2307 static Value *getStepVector(Value *Val, Value *StartIdx, Value *Step,
2308                             Instruction::BinaryOps BinOp, ElementCount VF,
2309                             IRBuilderBase &Builder) {
2310   assert(VF.isVector() && "only vector VFs are supported");
2311 
2312   // Create and check the types.
2313   auto *ValVTy = cast<VectorType>(Val->getType());
2314   ElementCount VLen = ValVTy->getElementCount();
2315 
2316   Type *STy = Val->getType()->getScalarType();
2317   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
2318          "Induction Step must be an integer or FP");
2319   assert(Step->getType() == STy && "Step has wrong type");
2320 
2321   SmallVector<Constant *, 8> Indices;
2322 
  // Create a vector of consecutive numbers from zero to VF - 1.
2324   VectorType *InitVecValVTy = ValVTy;
2325   if (STy->isFloatingPointTy()) {
2326     Type *InitVecValSTy =
2327         IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
2328     InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
2329   }
2330   Value *InitVec = Builder.CreateStepVector(InitVecValVTy);
2331 
2332   // Splat the StartIdx
2333   Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx);
2334 
2335   if (STy->isIntegerTy()) {
2336     InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);
2337     Step = Builder.CreateVectorSplat(VLen, Step);
2338     assert(Step->getType() == Val->getType() && "Invalid step vec");
2339     // FIXME: The newly created binary instructions should contain nsw/nuw
2340     // flags, which can be found from the original scalar operations.
2341     Step = Builder.CreateMul(InitVec, Step);
2342     return Builder.CreateAdd(Val, Step, "induction");
2343   }
2344 
2345   // Floating point induction.
2346   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
2347          "Binary Opcode should be specified for FP induction");
2348   InitVec = Builder.CreateUIToFP(InitVec, ValVTy);
2349   InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat);
2350 
2351   Step = Builder.CreateVectorSplat(VLen, Step);
2352   Value *MulOp = Builder.CreateFMul(InitVec, Step);
2353   return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
2354 }
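// For example, with VF = 4, an integer Val = <v, v, v, v>, StartIdx = 2 and
// Step = s, the result is
//   <v + 2*s, v + 3*s, v + 4*s, v + 5*s>
// computed as Val + (stepvector + splat(StartIdx)) * splat(Step).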
2355 
2356 /// Compute scalar induction steps. \p ScalarIV is the scalar induction
2357 /// variable on which to base the steps, \p Step is the size of the step.
2358 static void buildScalarSteps(Value *ScalarIV, Value *Step,
2359                              const InductionDescriptor &ID, VPValue *Def,
2360                              VPTransformState &State) {
2361   IRBuilderBase &Builder = State.Builder;
2362   // We shouldn't have to build scalar steps if we aren't vectorizing.
2363   assert(State.VF.isVector() && "VF should be greater than one");
  // Get the value type and ensure it and the step have the same type.
2365   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
2366   assert(ScalarIVTy == Step->getType() &&
2367          "Val and Step should have the same type");
2368 
2369   // We build scalar steps for both integer and floating-point induction
2370   // variables. Here, we determine the kind of arithmetic we will perform.
2371   Instruction::BinaryOps AddOp;
2372   Instruction::BinaryOps MulOp;
2373   if (ScalarIVTy->isIntegerTy()) {
2374     AddOp = Instruction::Add;
2375     MulOp = Instruction::Mul;
2376   } else {
2377     AddOp = ID.getInductionOpcode();
2378     MulOp = Instruction::FMul;
2379   }
2380 
2381   // Determine the number of scalars we need to generate for each unroll
2382   // iteration.
2383   bool FirstLaneOnly = vputils::onlyFirstLaneUsed(Def);
2384   unsigned Lanes = FirstLaneOnly ? 1 : State.VF.getKnownMinValue();
2385   // Compute the scalar steps and save the results in State.
2386   Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
2387                                      ScalarIVTy->getScalarSizeInBits());
2388   Type *VecIVTy = nullptr;
2389   Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr;
2390   if (!FirstLaneOnly && State.VF.isScalable()) {
2391     VecIVTy = VectorType::get(ScalarIVTy, State.VF);
2392     UnitStepVec =
2393         Builder.CreateStepVector(VectorType::get(IntStepTy, State.VF));
2394     SplatStep = Builder.CreateVectorSplat(State.VF, Step);
2395     SplatIV = Builder.CreateVectorSplat(State.VF, ScalarIV);
2396   }
2397 
2398   for (unsigned Part = 0; Part < State.UF; ++Part) {
2399     Value *StartIdx0 = createStepForVF(Builder, IntStepTy, State.VF, Part);
2400 
2401     if (!FirstLaneOnly && State.VF.isScalable()) {
2402       auto *SplatStartIdx = Builder.CreateVectorSplat(State.VF, StartIdx0);
2403       auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec);
2404       if (ScalarIVTy->isFloatingPointTy())
2405         InitVec = Builder.CreateSIToFP(InitVec, VecIVTy);
2406       auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep);
2407       auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul);
2408       State.set(Def, Add, Part);
      // It's also useful to record the lane values for the known minimum
      // number of elements, so we do that below. This improves code quality
      // when, for example, extracting the first element.
2412     }
2413 
2414     if (ScalarIVTy->isFloatingPointTy())
2415       StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy);
2416 
2417     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2418       Value *StartIdx = Builder.CreateBinOp(
2419           AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane));
2420       // The step returned by `createStepForVF` is a runtime-evaluated value
2421       // when VF is scalable. Otherwise, it should be folded into a Constant.
2422       assert((State.VF.isScalable() || isa<Constant>(StartIdx)) &&
2423              "Expected StartIdx to be folded to a constant when VF is not "
2424              "scalable");
2425       auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
2426       auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul);
2427       State.set(Def, Add, VPIteration(Part, Lane));
2428     }
2429   }
2430 }
2431 
// Generate code for the induction step. Note that induction steps are
// required to be loop-invariant.
2434 static Value *CreateStepValue(const SCEV *Step, ScalarEvolution &SE,
2435                               Instruction *InsertBefore,
2436                               Loop *OrigLoop = nullptr) {
2437   const DataLayout &DL = SE.getDataLayout();
2438   assert((!OrigLoop || SE.isLoopInvariant(Step, OrigLoop)) &&
2439          "Induction step should be loop invariant");
2440   if (auto *E = dyn_cast<SCEVUnknown>(Step))
2441     return E->getValue();
2442 
2443   SCEVExpander Exp(SE, DL, "induction");
2444   return Exp.expandCodeFor(Step, Step->getType(), InsertBefore);
2445 }
2446 
2447 /// Compute the transformed value of Index at offset StartValue using step
2448 /// StepValue.
2449 /// For integer induction, returns StartValue + Index * StepValue.
2450 /// For pointer induction, returns StartValue[Index * StepValue].
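/// For illustration, an integer induction with StartValue 7 and step 3 maps
/// Index i to 7 + 3 * i.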
2451 /// FIXME: The newly created binary instructions should contain nsw/nuw
2452 /// flags, which can be found from the original scalar operations.
2453 static Value *emitTransformedIndex(IRBuilderBase &B, Value *Index,
2454                                    Value *StartValue, Value *Step,
2455                                    const InductionDescriptor &ID) {
2456   assert(Index->getType()->getScalarType() == Step->getType() &&
2457          "Index scalar type does not match StepValue type");
2458 
  // Note: the IR at this point is broken. We cannot use SE to create any new
  // SCEV and then expand it, hoping that SCEV's simplification will give us
  // a more optimal code. Unfortunately, attempting to do so on invalid IR may
  // lead to various SCEV crashes. So all we can do is use the builder and rely
  // on InstCombine for future simplifications. Here we handle only some
  // trivial cases.
2465   auto CreateAdd = [&B](Value *X, Value *Y) {
2466     assert(X->getType() == Y->getType() && "Types don't match!");
2467     if (auto *CX = dyn_cast<ConstantInt>(X))
2468       if (CX->isZero())
2469         return Y;
2470     if (auto *CY = dyn_cast<ConstantInt>(Y))
2471       if (CY->isZero())
2472         return X;
2473     return B.CreateAdd(X, Y);
2474   };
2475 
2476   // We allow X to be a vector type, in which case Y will potentially be
2477   // splatted into a vector with the same element count.
2478   auto CreateMul = [&B](Value *X, Value *Y) {
2479     assert(X->getType()->getScalarType() == Y->getType() &&
2480            "Types don't match!");
2481     if (auto *CX = dyn_cast<ConstantInt>(X))
2482       if (CX->isOne())
2483         return Y;
2484     if (auto *CY = dyn_cast<ConstantInt>(Y))
2485       if (CY->isOne())
2486         return X;
2487     VectorType *XVTy = dyn_cast<VectorType>(X->getType());
2488     if (XVTy && !isa<VectorType>(Y->getType()))
2489       Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
2490     return B.CreateMul(X, Y);
2491   };
2492 
2493   switch (ID.getKind()) {
2494   case InductionDescriptor::IK_IntInduction: {
2495     assert(!isa<VectorType>(Index->getType()) &&
2496            "Vector indices not supported for integer inductions yet");
2497     assert(Index->getType() == StartValue->getType() &&
2498            "Index type does not match StartValue type");
2499     if (isa<ConstantInt>(Step) && cast<ConstantInt>(Step)->isMinusOne())
2500       return B.CreateSub(StartValue, Index);
2501     auto *Offset = CreateMul(Index, Step);
2502     return CreateAdd(StartValue, Offset);
2503   }
2504   case InductionDescriptor::IK_PtrInduction: {
2505     assert(isa<Constant>(Step) &&
2506            "Expected constant step for pointer induction");
2507     return B.CreateGEP(ID.getElementType(), StartValue, CreateMul(Index, Step));
2508   }
2509   case InductionDescriptor::IK_FpInduction: {
2510     assert(!isa<VectorType>(Index->getType()) &&
2511            "Vector indices not supported for FP inductions yet");
2512     assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
2513     auto InductionBinOp = ID.getInductionBinOp();
2514     assert(InductionBinOp &&
2515            (InductionBinOp->getOpcode() == Instruction::FAdd ||
2516             InductionBinOp->getOpcode() == Instruction::FSub) &&
2517            "Original bin op should be defined for FP induction");
2518 
2519     Value *MulExp = B.CreateFMul(Step, Index);
2520     return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
2521                          "induction");
2522   }
2523   case InductionDescriptor::IK_NoInduction:
2524     return nullptr;
2525   }
2526   llvm_unreachable("invalid enum");
2527 }
2528 
2529 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def,
2530                                                     const VPIteration &Instance,
2531                                                     VPTransformState &State) {
2532   Value *ScalarInst = State.get(Def, Instance);
2533   Value *VectorValue = State.get(Def, Instance.Part);
2534   VectorValue = Builder.CreateInsertElement(
2535       VectorValue, ScalarInst,
2536       Instance.Lane.getAsRuntimeExpr(State.Builder, VF));
2537   State.set(Def, VectorValue, Instance.Part);
2538 }
2539 
2540 // Return whether we allow using masked interleave-groups (for dealing with
2541 // strided loads/stores that reside in predicated blocks, or for dealing
2542 // with gaps).
2543 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2544   // If an override option has been passed in for interleaved accesses, use it.
2545   if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2546     return EnableMaskedInterleavedMemAccesses;
2547 
2548   return TTI.enableMaskedInterleavedAccessVectorization();
2549 }
2550 
2551 // Try to vectorize the interleave group that \p Instr belongs to.
2552 //
2553 // E.g. Translate following interleaved load group (factor = 3):
2554 //   for (i = 0; i < N; i+=3) {
2555 //     R = Pic[i];             // Member of index 0
2556 //     G = Pic[i+1];           // Member of index 1
2557 //     B = Pic[i+2];           // Member of index 2
2558 //     ... // do something to R, G, B
2559 //   }
2560 // To:
2561 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2562 //   %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9>   ; R elements
2563 //   %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10>  ; G elements
2564 //   %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11>  ; B elements
2565 //
2566 // Or translate following interleaved store group (factor = 3):
2567 //   for (i = 0; i < N; i+=3) {
2568 //     ... do something to R, G, B
2569 //     Pic[i]   = R;           // Member of index 0
2570 //     Pic[i+1] = G;           // Member of index 1
2571 //     Pic[i+2] = B;           // Member of index 2
2572 //   }
2573 // To:
2574 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2575 //   %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
2576 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2577 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2578 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2579 void InnerLoopVectorizer::vectorizeInterleaveGroup(
2580     const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
2581     VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
2582     VPValue *BlockInMask) {
2583   Instruction *Instr = Group->getInsertPos();
2584   const DataLayout &DL = Instr->getModule()->getDataLayout();
2585 
2586   // Prepare for the vector type of the interleaved load/store.
2587   Type *ScalarTy = getLoadStoreType(Instr);
2588   unsigned InterleaveFactor = Group->getFactor();
2589   assert(!VF.isScalable() && "scalable vectors not yet supported.");
2590   auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);
2591 
2592   // Prepare for the new pointers.
2593   SmallVector<Value *, 2> AddrParts;
2594   unsigned Index = Group->getIndex(Instr);
2595 
2596   // TODO: extend the masked interleaved-group support to reversed access.
2597   assert((!BlockInMask || !Group->isReverse()) &&
2598          "Reversed masked interleave-group not supported.");
2599 
2600   // If the group is reverse, adjust the index to refer to the last vector lane
2601   // instead of the first. We adjust the index from the first vector lane,
2602   // rather than directly getting the pointer for lane VF - 1, because the
2603   // pointer operand of the interleaved access is supposed to be uniform. For
2604   // uniform instructions, we're only required to generate a value for the
2605   // first vector lane in each unroll iteration.
2606   if (Group->isReverse())
2607     Index += (VF.getKnownMinValue() - 1) * Group->getFactor();
2608 
2609   for (unsigned Part = 0; Part < UF; Part++) {
2610     Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
2611     setDebugLocFromInst(AddrPart);
2612 
    // Note that the current instruction may be at any index within the group.
    // We need to adjust the address down to the member of index 0.
    //
    // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
    //       b = A[i];       // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
    //
    // E.g.  A[i+1] = a;     // Member of index 1
    //       A[i]   = b;     // Member of index 0
    //       A[i+2] = c;     // Member of index 2 (Current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].
2624 
2625     bool InBounds = false;
    if (auto *Gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
      InBounds = Gep->isInBounds();
2628     AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2629     cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2630 
2631     // Cast to the vector pointer type.
2632     unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
2633     Type *PtrTy = VecTy->getPointerTo(AddressSpace);
2634     AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
2635   }
2636 
2637   setDebugLocFromInst(Instr);
2638   Value *PoisonVec = PoisonValue::get(VecTy);
2639 
2640   Value *MaskForGaps = nullptr;
2641   if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
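    // For illustration: for a group with factor 3, a fixed VF of 4 and a gap
    // at member 1, the mask for gaps would be
    //   <1,0,1, 1,0,1, 1,0,1, 1,0,1>
    // disabling the lanes that correspond to the missing member.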
2642     MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2643     assert(MaskForGaps && "Mask for Gaps is required but it is null");
2644   }
2645 
2646   // Vectorize the interleaved load group.
2647   if (isa<LoadInst>(Instr)) {
2648     // For each unroll part, create a wide load for the group.
2649     SmallVector<Value *, 2> NewLoads;
2650     for (unsigned Part = 0; Part < UF; Part++) {
2651       Instruction *NewLoad;
2652       if (BlockInMask || MaskForGaps) {
2653         assert(useMaskedInterleavedAccesses(*TTI) &&
2654                "masked interleaved groups are not allowed.");
2655         Value *GroupMask = MaskForGaps;
2656         if (BlockInMask) {
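          // Replicate each lane of the block mask across the group members;
          // e.g. for factor 3 and a fixed VF of 4, <m0,m1,m2,m3> becomes
          // <m0,m0,m0, m1,m1,m1, m2,m2,m2, m3,m3,m3>.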
2657           Value *BlockInMaskPart = State.get(BlockInMask, Part);
2658           Value *ShuffledMask = Builder.CreateShuffleVector(
2659               BlockInMaskPart,
2660               createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2661               "interleaved.mask");
2662           GroupMask = MaskForGaps
2663                           ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2664                                                 MaskForGaps)
2665                           : ShuffledMask;
2666         }
2667         NewLoad =
2668             Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(),
2669                                      GroupMask, PoisonVec, "wide.masked.vec");
2670       }
2671       else
2672         NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
2673                                             Group->getAlign(), "wide.vec");
2674       Group->addMetadata(NewLoad);
2675       NewLoads.push_back(NewLoad);
2676     }
2677 
2678     // For each member in the group, shuffle out the appropriate data from the
2679     // wide loads.
2680     unsigned J = 0;
2681     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2682       Instruction *Member = Group->getMember(I);
2683 
2684       // Skip the gaps in the group.
2685       if (!Member)
2686         continue;
2687 
2688       auto StrideMask =
2689           createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
2690       for (unsigned Part = 0; Part < UF; Part++) {
2691         Value *StridedVec = Builder.CreateShuffleVector(
2692             NewLoads[Part], StrideMask, "strided.vec");
2693 
        // If this member has a different type, cast the result to it.
2695         if (Member->getType() != ScalarTy) {
2696           assert(!VF.isScalable() && "VF is assumed to be non scalable.");
2697           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2698           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2699         }
2700 
2701         if (Group->isReverse())
2702           StridedVec = Builder.CreateVectorReverse(StridedVec, "reverse");
2703 
2704         State.set(VPDefs[J], StridedVec, Part);
2705       }
2706       ++J;
2707     }
2708     return;
2709   }
2710 
  // The subvector type for the current instruction.
2712   auto *SubVT = VectorType::get(ScalarTy, VF);
2713 
2714   // Vectorize the interleaved store group.
2715   MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2716   assert((!MaskForGaps || useMaskedInterleavedAccesses(*TTI)) &&
2717          "masked interleaved groups are not allowed.");
2718   assert((!MaskForGaps || !VF.isScalable()) &&
2719          "masking gaps for scalable vectors is not yet supported.");
2720   for (unsigned Part = 0; Part < UF; Part++) {
2721     // Collect the stored vector from each member.
2722     SmallVector<Value *, 4> StoredVecs;
2723     for (unsigned i = 0; i < InterleaveFactor; i++) {
2724       assert((Group->getMember(i) || MaskForGaps) &&
2725              "Fail to get a member from an interleaved store group");
2726       Instruction *Member = Group->getMember(i);
2727 
2728       // Skip the gaps in the group.
      if (!Member) {
        StoredVecs.push_back(PoisonValue::get(SubVT));
        continue;
      }
2734 
2735       Value *StoredVec = State.get(StoredValues[i], Part);
2736 
2737       if (Group->isReverse())
2738         StoredVec = Builder.CreateVectorReverse(StoredVec, "reverse");
2739 
      // If this member has a different type, cast it to the unified type.
      if (StoredVec->getType() != SubVT)
        StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2744 
2745       StoredVecs.push_back(StoredVec);
2746     }
2747 
2748     // Concatenate all vectors into a wide vector.
2749     Value *WideVec = concatenateVectors(Builder, StoredVecs);
2750 
2751     // Interleave the elements in the wide vector.
2752     Value *IVec = Builder.CreateShuffleVector(
2753         WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor),
2754         "interleaved.vec");
2755 
2756     Instruction *NewStoreInstr;
2757     if (BlockInMask || MaskForGaps) {
2758       Value *GroupMask = MaskForGaps;
2759       if (BlockInMask) {
2760         Value *BlockInMaskPart = State.get(BlockInMask, Part);
2761         Value *ShuffledMask = Builder.CreateShuffleVector(
2762             BlockInMaskPart,
2763             createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2764             "interleaved.mask");
2765         GroupMask = MaskForGaps ? Builder.CreateBinOp(Instruction::And,
2766                                                       ShuffledMask, MaskForGaps)
2767                                 : ShuffledMask;
2768       }
2769       NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part],
2770                                                 Group->getAlign(), GroupMask);
2771     } else
2772       NewStoreInstr =
2773           Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());
2774 
2775     Group->addMetadata(NewStoreInstr);
2776   }
2777 }
2778 
2779 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
2780                                                VPReplicateRecipe *RepRecipe,
2781                                                const VPIteration &Instance,
2782                                                bool IfPredicateInstr,
2783                                                VPTransformState &State) {
2784   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
2785 
2786   // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for
2787   // the first lane and part.
2788   if (isa<NoAliasScopeDeclInst>(Instr))
2789     if (!Instance.isFirstIteration())
2790       return;
2791 
2792   setDebugLocFromInst(Instr);
2793 
  // Does this instruction return a value?
2795   bool IsVoidRetTy = Instr->getType()->isVoidTy();
2796 
2797   Instruction *Cloned = Instr->clone();
2798   if (!IsVoidRetTy)
2799     Cloned->setName(Instr->getName() + ".cloned");
2800 
  // If the scalarized instruction contributes to the address computation of a
  // widened masked load/store which was in a basic block that needed
  // predication and is not predicated after vectorization, we can't propagate
  // poison-generating flags (nuw/nsw, exact, inbounds, etc.). Otherwise the
  // scalarized instruction could feed a poison value to the base address of
  // the widened load/store.
2807   if (State.MayGeneratePoisonRecipes.contains(RepRecipe))
2808     Cloned->dropPoisonGeneratingFlags();
2809 
2810   State.Builder.SetInsertPoint(Builder.GetInsertBlock(),
2811                                Builder.GetInsertPoint());
2812   // Replace the operands of the cloned instructions with their scalar
2813   // equivalents in the new loop.
2814   for (auto &I : enumerate(RepRecipe->operands())) {
2815     auto InputInstance = Instance;
2816     VPValue *Operand = I.value();
2817     VPReplicateRecipe *OperandR = dyn_cast<VPReplicateRecipe>(Operand);
2818     if (OperandR && OperandR->isUniform())
2819       InputInstance.Lane = VPLane::getFirstLane();
2820     Cloned->setOperand(I.index(), State.get(Operand, InputInstance));
2821   }
2822   addNewMetadata(Cloned, Instr);
2823 
2824   // Place the cloned scalar in the new loop.
2825   Builder.Insert(Cloned);
2826 
2827   State.set(RepRecipe, Cloned, Instance);
2828 
  // If we just cloned a new assumption, add it to the assumption cache.
2830   if (auto *II = dyn_cast<AssumeInst>(Cloned))
2831     AC->registerAssumption(II);
2832 
2833   // End if-block.
2834   if (IfPredicateInstr)
2835     PredicatedInstructions.push_back(Cloned);
2836 }
2837 
2838 void InnerLoopVectorizer::createHeaderBranch(Loop *L) {
2839   BasicBlock *Header = L->getHeader();
2840   assert(!L->getLoopLatch() && "loop should not have a latch at this point");
2841 
2842   IRBuilder<> B(Header->getTerminator());
2843   Instruction *OldInst =
2844       getDebugLocFromInstOrOperands(Legal->getPrimaryInduction());
2845   setDebugLocFromInst(OldInst, &B);
2846 
  // Branch from the header to the exit block and back to the header,
  // replacing the old terminator.
2849   B.CreateCondBr(B.getTrue(), L->getUniqueExitBlock(), Header);
2850 
2851   // Now we have two terminators. Remove the old one from the block.
2852   Header->getTerminator()->eraseFromParent();
2853 }
2854 
2855 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
2856   if (TripCount)
2857     return TripCount;
2858 
2859   assert(L && "Create Trip Count for null loop.");
2860   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2861   // Find the loop boundaries.
2862   ScalarEvolution *SE = PSE.getSE();
2863   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
2864   assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
2865          "Invalid loop count");
2866 
2867   Type *IdxTy = Legal->getWidestInductionType();
2868   assert(IdxTy && "No type for induction");
2869 
  // The exit count might have the type of i64 while the phi is i32. This can
  // happen if we have an induction variable that is sign extended before the
  // compare. The only way we can get a backedge-taken count in that case is if
  // the induction variable was signed, and as such it will not overflow, so
  // truncation is legal.
2875   if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
2876       IdxTy->getPrimitiveSizeInBits())
2877     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
2878   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
2879 
2880   // Get the total trip count from the count by adding 1.
2881   const SCEV *ExitCount = SE->getAddExpr(
2882       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
2883 
2884   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
2885 
2886   // Expand the trip count and place the new instructions in the preheader.
2887   // Notice that the pre-header does not change, only the loop body.
2888   SCEVExpander Exp(*SE, DL, "induction");
2889 
2890   // Count holds the overall loop count (N).
2891   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
2892                                 L->getLoopPreheader()->getTerminator());
2893 
2894   if (TripCount->getType()->isPointerTy())
2895     TripCount =
2896         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
2897                                     L->getLoopPreheader()->getTerminator());
2898 
2899   return TripCount;
2900 }
2901 
2902 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
2903   if (VectorTripCount)
2904     return VectorTripCount;
2905 
2906   Value *TC = getOrCreateTripCount(L);
2907   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2908 
2909   Type *Ty = TC->getType();
2910   // This is where we can make the step a runtime constant.
2911   Value *Step = createStepForVF(Builder, Ty, VF, UF);
2912 
2913   // If the tail is to be folded by masking, round the number of iterations N
2914   // up to a multiple of Step instead of rounding down. This is done by first
2915   // adding Step-1 and then rounding down. Note that it's ok if this addition
2916   // overflows: the vector induction variable will eventually wrap to zero given
2917   // that it starts at zero and its Step is a power of two; the loop will then
2918   // exit, with the last early-exit vector comparison also producing all-true.
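  // For illustration: with N = 10 and VF * UF = 4, TC becomes 10 + 3 = 13,
  // and the computation below yields a vector trip count of 12, i.e. three
  // masked vector iterations covering all 10 scalar iterations.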
2919   if (Cost->foldTailByMasking()) {
2920     assert(isPowerOf2_32(VF.getKnownMinValue() * UF) &&
2921            "VF*UF must be a power of 2 when folding tail by masking");
2922     Value *NumLanes = getRuntimeVF(Builder, Ty, VF * UF);
2923     TC = Builder.CreateAdd(
2924         TC, Builder.CreateSub(NumLanes, ConstantInt::get(Ty, 1)), "n.rnd.up");
2925   }
2926 
2927   // Now we need to generate the expression for the part of the loop that the
2928   // vectorized body will execute. This is equal to N - (N % Step) if scalar
2929   // iterations are not required for correctness, or N - Step, otherwise. Step
2930   // is equal to the vectorization factor (number of SIMD elements) times the
2931   // unroll factor (number of SIMD instructions).
2932   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
2933 
2934   // There are cases where we *must* run at least one iteration in the remainder
2935   // loop.  See the cost model for when this can happen.  If the step evenly
2936   // divides the trip count, we set the remainder to be equal to the step. If
2937   // the step does not evenly divide the trip count, no adjustment is necessary
2938   // since there will already be scalar iterations. Note that the minimum
2939   // iterations check ensures that N >= Step.
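  // For illustration: with TC = 12 and Step = 4, R would be 0; forcing R to
  // Step gives a vector trip count of 8, leaving Step iterations for the
  // required scalar epilogue.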
2940   if (Cost->requiresScalarEpilogue(VF)) {
2941     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
2942     R = Builder.CreateSelect(IsZero, Step, R);
2943   }
2944 
2945   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
2946 
2947   return VectorTripCount;
2948 }
2949 
2950 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
2951                                                    const DataLayout &DL) {
2952   // Verify that V is a vector type with same number of elements as DstVTy.
2953   auto *DstFVTy = cast<FixedVectorType>(DstVTy);
2954   unsigned VF = DstFVTy->getNumElements();
2955   auto *SrcVecTy = cast<FixedVectorType>(V->getType());
  assert((VF == SrcVecTy->getNumElements()) &&
         "Vector dimensions do not match");
2957   Type *SrcElemTy = SrcVecTy->getElementType();
2958   Type *DstElemTy = DstFVTy->getElementType();
2959   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
2960          "Vector elements must have same size");
2961 
2962   // Do a direct cast if element types are castable.
2963   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
2964     return Builder.CreateBitOrPointerCast(V, DstFVTy);
2965   }
  // V cannot be directly cast to the desired vector type. This may happen
  // when V is a floating point vector but DstVTy is a vector of pointers, or
  // vice-versa. Handle this with a two-step bitcast through an intermediate
  // integer type, i.e. Ptr <-> Int <-> Float.
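  // For illustration, assuming 64-bit pointers:
  //   <2 x double> --bitcast--> <2 x i64> --inttoptr--> <2 x double*>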
2970   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
2971          "Only one type should be a pointer type");
2972   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
2973          "Only one type should be a floating point type");
2974   Type *IntTy =
2975       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
2976   auto *VecIntTy = FixedVectorType::get(IntTy, VF);
2977   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
2978   return Builder.CreateBitOrPointerCast(CastVal, DstFVTy);
2979 }
2980 
2981 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
2982                                                          BasicBlock *Bypass) {
2983   Value *Count = getOrCreateTripCount(L);
2984   // Reuse existing vector loop preheader for TC checks.
2985   // Note that new preheader block is generated for vector loop.
2986   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
2987   IRBuilder<> Builder(TCCheckBlock->getTerminator());
2988 
2989   // Generate code to check if the loop's trip count is less than VF * UF, or
2990   // equal to it in case a scalar epilogue is required; this implies that the
2991   // vector trip count is zero. This check also covers the case where adding one
2992   // to the backedge-taken count overflowed leading to an incorrect trip count
2993   // of zero. In this case we will also jump to the scalar loop.
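  // For illustration: with VF = 4 and UF = 2, we bypass to the scalar loop if
  // the trip count is less than 8 (or less than or equal to 8 when a scalar
  // epilogue is required).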
2994   auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE
2995                                             : ICmpInst::ICMP_ULT;
2996 
2997   // If tail is to be folded, vector loop takes care of all iterations.
2998   Value *CheckMinIters = Builder.getFalse();
2999   if (!Cost->foldTailByMasking()) {
3000     Value *Step = createStepForVF(Builder, Count->getType(), VF, UF);
3001     CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
3002   }
3003   // Create new preheader for vector loop.
3004   LoopVectorPreHeader =
3005       SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
3006                  "vector.ph");
3007 
3008   assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
3009                                DT->getNode(Bypass)->getIDom()) &&
3010          "TC check is expected to dominate Bypass");
3011 
3012   // Update dominator for Bypass & LoopExit (if needed).
3013   DT->changeImmediateDominator(Bypass, TCCheckBlock);
3014   if (!Cost->requiresScalarEpilogue(VF))
3015     // If there is an epilogue which must run, there's no edge from the
    // middle block to exit blocks and thus no need to update the immediate
3017     // dominator of the exit blocks.
3018     DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
3019 
3020   ReplaceInstWithInst(
3021       TCCheckBlock->getTerminator(),
3022       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
3023   LoopBypassBlocks.push_back(TCCheckBlock);
3024 }
3025 
BasicBlock *InnerLoopVectorizer::emitSCEVChecks(BasicBlock *Bypass) {
  BasicBlock *const SCEVCheckBlock =
3029       RTChecks.emitSCEVChecks(Bypass, LoopVectorPreHeader, LoopExitBlock);
3030   if (!SCEVCheckBlock)
3031     return nullptr;
3032 
3033   assert(!(SCEVCheckBlock->getParent()->hasOptSize() ||
3034            (OptForSizeBasedOnProfile &&
3035             Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) &&
3036          "Cannot SCEV check stride or overflow when optimizing for size");
3037 
3038 
3039   // Update dominator only if this is first RT check.
3040   if (LoopBypassBlocks.empty()) {
3041     DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
3042     if (!Cost->requiresScalarEpilogue(VF))
3043       // If there is an epilogue which must run, there's no edge from the
      // middle block to exit blocks and thus no need to update the immediate
3045       // dominator of the exit blocks.
3046       DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
3047   }
3048 
3049   LoopBypassBlocks.push_back(SCEVCheckBlock);
3050   AddedSafetyChecks = true;
3051   return SCEVCheckBlock;
3052 }
3053 
3054 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L,
3055                                                       BasicBlock *Bypass) {
3056   // VPlan-native path does not do any analysis for runtime checks currently.
3057   if (EnableVPlanNativePath)
3058     return nullptr;
3059 
3060   BasicBlock *const MemCheckBlock =
3061       RTChecks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader);
3062 
  // Check whether we generated code that checks at runtime if arrays overlap.
  // We put the checks into a separate block to make the more common case of
  // few elements faster.
3066   if (!MemCheckBlock)
3067     return nullptr;
3068 
3069   if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
3070     assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
3071            "Cannot emit memory checks when optimizing for size, unless forced "
3072            "to vectorize.");
3073     ORE->emit([&]() {
3074       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
3075                                         L->getStartLoc(), L->getHeader())
3076              << "Code-size may be reduced by not forcing "
3077                 "vectorization, or by source-code modifications "
3078                 "eliminating the need for runtime checks "
3079                 "(e.g., adding 'restrict').";
3080     });
3081   }
3082 
3083   LoopBypassBlocks.push_back(MemCheckBlock);
3084 
3085   AddedSafetyChecks = true;
3086 
3087   // We currently don't use LoopVersioning for the actual loop cloning but we
3088   // still use it to add the noalias metadata.
3089   LVer = std::make_unique<LoopVersioning>(
3090       *Legal->getLAI(),
3091       Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
3092       DT, PSE.getSE());
3093   LVer->prepareNoAliasMetadata();
3094   return MemCheckBlock;
3095 }
3096 
3097 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
3098   LoopScalarBody = OrigLoop->getHeader();
3099   LoopVectorPreHeader = OrigLoop->getLoopPreheader();
3100   assert(LoopVectorPreHeader && "Invalid loop structure");
3101   LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr
3102   assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) &&
3103          "multiple exit loop without required epilogue?");
3104 
3105   LoopMiddleBlock =
3106       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3107                  LI, nullptr, Twine(Prefix) + "middle.block");
3108   LoopScalarPreHeader =
3109       SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
3110                  nullptr, Twine(Prefix) + "scalar.ph");
3111 
3112   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3113 
3114   // Set up the middle block terminator.  Two cases:
3115   // 1) If we know that we must execute the scalar epilogue, emit an
3116   //    unconditional branch.
3117   // 2) Otherwise, we must have a single unique exit block (due to how we
  //    implement the multiple exit case).  In this case, set up a conditional
3119   //    branch from the middle block to the loop scalar preheader, and the
3120   //    exit block.  completeLoopSkeleton will update the condition to use an
3121   //    iteration check, if required to decide whether to execute the remainder.
3122   BranchInst *BrInst = Cost->requiresScalarEpilogue(VF) ?
3123     BranchInst::Create(LoopScalarPreHeader) :
3124     BranchInst::Create(LoopExitBlock, LoopScalarPreHeader,
3125                        Builder.getTrue());
3126   BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3127   ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
3128 
  // We intentionally don't let SplitBlock update LoopInfo, since
  // LoopVectorBody should belong to a different loop than LoopVectorPreHeader.
  // LoopVectorBody is explicitly added to the correct place a few lines later.
3132   BasicBlock *LoopVectorBody =
3133       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3134                  nullptr, nullptr, Twine(Prefix) + "vector.body");
3135 
3136   // Update dominator for loop exit.
3137   if (!Cost->requiresScalarEpilogue(VF))
3138     // If there is an epilogue which must run, there's no edge from the
    // middle block to exit blocks and thus no need to update the immediate
3140     // dominator of the exit blocks.
3141     DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
3142 
3143   // Create and register the new vector loop.
3144   Loop *Lp = LI->AllocateLoop();
3145   Loop *ParentLoop = OrigLoop->getParentLoop();
3146 
3147   // Insert the new loop into the loop nest and register the new basic blocks
3148   // before calling any utilities such as SCEV that require valid LoopInfo.
3149   if (ParentLoop) {
3150     ParentLoop->addChildLoop(Lp);
3151   } else {
3152     LI->addTopLevelLoop(Lp);
3153   }
3154   Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
3155   return Lp;
3156 }
3157 
3158 void InnerLoopVectorizer::createInductionResumeValues(
3159     Loop *L, std::pair<BasicBlock *, Value *> AdditionalBypass) {
3160   assert(((AdditionalBypass.first && AdditionalBypass.second) ||
3161           (!AdditionalBypass.first && !AdditionalBypass.second)) &&
3162          "Inconsistent information about additional bypass.");
3163 
3164   Value *VectorTripCount = getOrCreateVectorTripCount(L);
3165   assert(VectorTripCount && L && "Expected valid arguments");
3166   // We are going to resume the execution of the scalar loop.
3167   // Go over all of the induction variables that we found and fix the
3168   // PHIs that are left in the scalar version of the loop.
3169   // The starting values of PHI nodes depend on the counter of the last
3170   // iteration in the vectorized loop.
3171   // If we come from a bypass edge then we need to start from the original
3172   // start value.
3173   Instruction *OldInduction = Legal->getPrimaryInduction();
3174   for (auto &InductionEntry : Legal->getInductionVars()) {
3175     PHINode *OrigPhi = InductionEntry.first;
3176     InductionDescriptor II = InductionEntry.second;
3177 
    // Create phi nodes to merge from the backedge-taken check block.
3179     PHINode *BCResumeVal =
3180         PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
3181                         LoopScalarPreHeader->getTerminator());
3182     // Copy original phi DL over to the new one.
3183     BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
3184     Value *&EndValue = IVEndValues[OrigPhi];
3185     Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
3186     if (OrigPhi == OldInduction) {
3187       // We know what the end value is.
3188       EndValue = VectorTripCount;
3189     } else {
3190       IRBuilder<> B(L->getLoopPreheader()->getTerminator());
3191 
3192       // Fast-math-flags propagate from the original induction instruction.
3193       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3194         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3195 
3196       Type *StepType = II.getStep()->getType();
3197       Instruction::CastOps CastOp =
3198           CastInst::getCastOpcode(VectorTripCount, true, StepType, true);
3199       Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd");
3200       Value *Step =
3201           CreateStepValue(II.getStep(), *PSE.getSE(), &*B.GetInsertPoint());
3202       EndValue = emitTransformedIndex(B, CRD, II.getStartValue(), Step, II);
3203       EndValue->setName("ind.end");
3204 
3205       // Compute the end value for the additional bypass (if applicable).
3206       if (AdditionalBypass.first) {
3207         B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt()));
3208         CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true,
3209                                          StepType, true);
3210         Value *Step =
3211             CreateStepValue(II.getStep(), *PSE.getSE(), &*B.GetInsertPoint());
3212         CRD =
3213             B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd");
3214         EndValueFromAdditionalBypass =
3215             emitTransformedIndex(B, CRD, II.getStartValue(), Step, II);
3216         EndValueFromAdditionalBypass->setName("ind.end");
3217       }
3218     }
3219     // The new PHI merges the original incoming value, in case of a bypass,
3220     // or the value at the end of the vectorized loop.
3221     BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);
3222 
3223     // Fix the scalar body counter (PHI node).
3224     // The old induction's phi node in the scalar body needs the truncated
3225     // value.
3226     for (BasicBlock *BB : LoopBypassBlocks)
3227       BCResumeVal->addIncoming(II.getStartValue(), BB);
3228 
3229     if (AdditionalBypass.first)
3230       BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first,
3231                                             EndValueFromAdditionalBypass);
3232 
3233     OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
3234   }
3235 }
3236 
3237 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L,
3238                                                       MDNode *OrigLoopID) {
3239   assert(L && "Expected valid loop.");
3240 
3241   // The trip counts should be cached by now.
3242   Value *Count = getOrCreateTripCount(L);
3243   Value *VectorTripCount = getOrCreateVectorTripCount(L);
3244 
3245   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3246 
3247   // Add a check in the middle block to see if we have completed
3248   // all of the iterations in the first vector loop.  Three cases:
3249   // 1) If we require a scalar epilogue, there is no conditional branch as
3250   //    we unconditionally branch to the scalar preheader.  Do nothing.
3251   // 2) If (N - N%VF) == N, then we *don't* need to run the remainder.
3252   //    Thus if tail is to be folded, we know we don't need to run the
3253   //    remainder and we can use the previous value for the condition (true).
3254   // 3) Otherwise, construct a runtime check.
3255   if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) {
3256     Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
3257                                         Count, VectorTripCount, "cmp.n",
3258                                         LoopMiddleBlock->getTerminator());
3259 
3260     // Here we use the same DebugLoc as the scalar loop latch terminator instead
3261     // of the corresponding compare because they may have ended up with
3262     // different line numbers and we want to avoid awkward line stepping while
3263     // debugging. Eg. if the compare has got a line number inside the loop.
3264     CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3265     cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN);
3266   }
3267 
3268   // Get ready to start creating new instructions into the vectorized body.
3269   assert(LoopVectorPreHeader == L->getLoopPreheader() &&
3270          "Inconsistent vector loop preheader");
3271 
3272 #ifdef EXPENSIVE_CHECKS
3273   assert(DT->verify(DominatorTree::VerificationLevel::Fast));
3274   LI->verify(*DT);
3275 #endif
3276 
3277   return LoopVectorPreHeader;
3278 }
3279 
3280 std::pair<BasicBlock *, Value *>
3281 InnerLoopVectorizer::createVectorizedLoopSkeleton() {
3282   /*
3283    In this function we generate a new loop. The new loop will contain
3284    the vectorized instructions while the old loop will continue to run the
3285    scalar remainder.
3286 
3287        [ ] <-- loop iteration number check.
3288     /   |
3289    /    v
3290   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
3291   |  /  |
3292   | /   v
3293   ||   [ ]     <-- vector pre header.
3294   |/    |
3295   |     v
3296   |    [  ] \
3297   |    [  ]_|   <-- vector loop.
3298   |     |
3299   |     v
3300   \   -[ ]   <--- middle-block.
3301    \/   |
3302    /\   v
3303    | ->[ ]     <--- new preheader.
3304    |    |
3305  (opt)  v      <-- edge from middle to exit iff epilogue is not required.
3306    |   [ ] \
3307    |   [ ]_|   <-- old scalar loop to handle remainder (scalar epilogue).
3308     \   |
3309      \  v
3310       >[ ]     <-- exit block(s).
3311    ...
3312    */
3313 
3314   // Get the metadata of the original loop before it gets modified.
3315   MDNode *OrigLoopID = OrigLoop->getLoopID();
3316 
3317   // Workaround!  Compute the trip count of the original loop and cache it
3318   // before we start modifying the CFG.  This code has a systemic problem
3319   // wherein it tries to run analysis over partially constructed IR; this is
3320   // wrong, and not simply for SCEV.  The trip count of the original loop
3321   // simply happens to be prone to hitting this in practice.  In theory, we
3322   // can hit the same issue for any SCEV, or ValueTracking query done during
3323   // mutation.  See PR49900.
3324   getOrCreateTripCount(OrigLoop);
3325 
3326   // Create an empty vector loop, and prepare basic blocks for the runtime
3327   // checks.
3328   Loop *Lp = createVectorLoopSkeleton("");
3329 
3330   // Now, compare the new count to zero. If it is zero skip the vector loop and
3331   // jump to the scalar loop. This check also covers the case where the
3332   // backedge-taken count is uint##_max: adding one to it will overflow leading
3333   // to an incorrect trip count of zero. In this (rare) case we will also jump
3334   // to the scalar loop.
3335   emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader);
3336 
3337   // Generate the code to check any assumptions that we've made for SCEV
3338   // expressions.
3339   emitSCEVChecks(LoopScalarPreHeader);
3340 
3341   // Generate the code that checks in runtime if arrays overlap. We put the
3342   // checks into a separate block to make the more common case of few elements
3343   // faster.
3344   emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
3345 
3346   createHeaderBranch(Lp);
3347 
3348   // Emit phis for the new starting index of the scalar loop.
3349   createInductionResumeValues(Lp);
3350 
3351   return {completeLoopSkeleton(Lp, OrigLoopID), nullptr};
3352 }
3353 
3354 // Fix up external users of the induction variable. At this point, we are
3355 // in LCSSA form, with all external PHIs that use the IV having one input value,
3356 // coming from the remainder loop. We need those PHIs to also have a correct
3357 // value for the IV when arriving directly from the middle block.
3358 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3359                                        const InductionDescriptor &II,
3360                                        Value *CountRoundDown, Value *EndValue,
3361                                        BasicBlock *MiddleBlock,
3362                                        BasicBlock *VectorHeader) {
3363   // There are two kinds of external IV usages - those that use the value
3364   // computed in the last iteration (the PHI) and those that use the penultimate
3365   // value (the value that feeds into the phi from the loop latch).
3366   // We allow both, but they, obviously, have different values.
3367 
3368   assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");
3369 
3370   DenseMap<Value *, Value *> MissingVals;
3371 
3372   // An external user of the last iteration's value should see the value that
3373   // the remainder loop uses to initialize its own IV.
3374   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3375   for (User *U : PostInc->users()) {
3376     Instruction *UI = cast<Instruction>(U);
3377     if (!OrigLoop->contains(UI)) {
3378       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3379       MissingVals[UI] = EndValue;
3380     }
3381   }
3382 
  // An external user of the penultimate value needs to see EndValue - Step.
3384   // The simplest way to get this is to recompute it from the constituent SCEVs,
3385   // that is Start + (Step * (CRD - 1)).
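  // For illustration: with Start = 7, Step = 3 and CRD = 4, the escaping
  // value is 7 + 3 * (4 - 1) = 16.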
3386   for (User *U : OrigPhi->users()) {
3387     auto *UI = cast<Instruction>(U);
3388     if (!OrigLoop->contains(UI)) {
3389       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3390 
3391       IRBuilder<> B(MiddleBlock->getTerminator());
3392 
3393       // Fast-math-flags propagate from the original induction instruction.
3394       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3395         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3396 
3397       Value *CountMinusOne = B.CreateSub(
3398           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3399       Value *CMO =
3400           !II.getStep()->getType()->isIntegerTy()
3401               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3402                              II.getStep()->getType())
3403               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3404       CMO->setName("cast.cmo");
3405 
3406       Value *Step = CreateStepValue(II.getStep(), *PSE.getSE(),
3407                                     VectorHeader->getTerminator());
3408       Value *Escape =
3409           emitTransformedIndex(B, CMO, II.getStartValue(), Step, II);
3410       Escape->setName("ind.escape");
3411       MissingVals[UI] = Escape;
3412     }
3413   }
3414 
3415   for (auto &I : MissingVals) {
3416     PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
3418     // that is %IV2 = phi [...], [ %IV1, %latch ]
3419     // In this case, if IV1 has an external use, we need to avoid adding both
3420     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3421     // don't already have an incoming value for the middle block.
3422     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3423       PHI->addIncoming(I.second, MiddleBlock);
3424   }
3425 }
3426 
3427 namespace {
3428 
3429 struct CSEDenseMapInfo {
3430   static bool canHandle(const Instruction *I) {
3431     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3432            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3433   }
3434 
3435   static inline Instruction *getEmptyKey() {
3436     return DenseMapInfo<Instruction *>::getEmptyKey();
3437   }
3438 
3439   static inline Instruction *getTombstoneKey() {
3440     return DenseMapInfo<Instruction *>::getTombstoneKey();
3441   }
3442 
3443   static unsigned getHashValue(const Instruction *I) {
3444     assert(canHandle(I) && "Unknown instruction!");
3445     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3446                                                            I->value_op_end()));
3447   }
3448 
3449   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3450     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3451         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3452       return LHS == RHS;
3453     return LHS->isIdenticalTo(RHS);
3454   }
3455 };
3456 
3457 } // end anonymous namespace
3458 
/// Perform CSE of induction variable instructions.
3460 static void cse(BasicBlock *BB) {
  // Perform simple CSE.
3462   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3463   for (Instruction &In : llvm::make_early_inc_range(*BB)) {
3464     if (!CSEDenseMapInfo::canHandle(&In))
3465       continue;
3466 
3467     // Check if we can replace this instruction with any of the
3468     // visited instructions.
3469     if (Instruction *V = CSEMap.lookup(&In)) {
3470       In.replaceAllUsesWith(V);
3471       In.eraseFromParent();
3472       continue;
3473     }
3474 
3475     CSEMap[&In] = &In;
3476   }
3477 }
3478 
3479 InstructionCost
3480 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF,
3481                                               bool &NeedToScalarize) const {
3482   Function *F = CI->getCalledFunction();
3483   Type *ScalarRetTy = CI->getType();
3484   SmallVector<Type *, 4> Tys, ScalarTys;
3485   for (auto &ArgOp : CI->args())
3486     ScalarTys.push_back(ArgOp->getType());
3487 
3488   // Estimate cost of scalarized vector call. The source operands are assumed
3489   // to be vectors, so we need to extract individual elements from there,
3490   // execute VF scalar calls, and then gather the result into the vector return
3491   // value.
3492   InstructionCost ScalarCallCost =
3493       TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput);
3494   if (VF.isScalar())
3495     return ScalarCallCost;
3496 
3497   // Compute corresponding vector type for return value and arguments.
3498   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3499   for (Type *ScalarTy : ScalarTys)
3500     Tys.push_back(ToVectorTy(ScalarTy, VF));
3501 
3502   // Compute costs of unpacking argument values for the scalar calls and
3503   // packing the return values to a vector.
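  // For illustration: with a scalar call cost of 10, a fixed VF of 4 and a
  // scalarization overhead of 6, the scalarized cost below is 10 * 4 + 6 = 46.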
3504   InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
3505 
3506   InstructionCost Cost =
3507       ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
3508 
3509   // If we can't emit a vector call for this function, then the currently found
3510   // cost is the cost we need to return.
3511   NeedToScalarize = true;
3512   VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
3513   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3514 
3515   if (!TLI || CI->isNoBuiltin() || !VecFunc)
3516     return Cost;
3517 
3518   // If the corresponding vector cost is cheaper, return its cost.
3519   InstructionCost VectorCallCost =
3520       TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput);
3521   if (VectorCallCost < Cost) {
3522     NeedToScalarize = false;
3523     Cost = VectorCallCost;
3524   }
3525   return Cost;
3526 }
3527 
3528 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) {
3529   if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy()))
3530     return Elt;
3531   return VectorType::get(Elt, VF);
3532 }
3533 
3534 InstructionCost
3535 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
3536                                                    ElementCount VF) const {
3537   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3538   assert(ID && "Expected intrinsic call!");
3539   Type *RetTy = MaybeVectorizeType(CI->getType(), VF);
3540   FastMathFlags FMF;
3541   if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
3542     FMF = FPMO->getFastMathFlags();
3543 
3544   SmallVector<const Value *> Arguments(CI->args());
3545   FunctionType *FTy = CI->getCalledFunction()->getFunctionType();
3546   SmallVector<Type *> ParamTys;
3547   std::transform(FTy->param_begin(), FTy->param_end(),
3548                  std::back_inserter(ParamTys),
3549                  [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); });
3550 
3551   IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
3552                                     dyn_cast<IntrinsicInst>(CI));
3553   return TTI.getIntrinsicInstrCost(CostAttrs,
3554                                    TargetTransformInfo::TCK_RecipThroughput);
3555 }
3556 
3557 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3558   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3559   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3560   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3561 }
3562 
3563 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3564   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3565   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3566   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3567 }
3568 
3569 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) {
3570   // For every instruction `I` in MinBWs, truncate the operands, create a
3571   // truncated version of `I` and reextend its result. InstCombine runs
3572   // later and will remove any ext/trunc pairs.
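  // For illustration: an i32 add whose result is known to need only 8 bits
  // becomes a trunc of both operands to <VF x i8>, an i8 add, and a zext of
  // the result back to <VF x i32>.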
3573   SmallPtrSet<Value *, 4> Erased;
3574   for (const auto &KV : Cost->getMinimalBitwidths()) {
3575     // If the value wasn't vectorized, we must maintain the original scalar
3576     // type. The absence of the value from State indicates that it
3577     // wasn't vectorized.
3578     // FIXME: Should not rely on getVPValue at this point.
3579     VPValue *Def = State.Plan->getVPValue(KV.first, true);
3580     if (!State.hasAnyVectorValue(Def))
3581       continue;
3582     for (unsigned Part = 0; Part < UF; ++Part) {
3583       Value *I = State.get(Def, Part);
3584       if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
3585         continue;
3586       Type *OriginalTy = I->getType();
3587       Type *ScalarTruncatedTy =
3588           IntegerType::get(OriginalTy->getContext(), KV.second);
3589       auto *TruncatedTy = VectorType::get(
3590           ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount());
3591       if (TruncatedTy == OriginalTy)
3592         continue;
3593 
3594       IRBuilder<> B(cast<Instruction>(I));
3595       auto ShrinkOperand = [&](Value *V) -> Value * {
3596         if (auto *ZI = dyn_cast<ZExtInst>(V))
3597           if (ZI->getSrcTy() == TruncatedTy)
3598             return ZI->getOperand(0);
3599         return B.CreateZExtOrTrunc(V, TruncatedTy);
3600       };
3601 
3602       // The actual instruction modification depends on the instruction type,
3603       // unfortunately.
3604       Value *NewI = nullptr;
3605       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3606         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3607                              ShrinkOperand(BO->getOperand(1)));
3608 
3609         // Any wrapping introduced by shrinking this operation shouldn't be
3610         // considered undefined behavior. So, we can't unconditionally copy
3611         // arithmetic wrapping flags to NewI.
3612         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3613       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3614         NewI =
3615             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3616                          ShrinkOperand(CI->getOperand(1)));
3617       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3618         NewI = B.CreateSelect(SI->getCondition(),
3619                               ShrinkOperand(SI->getTrueValue()),
3620                               ShrinkOperand(SI->getFalseValue()));
3621       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3622         switch (CI->getOpcode()) {
3623         default:
3624           llvm_unreachable("Unhandled cast!");
3625         case Instruction::Trunc:
3626           NewI = ShrinkOperand(CI->getOperand(0));
3627           break;
3628         case Instruction::SExt:
3629           NewI = B.CreateSExtOrTrunc(
3630               CI->getOperand(0),
3631               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3632           break;
3633         case Instruction::ZExt:
3634           NewI = B.CreateZExtOrTrunc(
3635               CI->getOperand(0),
3636               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3637           break;
3638         }
3639       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
3640         auto Elements0 =
3641             cast<VectorType>(SI->getOperand(0)->getType())->getElementCount();
3642         auto *O0 = B.CreateZExtOrTrunc(
3643             SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
3644         auto Elements1 =
3645             cast<VectorType>(SI->getOperand(1)->getType())->getElementCount();
3646         auto *O1 = B.CreateZExtOrTrunc(
3647             SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
3648 
3649         NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
3650       } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
3651         // Don't do anything with the operands, just extend the result.
3652         continue;
3653       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3654         auto Elements =
3655             cast<VectorType>(IE->getOperand(0)->getType())->getElementCount();
3656         auto *O0 = B.CreateZExtOrTrunc(
3657             IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3658         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3659         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3660       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3661         auto Elements =
3662             cast<VectorType>(EE->getOperand(0)->getType())->getElementCount();
3663         auto *O0 = B.CreateZExtOrTrunc(
3664             EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3665         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3666       } else {
3667         // If we don't know what to do, be conservative and don't do anything.
3668         continue;
3669       }
3670 
3671       // Lastly, extend the result.
3672       NewI->takeName(cast<Instruction>(I));
3673       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3674       I->replaceAllUsesWith(Res);
3675       cast<Instruction>(I)->eraseFromParent();
3676       Erased.insert(I);
3677       State.reset(Def, Res, Part);
3678     }
3679   }
3680 
3681   // We'll have created a bunch of ZExts that are now parentless. Clean up.
3682   for (const auto &KV : Cost->getMinimalBitwidths()) {
3683     // If the value wasn't vectorized, we must maintain the original scalar
3684     // type. The absence of the value from State indicates that it
3685     // wasn't vectorized.
3686     // FIXME: Should not rely on getVPValue at this point.
3687     VPValue *Def = State.Plan->getVPValue(KV.first, true);
3688     if (!State.hasAnyVectorValue(Def))
3689       continue;
3690     for (unsigned Part = 0; Part < UF; ++Part) {
3691       Value *I = State.get(Def, Part);
3692       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3693       if (Inst && Inst->use_empty()) {
3694         Value *NewI = Inst->getOperand(0);
3695         Inst->eraseFromParent();
3696         State.reset(Def, NewI, Part);
3697       }
3698     }
3699   }
3700 }
3701 
3702 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
3703   // Insert truncates and extends for any truncated instructions as hints to
3704   // InstCombine.
3705   if (VF.isVector())
3706     truncateToMinimalBitwidths(State);
3707 
3708   // Fix widened non-induction PHIs by setting up the PHI operands.
  if (!OrigPHIsToFix.empty()) {
3710     assert(EnableVPlanNativePath &&
3711            "Unexpected non-induction PHIs for fixup in non VPlan-native path");
3712     fixNonInductionPHIs(State);
3713   }
3714 
3715   // At this point every instruction in the original loop is widened to a
3716   // vector form. Now we need to fix the recurrences in the loop. These PHI
3717   // nodes are currently empty because we did not want to introduce cycles.
3718   // This is the second stage of vectorizing recurrences.
3719   fixCrossIterationPHIs(State);
3720 
  // Forget the original loop: SCEV info about it is no longer valid.
3722   PSE.getSE()->forgetLoop(OrigLoop);
3723 
3724   Loop *VectorLoop = LI->getLoopFor(State.CFG.PrevBB);
3725   // If we inserted an edge from the middle block to the unique exit block,
3726   // update uses outside the loop (phis) to account for the newly inserted
3727   // edge.
3728   if (!Cost->requiresScalarEpilogue(VF)) {
3729     // Fix-up external users of the induction variables.
3730     for (auto &Entry : Legal->getInductionVars())
3731       fixupIVUsers(
3732           Entry.first, Entry.second, getOrCreateVectorTripCount(VectorLoop),
3733           IVEndValues[Entry.first], LoopMiddleBlock, VectorLoop->getHeader());
3734 
3735     fixLCSSAPHIs(State);
3736   }
3737 
3738   for (Instruction *PI : PredicatedInstructions)
3739     sinkScalarOperands(&*PI);
3740 
3741   // Remove redundant induction instructions.
3742   cse(VectorLoop->getHeader());
3743 
  // Set/update profile weights for the vector and remainder loops as original
  // loop iterations are now distributed among them. Note that the original
  // loop, represented by LoopScalarBody, becomes the remainder loop after
  // vectorization.
  //
  // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
  // end up getting a slightly inaccurate result but that should be OK since
  // profile is not inherently precise anyway. Note also that a possible
  // bypass of vector code caused by legality checks is ignored, optimistically
  // assigning all the weight to the vector loop.
  //
  // For scalable vectorization we can't know at compile time how many
  // iterations of the loop are handled in one vector iteration, so instead
  // assume a pessimistic vscale of '1'.
3757   setProfileInfoAfterUnrolling(LI->getLoopFor(LoopScalarBody), VectorLoop,
3758                                LI->getLoopFor(LoopScalarBody),
3759                                VF.getKnownMinValue() * UF);
3760 }
3761 
3762 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
3763   // In order to support recurrences we need to be able to vectorize Phi nodes.
3764   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
3765   // stage #2: We now need to fix the recurrences by adding incoming edges to
3766   // the currently empty PHI nodes. At this point every instruction in the
3767   // original loop is widened to a vector form so we can use them to construct
3768   // the incoming edges.
3769   VPBasicBlock *Header = State.Plan->getEntry()->getEntryBasicBlock();
3770   for (VPRecipeBase &R : Header->phis()) {
3771     if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R))
3772       fixReduction(ReductionPhi, State);
3773     else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))
3774       fixFirstOrderRecurrence(FOR, State);
3775   }
3776 }
3777 
3778 void InnerLoopVectorizer::fixFirstOrderRecurrence(
3779     VPFirstOrderRecurrencePHIRecipe *PhiR, VPTransformState &State) {
3780   // This is the second phase of vectorizing first-order recurrences. An
3781   // overview of the transformation is described below. Suppose we have the
3782   // following loop.
3783   //
3784   //   for (int i = 0; i < n; ++i)
3785   //     b[i] = a[i] - a[i - 1];
3786   //
3787   // There is a first-order recurrence on "a". For this loop, the shorthand
3788   // scalar IR looks like:
3789   //
3790   //   scalar.ph:
3791   //     s_init = a[-1]
3792   //     br scalar.body
3793   //
3794   //   scalar.body:
3795   //     i = phi [0, scalar.ph], [i+1, scalar.body]
3796   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
3797   //     s2 = a[i]
3798   //     b[i] = s2 - s1
3799   //     br cond, scalar.body, ...
3800   //
  // In this example, s1 is a recurrence because its value depends on the
3802   // previous iteration. In the first phase of vectorization, we created a
3803   // vector phi v1 for s1. We now complete the vectorization and produce the
3804   // shorthand vector IR shown below (for VF = 4, UF = 1).
3805   //
3806   //   vector.ph:
3807   //     v_init = vector(..., ..., ..., a[-1])
3808   //     br vector.body
3809   //
3810   //   vector.body
3811   //     i = phi [0, vector.ph], [i+4, vector.body]
3812   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
3813   //     v2 = a[i, i+1, i+2, i+3];
3814   //     v3 = vector(v1(3), v2(0, 1, 2))
3815   //     b[i, i+1, i+2, i+3] = v2 - v3
3816   //     br cond, vector.body, middle.block
3817   //
3818   //   middle.block:
3819   //     x = v2(3)
3820   //     br scalar.ph
3821   //
3822   //   scalar.ph:
3823   //     s_init = phi [x, middle.block], [a[-1], otherwise]
3824   //     br scalar.body
3825   //
  // After the vector loop completes execution, we extract the next value of
  // the recurrence (x) to use as the initial value in the scalar loop.
3828 
3829   // Extract the last vector element in the middle block. This will be the
3830   // initial value for the recurrence when jumping to the scalar loop.
3831   VPValue *PreviousDef = PhiR->getBackedgeValue();
3832   Value *Incoming = State.get(PreviousDef, UF - 1);
3833   auto *ExtractForScalar = Incoming;
3834   auto *IdxTy = Builder.getInt32Ty();
3835   if (VF.isVector()) {
3836     auto *One = ConstantInt::get(IdxTy, 1);
3837     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
3838     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
3839     auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
3840     ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx,
3841                                                     "vector.recur.extract");
3842   }
  // Extract the second-to-last element in the middle block if the Phi is
  // used outside the loop. We need to extract the phi itself and not the
  // last element (the phi update in the current iteration). This will be the
  // value when jumping to the exit block from the LoopMiddleBlock, when the
  // scalar loop is not run at all.
3848   Value *ExtractForPhiUsedOutsideLoop = nullptr;
3849   if (VF.isVector()) {
3850     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
3851     auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2));
3852     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
3853         Incoming, Idx, "vector.recur.extract.for.phi");
3854   } else if (UF > 1)
    // When the loop is unrolled without vectorizing, initialize
    // ExtractForPhiUsedOutsideLoop with the value just prior to the unrolled
    // value of `Incoming`. This is analogous to the vectorized case above:
    // extracting the second-to-last element when VF > 1.
3859     ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);
3860 
3861   // Fix the initial value of the original recurrence in the scalar loop.
3862   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
3863   PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue());
3864   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
3865   auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue();
3866   for (auto *BB : predecessors(LoopScalarPreHeader)) {
3867     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
3868     Start->addIncoming(Incoming, BB);
3869   }
3870 
3871   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
3872   Phi->setName("scalar.recur");
3873 
3874   // Finally, fix users of the recurrence outside the loop. The users will need
3875   // either the last value of the scalar recurrence or the last value of the
3876   // vector recurrence we extracted in the middle block. Since the loop is in
3877   // LCSSA form, we just need to find all the phi nodes for the original scalar
3878   // recurrence in the exit block, and then add an edge for the middle block.
  // Note that LCSSA does not imply single entry when the original scalar loop
  // had multiple exiting edges (as we always run the last iteration in the
  // scalar epilogue); in that case, there is no edge from middle to exit and
  // thus no phis need to be updated.
3883   if (!Cost->requiresScalarEpilogue(VF))
3884     for (PHINode &LCSSAPhi : LoopExitBlock->phis())
3885       if (llvm::is_contained(LCSSAPhi.incoming_values(), Phi))
3886         LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
3887 }
3888 
3889 void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR,
3890                                        VPTransformState &State) {
3891   PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
  // Get its reduction variable descriptor.
3893   assert(Legal->isReductionVariable(OrigPhi) &&
3894          "Unable to find the reduction variable");
3895   const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
3896 
3897   RecurKind RK = RdxDesc.getRecurrenceKind();
3898   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
3899   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
3900   setDebugLocFromInst(ReductionStartValue);
3901 
3902   VPValue *LoopExitInstDef = PhiR->getBackedgeValue();
3903   // This is the vector-clone of the value that leaves the loop.
3904   Type *VecTy = State.get(LoopExitInstDef, 0)->getType();
3905 
3906   // Wrap flags are in general invalid after vectorization, clear them.
3907   clearReductionWrapFlags(RdxDesc, State);
3908 
3909   // Before each round, move the insertion point right between
3910   // the PHIs and the values we are going to write.
3911   // This allows us to write both PHINodes and the extractelement
3912   // instructions.
3913   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3914 
3915   setDebugLocFromInst(LoopExitInst);
3916 
3917   Type *PhiTy = OrigPhi->getType();
3918   BasicBlock *VectorLoopLatch =
3919       LI->getLoopFor(State.CFG.PrevBB)->getLoopLatch();
3920   // If tail is folded by masking, the vector value to leave the loop should be
3921   // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
3922   // instead of the former. For an inloop reduction the reduction will already
3923   // be predicated, and does not need to be handled here.
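  // For example (a sketch with VF = 4, i32 reduction), the value that reaches
  // the middle block is then
  //   %sel = select <4 x i1> %mask, <4 x i32> %rdx.next, <4 x i32> %rdx.phi
  // rather than %rdx.next itself, so masked-off lanes keep their prior value.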
3924   if (Cost->foldTailByMasking() && !PhiR->isInLoop()) {
3925     for (unsigned Part = 0; Part < UF; ++Part) {
3926       Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
3927       Value *Sel = nullptr;
3928       for (User *U : VecLoopExitInst->users()) {
3929         if (isa<SelectInst>(U)) {
3930           assert(!Sel && "Reduction exit feeding two selects");
3931           Sel = U;
3932         } else
3933           assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
3934       }
3935       assert(Sel && "Reduction exit feeds no select");
3936       State.reset(LoopExitInstDef, Sel, Part);
3937 
3938       // If the target can create a predicated operator for the reduction at no
3939       // extra cost in the loop (for example a predicated vadd), it can be
3940       // cheaper for the select to remain in the loop than be sunk out of it,
3941       // and so use the select value for the phi instead of the old
3942       // LoopExitValue.
3943       if (PreferPredicatedReductionSelect ||
3944           TTI->preferPredicatedReductionSelect(
3945               RdxDesc.getOpcode(), PhiTy,
3946               TargetTransformInfo::ReductionFlags())) {
        auto *VecRdxPhi = cast<PHINode>(State.get(PhiR, Part));
3949         VecRdxPhi->setIncomingValueForBlock(VectorLoopLatch, Sel);
3950       }
3951     }
3952   }
3953 
3954   // If the vector reduction can be performed in a smaller type, we truncate
3955   // then extend the loop exit value to enable InstCombine to evaluate the
3956   // entire expression in the smaller type.
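  // For example (a sketch, an i8 reduction widened to <4 x i32>), the loop
  // exit value is rewritten as
  //   %rdx.tr  = trunc <4 x i32> %rdx to <4 x i8>
  //   %rdx.ext = sext <4 x i8> %rdx.tr to <4 x i32>
  // and users of %rdx are redirected to %rdx.ext, so InstCombine can shrink
  // the whole expression to i8.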
3957   if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) {
3958     assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!");
3959     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
3960     Builder.SetInsertPoint(VectorLoopLatch->getTerminator());
3961     VectorParts RdxParts(UF);
3962     for (unsigned Part = 0; Part < UF; ++Part) {
3963       RdxParts[Part] = State.get(LoopExitInstDef, Part);
3964       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
3965       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
3966                                         : Builder.CreateZExt(Trunc, VecTy);
3967       for (User *U : llvm::make_early_inc_range(RdxParts[Part]->users()))
3968         if (U != Trunc) {
3969           U->replaceUsesOfWith(RdxParts[Part], Extnd);
3970           RdxParts[Part] = Extnd;
3971         }
3972     }
3973     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3974     for (unsigned Part = 0; Part < UF; ++Part) {
3975       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
3976       State.reset(LoopExitInstDef, RdxParts[Part], Part);
3977     }
3978   }
3979 
3980   // Reduce all of the unrolled parts into a single vector.
3981   Value *ReducedPartRdx = State.get(LoopExitInstDef, 0);
3982   unsigned Op = RecurrenceDescriptor::getOpcode(RK);
3983 
3984   // The middle block terminator has already been assigned a DebugLoc here (the
3985   // OrigLoop's single latch terminator). We want the whole middle block to
3986   // appear to execute on this line because: (a) it is all compiler generated,
3987   // (b) these instructions are always executed after evaluating the latch
3988   // conditional branch, and (c) other passes may add new predecessors which
3989   // terminate on this line. This is the easiest way to ensure we don't
3990   // accidentally cause an extra step back into the loop while debugging.
3991   setDebugLocFromInst(LoopMiddleBlock->getTerminator());
3992   if (PhiR->isOrdered())
3993     ReducedPartRdx = State.get(LoopExitInstDef, UF - 1);
3994   else {
3995     // Floating-point operations should have some FMF to enable the reduction.
3996     IRBuilderBase::FastMathFlagGuard FMFG(Builder);
3997     Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
3998     for (unsigned Part = 1; Part < UF; ++Part) {
3999       Value *RdxPart = State.get(LoopExitInstDef, Part);
4000       if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
4001         ReducedPartRdx = Builder.CreateBinOp(
4002             (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
4003       } else if (RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK))
4004         ReducedPartRdx = createSelectCmpOp(Builder, ReductionStartValue, RK,
4005                                            ReducedPartRdx, RdxPart);
4006       else
4007         ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
4008     }
4009   }
4010 
  // Create the reduction after the loop. Note that inloop reductions create
  // the target reduction in the loop using a Reduction recipe.
4013   if (VF.isVector() && !PhiR->isInLoop()) {
4014     ReducedPartRdx =
4015         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, OrigPhi);
4016     // If the reduction can be performed in a smaller type, we need to extend
4017     // the reduction to the wider type before we branch to the original loop.
4018     if (PhiTy != RdxDesc.getRecurrenceType())
4019       ReducedPartRdx = RdxDesc.isSigned()
4020                            ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
4021                            : Builder.CreateZExt(ReducedPartRdx, PhiTy);
4022   }
4023 
4024   PHINode *ResumePhi =
4025       dyn_cast<PHINode>(PhiR->getStartValue()->getUnderlyingValue());
4026 
4027   // Create a phi node that merges control-flow from the backedge-taken check
4028   // block and the middle block.
4029   PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx",
4030                                         LoopScalarPreHeader->getTerminator());
4031 
4032   // If we are fixing reductions in the epilogue loop then we should already
4033   // have created a bc.merge.rdx Phi after the main vector body. Ensure that
4034   // we carry over the incoming values correctly.
4035   for (auto *Incoming : predecessors(LoopScalarPreHeader)) {
4036     if (Incoming == LoopMiddleBlock)
4037       BCBlockPhi->addIncoming(ReducedPartRdx, Incoming);
4038     else if (ResumePhi && llvm::is_contained(ResumePhi->blocks(), Incoming))
4039       BCBlockPhi->addIncoming(ResumePhi->getIncomingValueForBlock(Incoming),
4040                               Incoming);
4041     else
4042       BCBlockPhi->addIncoming(ReductionStartValue, Incoming);
4043   }
4044 
4045   // Set the resume value for this reduction
4046   ReductionResumeValues.insert({&RdxDesc, BCBlockPhi});
4047 
4048   // Now, we need to fix the users of the reduction variable
4049   // inside and outside of the scalar remainder loop.
4050 
4051   // We know that the loop is in LCSSA form. We need to update the PHI nodes
4052   // in the exit blocks.  See comment on analogous loop in
  // fixFirstOrderRecurrence for a more complete explanation of the logic.
4054   if (!Cost->requiresScalarEpilogue(VF))
4055     for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4056       if (llvm::is_contained(LCSSAPhi.incoming_values(), LoopExitInst))
4057         LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
4058 
4059   // Fix the scalar loop reduction variable with the incoming reduction sum
4060   // from the vector body and from the backedge value.
4061   int IncomingEdgeBlockIdx =
4062       OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4063   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4064   // Pick the other block.
4065   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4066   OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4067   OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4068 }
4069 
void InnerLoopVectorizer::clearReductionWrapFlags(
    const RecurrenceDescriptor &RdxDesc, VPTransformState &State) {
4072   RecurKind RK = RdxDesc.getRecurrenceKind();
4073   if (RK != RecurKind::Add && RK != RecurKind::Mul)
4074     return;
4075 
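  // The vector loop reassociates the reduction, so intermediate values may
  // wrap even when the scalar chain never did. E.g. (hypothetical i8 values)
  // summing {100, -100, 100, -100} lane-wise pairs 100 with 100 and -100
  // with -100, both of which overflow, while the sequential scalar sum never
  // leaves [-100, 100]. Hence the nuw/nsw flags are dropped below.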
4076   Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
4077   assert(LoopExitInstr && "null loop exit instruction");
4078   SmallVector<Instruction *, 8> Worklist;
4079   SmallPtrSet<Instruction *, 8> Visited;
4080   Worklist.push_back(LoopExitInstr);
4081   Visited.insert(LoopExitInstr);
4082 
4083   while (!Worklist.empty()) {
4084     Instruction *Cur = Worklist.pop_back_val();
4085     if (isa<OverflowingBinaryOperator>(Cur))
4086       for (unsigned Part = 0; Part < UF; ++Part) {
4087         // FIXME: Should not rely on getVPValue at this point.
4088         Value *V = State.get(State.Plan->getVPValue(Cur, true), Part);
4089         cast<Instruction>(V)->dropPoisonGeneratingFlags();
4090       }
4091 
4092     for (User *U : Cur->users()) {
4093       Instruction *UI = cast<Instruction>(U);
4094       if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
4095           Visited.insert(UI).second)
4096         Worklist.push_back(UI);
4097     }
4098   }
4099 }
4100 
4101 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) {
4102   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4103     if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1)
4104       // Some phis were already hand updated by the reduction and recurrence
4105       // code above, leave them alone.
4106       continue;
4107 
    // Non-instruction incoming values will have only one value.
    auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
4110 
4111     VPLane Lane = VPLane::getFirstLane();
4112     if (isa<Instruction>(IncomingValue) &&
4113         !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue),
4114                                            VF))
4115       Lane = VPLane::getLastLaneForVF(VF);
4116 
4117     // Can be a loop invariant incoming value or the last scalar value to be
4118     // extracted from the vectorized loop.
4119     // FIXME: Should not rely on getVPValue at this point.
4120     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4121     Value *lastIncomingValue =
4122         OrigLoop->isLoopInvariant(IncomingValue)
4123             ? IncomingValue
4124             : State.get(State.Plan->getVPValue(IncomingValue, true),
4125                         VPIteration(UF - 1, Lane));
4126     LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
4127   }
4128 }
4129 
4130 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4131   // The basic block and loop containing the predicated instruction.
4132   auto *PredBB = PredInst->getParent();
4133   auto *VectorLoop = LI->getLoopFor(PredBB);
4134 
4135   // Initialize a worklist with the operands of the predicated instruction.
4136   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4137 
4138   // Holds instructions that we need to analyze again. An instruction may be
4139   // reanalyzed if we don't yet know if we can sink it or not.
4140   SmallVector<Instruction *, 8> InstsToReanalyze;
4141 
4142   // Returns true if a given use occurs in the predicated block. Phi nodes use
4143   // their operands in their corresponding predecessor blocks.
4144   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4145     auto *I = cast<Instruction>(U.getUser());
4146     BasicBlock *BB = I->getParent();
4147     if (auto *Phi = dyn_cast<PHINode>(I))
4148       BB = Phi->getIncomingBlock(
4149           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4150     return BB == PredBB;
4151   };
4152 
  // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
  // operands are then added to the worklist. The algorithm ends when one pass
  // through the worklist doesn't sink a single instruction.
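  // For example (a sketch), if the address computation of a predicated store
  // was left outside its predicated block:
  //   %gep = getelementptr i32, i32* %a, i64 %idx ; only used by the store
  //   ...
  //  pred.store.if:
  //   store i32 %v, i32* %gep
  // then %gep can be sunk into pred.store.if, after which its own operands
  // are reconsidered for sinking.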
4157   bool Changed;
4158   do {
4159     // Add the instructions that need to be reanalyzed to the worklist, and
4160     // reset the changed indicator.
4161     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4162     InstsToReanalyze.clear();
4163     Changed = false;
4164 
4165     while (!Worklist.empty()) {
4166       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4167 
4168       // We can't sink an instruction if it is a phi node, is not in the loop,
4169       // or may have side effects.
4170       if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) ||
4171           I->mayHaveSideEffects())
4172         continue;
4173 
4174       // If the instruction is already in PredBB, check if we can sink its
4175       // operands. In that case, VPlan's sinkScalarOperands() succeeded in
4176       // sinking the scalar instruction I, hence it appears in PredBB; but it
4177       // may have failed to sink I's operands (recursively), which we try
4178       // (again) here.
4179       if (I->getParent() == PredBB) {
4180         Worklist.insert(I->op_begin(), I->op_end());
4181         continue;
4182       }
4183 
4184       // It's legal to sink the instruction if all its uses occur in the
4185       // predicated block. Otherwise, there's nothing to do yet, and we may
4186       // need to reanalyze the instruction.
4187       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4188         InstsToReanalyze.push_back(I);
4189         continue;
4190       }
4191 
      // Move the instruction to the beginning of the predicated block, and add
      // its operands to the worklist.
4194       I->moveBefore(&*PredBB->getFirstInsertionPt());
4195       Worklist.insert(I->op_begin(), I->op_end());
4196 
4197       // The sinking may have enabled other instructions to be sunk, so we will
4198       // need to iterate.
4199       Changed = true;
4200     }
4201   } while (Changed);
4202 }
4203 
4204 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
4205   for (PHINode *OrigPhi : OrigPHIsToFix) {
4206     VPWidenPHIRecipe *VPPhi =
4207         cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi));
4208     PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
4209     // Make sure the builder has a valid insert point.
4210     Builder.SetInsertPoint(NewPhi);
4211     for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) {
4212       VPValue *Inc = VPPhi->getIncomingValue(i);
4213       VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i);
4214       NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]);
4215     }
4216   }
4217 }
4218 
4219 bool InnerLoopVectorizer::useOrderedReductions(
4220     const RecurrenceDescriptor &RdxDesc) {
4221   return Cost->useOrderedReductions(RdxDesc);
4222 }
4223 
4224 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
4225                                               VPWidenPHIRecipe *PhiR,
4226                                               VPTransformState &State) {
4227   PHINode *P = cast<PHINode>(PN);
4228   if (EnableVPlanNativePath) {
4229     // Currently we enter here in the VPlan-native path for non-induction
4230     // PHIs where all control flow is uniform. We simply widen these PHIs.
4231     // Create a vector phi with no operands - the vector phi operands will be
4232     // set at the end of vector code generation.
4233     Type *VecTy = (State.VF.isScalar())
4234                       ? PN->getType()
4235                       : VectorType::get(PN->getType(), State.VF);
4236     Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4237     State.set(PhiR, VecPhi, 0);
4238     OrigPHIsToFix.push_back(P);
4239 
4240     return;
4241   }
4242 
4243   assert(PN->getParent() == OrigLoop->getHeader() &&
4244          "Non-header phis should have been handled elsewhere");
4245 
4246   // In order to support recurrences we need to be able to vectorize Phi nodes.
4247   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4248   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4249   // this value when we vectorize all of the instructions that use the PHI.
4250 
4251   assert(!Legal->isReductionVariable(P) &&
4252          "reductions should be handled elsewhere");
4253 
4254   setDebugLocFromInst(P);
4255 
4256   // This PHINode must be an induction variable.
4257   // Make sure that we know about it.
4258   assert(Legal->getInductionVars().count(P) && "Not an induction variable");
4259 
4260   InductionDescriptor II = Legal->getInductionVars().lookup(P);
4261   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4262 
4263   auto *IVR = PhiR->getParent()->getPlan()->getCanonicalIV();
4264   PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, 0));
4265 
4266   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4267   // which can be found from the original scalar operations.
4268   switch (II.getKind()) {
4269   case InductionDescriptor::IK_NoInduction:
4270     llvm_unreachable("Unknown induction");
4271   case InductionDescriptor::IK_IntInduction:
4272   case InductionDescriptor::IK_FpInduction:
4273     llvm_unreachable("Integer/fp induction is handled elsewhere.");
4274   case InductionDescriptor::IK_PtrInduction: {
4275     // Handle the pointer induction variable case.
4276     assert(P->getType()->isPointerTy() && "Unexpected type.");
4277 
4278     if (all_of(PhiR->users(), [PhiR](const VPUser *U) {
4279           return cast<VPRecipeBase>(U)->usesScalars(PhiR);
4280         })) {
4281       // This is the normalized GEP that starts counting at zero.
4282       Value *PtrInd =
4283           Builder.CreateSExtOrTrunc(CanonicalIV, II.getStep()->getType());
4284       // Determine the number of scalars we need to generate for each unroll
4285       // iteration. If the instruction is uniform, we only need to generate the
4286       // first lane. Otherwise, we generate all VF values.
4287       bool IsUniform = vputils::onlyFirstLaneUsed(PhiR);
4288       assert((IsUniform || !State.VF.isScalable()) &&
4289              "Cannot scalarize a scalable VF");
4290       unsigned Lanes = IsUniform ? 1 : State.VF.getFixedValue();
4291 
4292       for (unsigned Part = 0; Part < UF; ++Part) {
4293         Value *PartStart =
4294             createStepForVF(Builder, PtrInd->getType(), VF, Part);
4295 
4296         for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4297           Value *Idx = Builder.CreateAdd(
4298               PartStart, ConstantInt::get(PtrInd->getType(), Lane));
4299           Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4300 
4301           Value *Step = CreateStepValue(II.getStep(), *PSE.getSE(),
4302                                         State.CFG.PrevBB->getTerminator());
4303           Value *SclrGep = emitTransformedIndex(Builder, GlobalIdx,
4304                                                 II.getStartValue(), Step, II);
4305           SclrGep->setName("next.gep");
4306           State.set(PhiR, SclrGep, VPIteration(Part, Lane));
4307         }
4308       }
4309       return;
4310     }
4311     assert(isa<SCEVConstant>(II.getStep()) &&
4312            "Induction step not a SCEV constant!");
4313     Type *PhiType = II.getStep()->getType();
4314 
4315     // Build a pointer phi
4316     Value *ScalarStartValue = PhiR->getStartValue()->getLiveInIRValue();
4317     Type *ScStValueType = ScalarStartValue->getType();
4318     PHINode *NewPointerPhi =
4319         PHINode::Create(ScStValueType, 2, "pointer.phi", CanonicalIV);
4320     NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader);
4321 
4322     // A pointer induction, performed by using a gep
4323     BasicBlock *LoopLatch = LI->getLoopFor(State.CFG.PrevBB)->getLoopLatch();
4324     Instruction *InductionLoc = LoopLatch->getTerminator();
4325     const SCEV *ScalarStep = II.getStep();
4326     SCEVExpander Exp(*PSE.getSE(), DL, "induction");
4327     Value *ScalarStepValue =
4328         Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc);
4329     Value *RuntimeVF = getRuntimeVF(Builder, PhiType, VF);
4330     Value *NumUnrolledElems =
4331         Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF));
4332     Value *InductionGEP = GetElementPtrInst::Create(
4333         II.getElementType(), NewPointerPhi,
4334         Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind",
4335         InductionLoc);
4336     NewPointerPhi->addIncoming(InductionGEP, LoopLatch);
4337 
4338     // Create UF many actual address geps that use the pointer
4339     // phi as base and a vectorized version of the step value
4340     // (<step*0, ..., step*N>) as offset.
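    // For example (a sketch with VF = 4, UF = 2 and an element step of 1):
    //   %vector.gep   = %pointer.phi + <0, 1, 2, 3>
    //   %vector.gep.1 = %pointer.phi + <4, 5, 6, 7>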
4341     for (unsigned Part = 0; Part < State.UF; ++Part) {
4342       Type *VecPhiType = VectorType::get(PhiType, State.VF);
4343       Value *StartOffsetScalar =
4344           Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part));
4345       Value *StartOffset =
4346           Builder.CreateVectorSplat(State.VF, StartOffsetScalar);
4347       // Create a vector of consecutive numbers from zero to VF.
4348       StartOffset =
4349           Builder.CreateAdd(StartOffset, Builder.CreateStepVector(VecPhiType));
4350 
4351       Value *GEP = Builder.CreateGEP(
4352           II.getElementType(), NewPointerPhi,
4353           Builder.CreateMul(
4354               StartOffset, Builder.CreateVectorSplat(State.VF, ScalarStepValue),
4355               "vector.gep"));
4356       State.set(PhiR, GEP, Part);
4357     }
4358   }
4359   }
4360 }
4361 
4362 /// A helper function for checking whether an integer division-related
4363 /// instruction may divide by zero (in which case it must be predicated if
4364 /// executed conditionally in the scalar code).
4365 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
/// Non-zero divisors that are not compile-time constants will not be
4367 /// converted into multiplication, so we will still end up scalarizing
4368 /// the division, but can do so w/o predication.
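/// For example (a sketch), in
///   for (i = 0; i < n; ++i)
///     if (c[i]) a[i] = b[i] / d[i];
/// the loaded divisor d[i] may be zero whenever c[i] is false, so the
/// scalarized division must remain predicated; a non-zero constant divisor
/// could instead execute unconditionally.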
4369 static bool mayDivideByZero(Instruction &I) {
4370   assert((I.getOpcode() == Instruction::UDiv ||
4371           I.getOpcode() == Instruction::SDiv ||
4372           I.getOpcode() == Instruction::URem ||
4373           I.getOpcode() == Instruction::SRem) &&
4374          "Unexpected instruction");
4375   Value *Divisor = I.getOperand(1);
4376   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4377   return !CInt || CInt->isZero();
4378 }
4379 
4380 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def,
4381                                                VPUser &ArgOperands,
4382                                                VPTransformState &State) {
4383   assert(!isa<DbgInfoIntrinsic>(I) &&
4384          "DbgInfoIntrinsic should have been dropped during VPlan construction");
4385   setDebugLocFromInst(&I);
4386 
4387   Module *M = I.getParent()->getParent()->getParent();
4388   auto *CI = cast<CallInst>(&I);
4389 
4390   SmallVector<Type *, 4> Tys;
4391   for (Value *ArgOperand : CI->args())
4392     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue()));
4393 
4394   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4395 
  // This flag shows whether we use an intrinsic or a library call for the
  // vectorized version of the instruction, i.e. whether the intrinsic call is
  // more beneficial than the lib call.
4399   bool NeedToScalarize = false;
4400   InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
4401   InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0;
4402   bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
4403   assert((UseVectorIntrinsic || !NeedToScalarize) &&
4404          "Instruction should be scalarized elsewhere.");
4405   assert((IntrinsicCost.isValid() || CallCost.isValid()) &&
4406          "Either the intrinsic cost or vector call cost must be valid");
4407 
4408   for (unsigned Part = 0; Part < UF; ++Part) {
4409     SmallVector<Type *, 2> TysForDecl = {CI->getType()};
4410     SmallVector<Value *, 4> Args;
4411     for (auto &I : enumerate(ArgOperands.operands())) {
4412       // Some intrinsics have a scalar argument - don't replace it with a
4413       // vector.
4414       Value *Arg;
4415       if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
4416         Arg = State.get(I.value(), Part);
4417       else {
4418         Arg = State.get(I.value(), VPIteration(0, 0));
4419         if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index()))
4420           TysForDecl.push_back(Arg->getType());
4421       }
4422       Args.push_back(Arg);
4423     }
4424 
4425     Function *VectorF;
4426     if (UseVectorIntrinsic) {
4427       // Use vector version of the intrinsic.
4428       if (VF.isVector())
4429         TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
4430       VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4431       assert(VectorF && "Can't retrieve vector intrinsic.");
4432     } else {
4433       // Use vector version of the function call.
4434       const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
4435 #ifndef NDEBUG
4436       assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
4437              "Can't create vector function.");
4438 #endif
      VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
    }
    SmallVector<OperandBundleDef, 1> OpBundles;
    CI->getOperandBundlesAsDefs(OpBundles);
    CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);

    if (isa<FPMathOperator>(V))
      V->copyFastMathFlags(CI);

    State.set(Def, V, Part);
    addMetadata(V, &I);
  }
4451 }
4452 
4453 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
4454   // We should not collect Scalars more than once per VF. Right now, this
4455   // function is called from collectUniformsAndScalars(), which already does
4456   // this check. Collecting Scalars for VF=1 does not make any sense.
4457   assert(VF.isVector() && Scalars.find(VF) == Scalars.end() &&
4458          "This function should not be visited twice for the same VF");
4459 
4460   // This avoids any chances of creating a REPLICATE recipe during planning
4461   // since that would result in generation of scalarized code during execution,
4462   // which is not supported for scalable vectors.
4463   if (VF.isScalable()) {
4464     Scalars[VF].insert(Uniforms[VF].begin(), Uniforms[VF].end());
4465     return;
4466   }
4467 
4468   SmallSetVector<Instruction *, 8> Worklist;
4469 
4470   // These sets are used to seed the analysis with pointers used by memory
4471   // accesses that will remain scalar.
4472   SmallSetVector<Instruction *, 8> ScalarPtrs;
4473   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
4474   auto *Latch = TheLoop->getLoopLatch();
4475 
4476   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
4477   // The pointer operands of loads and stores will be scalar as long as the
4478   // memory access is not a gather or scatter operation. The value operand of a
4479   // store will remain scalar if the store is scalarized.
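  // For example (a sketch), given a consecutive access
  //   %gep = getelementptr inbounds i32, i32* %a, i64 %iv
  //   %v   = load i32, i32* %gep
  // the load is widened but %gep remains a single scalar pointer, whereas a
  // gather/scatter would consume a whole vector of pointers.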
4480   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
4481     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
4482     assert(WideningDecision != CM_Unknown &&
4483            "Widening decision should be ready at this moment");
4484     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
4485       if (Ptr == Store->getValueOperand())
4486         return WideningDecision == CM_Scalarize;
    assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
           "Ptr is neither a value nor a pointer operand");
4489     return WideningDecision != CM_GatherScatter;
4490   };
4491 
4492   // A helper that returns true if the given value is a bitcast or
4493   // getelementptr instruction contained in the loop.
4494   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
4495     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
4496             isa<GetElementPtrInst>(V)) &&
4497            !TheLoop->isLoopInvariant(V);
4498   };
4499 
4500   // A helper that evaluates a memory access's use of a pointer. If the use will
4501   // be a scalar use and the pointer is only used by memory accesses, we place
4502   // the pointer in ScalarPtrs. Otherwise, the pointer is placed in
4503   // PossibleNonScalarPtrs.
4504   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
4505     // We only care about bitcast and getelementptr instructions contained in
4506     // the loop.
4507     if (!isLoopVaryingBitCastOrGEP(Ptr))
4508       return;
4509 
4510     // If the pointer has already been identified as scalar (e.g., if it was
4511     // also identified as uniform), there's nothing to do.
4512     auto *I = cast<Instruction>(Ptr);
4513     if (Worklist.count(I))
4514       return;
4515 
4516     // If the use of the pointer will be a scalar use, and all users of the
4517     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
4518     // place the pointer in PossibleNonScalarPtrs.
4519     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
4520           return isa<LoadInst>(U) || isa<StoreInst>(U);
4521         }))
4522       ScalarPtrs.insert(I);
4523     else
4524       PossibleNonScalarPtrs.insert(I);
4525   };
4526 
  // We seed the scalars analysis with three classes of instructions: (1)
  // instructions marked uniform-after-vectorization, (2) bitcast,
  // getelementptr and (pointer) phi instructions used by memory accesses
  // requiring a scalar use, and (3) instructions explicitly forced to remain
  // scalar (see ForcedScalars below).
4531   //
4532   // (1) Add to the worklist all instructions that have been identified as
4533   // uniform-after-vectorization.
4534   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
4535 
4536   // (2) Add to the worklist all bitcast and getelementptr instructions used by
4537   // memory accesses requiring a scalar use. The pointer operands of loads and
  // stores will be scalar as long as the memory access is not a gather or
4539   // scatter operation. The value operand of a store will remain scalar if the
4540   // store is scalarized.
4541   for (auto *BB : TheLoop->blocks())
4542     for (auto &I : *BB) {
4543       if (auto *Load = dyn_cast<LoadInst>(&I)) {
4544         evaluatePtrUse(Load, Load->getPointerOperand());
4545       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
4546         evaluatePtrUse(Store, Store->getPointerOperand());
4547         evaluatePtrUse(Store, Store->getValueOperand());
4548       }
4549     }
4550   for (auto *I : ScalarPtrs)
4551     if (!PossibleNonScalarPtrs.count(I)) {
4552       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
4553       Worklist.insert(I);
4554     }
4555 
4556   // Insert the forced scalars.
4557   // FIXME: Currently widenPHIInstruction() often creates a dead vector
4558   // induction variable when the PHI user is scalarized.
4559   auto ForcedScalar = ForcedScalars.find(VF);
4560   if (ForcedScalar != ForcedScalars.end())
4561     for (auto *I : ForcedScalar->second)
4562       Worklist.insert(I);
4563 
4564   // Expand the worklist by looking through any bitcasts and getelementptr
4565   // instructions we've already identified as scalar. This is similar to the
4566   // expansion step in collectLoopUniforms(); however, here we're only
4567   // expanding to include additional bitcasts and getelementptr instructions.
4568   unsigned Idx = 0;
4569   while (Idx != Worklist.size()) {
4570     Instruction *Dst = Worklist[Idx++];
4571     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
4572       continue;
4573     auto *Src = cast<Instruction>(Dst->getOperand(0));
4574     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
4575           auto *J = cast<Instruction>(U);
4576           return !TheLoop->contains(J) || Worklist.count(J) ||
4577                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
4578                   isScalarUse(J, Src));
4579         })) {
4580       Worklist.insert(Src);
4581       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
4582     }
4583   }
4584 
4585   // An induction variable will remain scalar if all users of the induction
4586   // variable and induction variable update remain scalar.
4587   for (auto &Induction : Legal->getInductionVars()) {
4588     auto *Ind = Induction.first;
4589     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4590 
4591     // If tail-folding is applied, the primary induction variable will be used
4592     // to feed a vector compare.
4593     if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
4594       continue;
4595 
4596     // Returns true if \p Indvar is a pointer induction that is used directly by
4597     // load/store instruction \p I.
4598     auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar,
4599                                               Instruction *I) {
4600       return Induction.second.getKind() ==
4601                  InductionDescriptor::IK_PtrInduction &&
4602              (isa<LoadInst>(I) || isa<StoreInst>(I)) &&
4603              Indvar == getLoadStorePointerOperand(I) && isScalarUse(I, Indvar);
4604     };
4605 
4606     // Determine if all users of the induction variable are scalar after
4607     // vectorization.
4608     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4609       auto *I = cast<Instruction>(U);
4610       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
4611              IsDirectLoadStoreFromPtrIndvar(Ind, I);
4612     });
4613     if (!ScalarInd)
4614       continue;
4615 
4616     // Determine if all users of the induction variable update instruction are
4617     // scalar after vectorization.
4618     auto ScalarIndUpdate =
4619         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4620           auto *I = cast<Instruction>(U);
4621           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
4622                  IsDirectLoadStoreFromPtrIndvar(IndUpdate, I);
4623         });
4624     if (!ScalarIndUpdate)
4625       continue;
4626 
4627     // The induction variable and its update instruction will remain scalar.
4628     Worklist.insert(Ind);
4629     Worklist.insert(IndUpdate);
4630     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
4631     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
4632                       << "\n");
4633   }
4634 
4635   Scalars[VF].insert(Worklist.begin(), Worklist.end());
4636 }
4637 
4638 bool LoopVectorizationCostModel::isScalarWithPredication(
4639     Instruction *I, ElementCount VF) const {
4640   if (!blockNeedsPredicationForAnyReason(I->getParent()))
4641     return false;
  switch (I->getOpcode()) {
4643   default:
4644     break;
4645   case Instruction::Load:
4646   case Instruction::Store: {
4647     if (!Legal->isMaskRequired(I))
4648       return false;
4649     auto *Ptr = getLoadStorePointerOperand(I);
4650     auto *Ty = getLoadStoreType(I);
4651     Type *VTy = Ty;
4652     if (VF.isVector())
4653       VTy = VectorType::get(Ty, VF);
4654     const Align Alignment = getLoadStoreAlignment(I);
4655     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
4656                                 TTI.isLegalMaskedGather(VTy, Alignment))
4657                             : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
4658                                 TTI.isLegalMaskedScatter(VTy, Alignment));
4659   }
4660   case Instruction::UDiv:
4661   case Instruction::SDiv:
4662   case Instruction::SRem:
4663   case Instruction::URem:
4664     return mayDivideByZero(*I);
4665   }
4666   return false;
4667 }
4668 
4669 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
4670     Instruction *I, ElementCount VF) {
4671   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
4672   assert(getWideningDecision(I, VF) == CM_Unknown &&
4673          "Decision should not be set yet.");
4674   auto *Group = getInterleavedAccessGroup(I);
4675   assert(Group && "Must have a group.");
4676 
  // If the instruction's allocated size doesn't equal its type size, it
4678   // requires padding and will be scalarized.
4679   auto &DL = I->getModule()->getDataLayout();
4680   auto *ScalarTy = getLoadStoreType(I);
4681   if (hasIrregularType(ScalarTy, DL))
4682     return false;
4683 
4684   // Check if masking is required.
4685   // A Group may need masking for one of two reasons: it resides in a block that
4686   // needs predication, or it was decided to use masking to deal with gaps
4687   // (either a gap at the end of a load-access that may result in a speculative
4688   // load, or any gaps in a store-access).
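  // For example (a hypothetical group), stores to A[3*i] and A[3*i+2] form a
  // group with factor 3 but only 2 members; the lane corresponding to
  // A[3*i+1] must be masked out of the wide store.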
4689   bool PredicatedAccessRequiresMasking =
4690       blockNeedsPredicationForAnyReason(I->getParent()) &&
4691       Legal->isMaskRequired(I);
4692   bool LoadAccessWithGapsRequiresEpilogMasking =
4693       isa<LoadInst>(I) && Group->requiresScalarEpilogue() &&
4694       !isScalarEpilogueAllowed();
4695   bool StoreAccessWithGapsRequiresMasking =
4696       isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor());
4697   if (!PredicatedAccessRequiresMasking &&
4698       !LoadAccessWithGapsRequiresEpilogMasking &&
4699       !StoreAccessWithGapsRequiresMasking)
4700     return true;
4701 
4702   // If masked interleaving is required, we expect that the user/target had
4703   // enabled it, because otherwise it either wouldn't have been created or
4704   // it should have been invalidated by the CostModel.
4705   assert(useMaskedInterleavedAccesses(TTI) &&
4706          "Masked interleave-groups for predicated accesses are not enabled.");
4707 
4708   if (Group->isReverse())
4709     return false;
4710 
4711   auto *Ty = getLoadStoreType(I);
4712   const Align Alignment = getLoadStoreAlignment(I);
4713   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
4714                           : TTI.isLegalMaskedStore(Ty, Alignment);
4715 }
4716 
4717 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
4718     Instruction *I, ElementCount VF) {
4719   // Get and ensure we have a valid memory instruction.
4720   assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction");
4721 
4722   auto *Ptr = getLoadStorePointerOperand(I);
4723   auto *ScalarTy = getLoadStoreType(I);
4724 
  // First of all, in order to be widened the pointer should be consecutive.
4726   if (!Legal->isConsecutivePtr(ScalarTy, Ptr))
4727     return false;
4728 
4729   // If the instruction is a store located in a predicated block, it will be
4730   // scalarized.
4731   if (isScalarWithPredication(I, VF))
4732     return false;
4733 
  // If the instruction's allocated size doesn't equal its type size, it
4735   // requires padding and will be scalarized.
4736   auto &DL = I->getModule()->getDataLayout();
4737   if (hasIrregularType(ScalarTy, DL))
4738     return false;
4739 
4740   return true;
4741 }
4742 
4743 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
4744   // We should not collect Uniforms more than once per VF. Right now,
4745   // this function is called from collectUniformsAndScalars(), which
4746   // already does this check. Collecting Uniforms for VF=1 does not make any
4747   // sense.
4748 
4749   assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
4750          "This function should not be visited twice for the same VF");
4751 
  // Initialize an entry for this VF up front. Even if we find no uniform
  // values, the (empty) entry ensures we will not analyze this VF again:
  // Uniforms.count(VF) will return 1.
4754   Uniforms[VF].clear();
4755 
4756   // We now know that the loop is vectorizable!
4757   // Collect instructions inside the loop that will remain uniform after
4758   // vectorization.
4759 
4760   // Global values, params and instructions outside of current loop are out of
4761   // scope.
4762   auto isOutOfScope = [&](Value *V) -> bool {
4763     Instruction *I = dyn_cast<Instruction>(V);
4764     return (!I || !TheLoop->contains(I));
4765   };
4766 
4767   // Worklist containing uniform instructions demanding lane 0.
4768   SetVector<Instruction *> Worklist;
4769   BasicBlock *Latch = TheLoop->getLoopLatch();
4770 
4771   // Add uniform instructions demanding lane 0 to the worklist. Instructions
4772   // that are scalar with predication must not be considered uniform after
4773   // vectorization, because that would create an erroneous replicating region
4774   // where only a single instance out of VF should be formed.
4775   // TODO: optimize such seldom cases if found important, see PR40816.
4776   auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
4777     if (isOutOfScope(I)) {
4778       LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
4779                         << *I << "\n");
4780       return;
4781     }
4782     if (isScalarWithPredication(I, VF)) {
4783       LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
4784                         << *I << "\n");
4785       return;
4786     }
4787     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
4788     Worklist.insert(I);
4789   };
4790 
4791   // Start with the conditional branch. If the branch condition is an
4792   // instruction contained in the loop that is only used by the branch, it is
4793   // uniform.
4794   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
4795   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
4796     addToWorklistIfAllowed(Cmp);
4797 
4798   auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
4799     InstWidening WideningDecision = getWideningDecision(I, VF);
4800     assert(WideningDecision != CM_Unknown &&
4801            "Widening decision should be ready at this moment");
4802 
4803     // A uniform memory op is itself uniform.  We exclude uniform stores
4804     // here as they demand the last lane, not the first one.
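    // (Illustrative: a store to a loop-invariant address leaves only the
    // value from the final iteration in memory, so the last lane is the one
    // that matters, not lane 0.)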
4805     if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) {
4806       assert(WideningDecision == CM_Scalarize);
4807       return true;
4808     }
4809 
4810     return (WideningDecision == CM_Widen ||
4811             WideningDecision == CM_Widen_Reverse ||
4812             WideningDecision == CM_Interleave);
  };

4816   // Returns true if Ptr is the pointer operand of a memory access instruction
4817   // I, and I is known to not require scalarization.
4818   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
4819     return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
4820   };
4821 
4822   // Holds a list of values which are known to have at least one uniform use.
4823   // Note that there may be other uses which aren't uniform.  A "uniform use"
4824   // here is something which only demands lane 0 of the unrolled iterations;
4825   // it does not imply that all lanes produce the same value (e.g. this is not
  // the usual meaning of uniform).
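  // For example, the pointer operand of a consecutive (widened) load is
  // computed on every iteration, but the wide load itself only consumes the
  // lane-0 address, so that particular use of the pointer is uniform.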
4827   SetVector<Value *> HasUniformUse;
4828 
4829   // Scan the loop for instructions which are either a) known to have only
4830   // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
4831   for (auto *BB : TheLoop->blocks())
4832     for (auto &I : *BB) {
4833       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) {
4834         switch (II->getIntrinsicID()) {
4835         case Intrinsic::sideeffect:
4836         case Intrinsic::experimental_noalias_scope_decl:
4837         case Intrinsic::assume:
4838         case Intrinsic::lifetime_start:
4839         case Intrinsic::lifetime_end:
4840           if (TheLoop->hasLoopInvariantOperands(&I))
4841             addToWorklistIfAllowed(&I);
4842           break;
4843         default:
4844           break;
4845         }
4846       }
4847 
4848       // ExtractValue instructions must be uniform, because the operands are
4849       // known to be loop-invariant.
4850       if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) {
4851         assert(isOutOfScope(EVI->getAggregateOperand()) &&
4852                "Expected aggregate value to be loop invariant");
4853         addToWorklistIfAllowed(EVI);
4854         continue;
4855       }
4856 
4857       // If there's no pointer operand, there's nothing to do.
4858       auto *Ptr = getLoadStorePointerOperand(&I);
4859       if (!Ptr)
4860         continue;
4861 
4862       // A uniform memory op is itself uniform.  We exclude uniform stores
4863       // here as they demand the last lane, not the first one.
4864       if (isa<LoadInst>(I) && Legal->isUniformMemOp(I))
4865         addToWorklistIfAllowed(&I);
4866 
4867       if (isUniformDecision(&I, VF)) {
4868         assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check");
4869         HasUniformUse.insert(Ptr);
4870       }
4871     }
4872 
4873   // Add to the worklist any operands which have *only* uniform (e.g. lane 0
4874   // demanding) users.  Since loops are assumed to be in LCSSA form, this
4875   // disallows uses outside the loop as well.
4876   for (auto *V : HasUniformUse) {
4877     if (isOutOfScope(V))
4878       continue;
4879     auto *I = cast<Instruction>(V);
4880     auto UsersAreMemAccesses =
4881       llvm::all_of(I->users(), [&](User *U) -> bool {
4882         return isVectorizedMemAccessUse(cast<Instruction>(U), V);
4883       });
4884     if (UsersAreMemAccesses)
4885       addToWorklistIfAllowed(I);
4886   }
4887 
  // Expand Worklist in topological order: whenever a new instruction
  // is added, its users should already be inside Worklist. This ensures
  // that a uniform instruction will only be used by uniform instructions.
4891   unsigned idx = 0;
4892   while (idx != Worklist.size()) {
4893     Instruction *I = Worklist[idx++];
4894 
4895     for (auto OV : I->operand_values()) {
4896       // isOutOfScope operands cannot be uniform instructions.
4897       if (isOutOfScope(OV))
4898         continue;
      // First-order recurrence phis should typically be considered
      // non-uniform.
4901       auto *OP = dyn_cast<PHINode>(OV);
4902       if (OP && Legal->isFirstOrderRecurrence(OP))
4903         continue;
4904       // If all the users of the operand are uniform, then add the
4905       // operand into the uniform worklist.
4906       auto *OI = cast<Instruction>(OV);
4907       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
4908             auto *J = cast<Instruction>(U);
4909             return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
4910           }))
4911         addToWorklistIfAllowed(OI);
4912     }
4913   }
4914 
4915   // For an instruction to be added into Worklist above, all its users inside
4916   // the loop should also be in Worklist. However, this condition cannot be
4917   // true for phi nodes that form a cyclic dependence. We must process phi
4918   // nodes separately. An induction variable will remain uniform if all users
4919   // of the induction variable and induction variable update remain uniform.
4920   // The code below handles both pointer and non-pointer induction variables.
4921   for (auto &Induction : Legal->getInductionVars()) {
4922     auto *Ind = Induction.first;
4923     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4924 
4925     // Determine if all users of the induction variable are uniform after
4926     // vectorization.
4927     auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4928       auto *I = cast<Instruction>(U);
4929       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
4930              isVectorizedMemAccessUse(I, Ind);
4931     });
4932     if (!UniformInd)
4933       continue;
4934 
4935     // Determine if all users of the induction variable update instruction are
4936     // uniform after vectorization.
4937     auto UniformIndUpdate =
4938         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4939           auto *I = cast<Instruction>(U);
4940           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
4941                  isVectorizedMemAccessUse(I, IndUpdate);
4942         });
4943     if (!UniformIndUpdate)
4944       continue;
4945 
4946     // The induction variable and its update instruction will remain uniform.
4947     addToWorklistIfAllowed(Ind);
4948     addToWorklistIfAllowed(IndUpdate);
4949   }
4950 
4951   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
4952 }
4953 
4954 bool LoopVectorizationCostModel::runtimeChecksRequired() {
4955   LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
4956 
4957   if (Legal->getRuntimePointerChecking()->Need) {
4958     reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
4959         "runtime pointer checks needed. Enable vectorization of this "
4960         "loop with '#pragma clang loop vectorize(enable)' when "
4961         "compiling with -Os/-Oz",
4962         "CantVersionLoopWithOptForSize", ORE, TheLoop);
4963     return true;
4964   }
4965 
4966   if (!PSE.getPredicate().isAlwaysTrue()) {
4967     reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
4968         "runtime SCEV checks needed. Enable vectorization of this "
4969         "loop with '#pragma clang loop vectorize(enable)' when "
4970         "compiling with -Os/-Oz",
4971         "CantVersionLoopWithOptForSize", ORE, TheLoop);
4972     return true;
4973   }
4974 
4975   // FIXME: Avoid specializing for stride==1 instead of bailing out.
4976   if (!Legal->getLAI()->getSymbolicStrides().empty()) {
4977     reportVectorizationFailure("Runtime stride check for small trip count",
4978         "runtime stride == 1 checks needed. Enable vectorization of "
4979         "this loop without such check by compiling with -Os/-Oz",
4980         "CantVersionLoopWithOptForSize", ORE, TheLoop);
4981     return true;
4982   }
4983 
4984   return false;
4985 }
4986 
4987 ElementCount
4988 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
4989   if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors)
4990     return ElementCount::getScalable(0);
4991 
4992   if (Hints->isScalableVectorizationDisabled()) {
4993     reportVectorizationInfo("Scalable vectorization is explicitly disabled",
4994                             "ScalableVectorizationDisabled", ORE, TheLoop);
4995     return ElementCount::getScalable(0);
4996   }
4997 
4998   LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n");
4999 
5000   auto MaxScalableVF = ElementCount::getScalable(
5001       std::numeric_limits<ElementCount::ScalarTy>::max());
5002 
5003   // Test that the loop-vectorizer can legalize all operations for this MaxVF.
5004   // FIXME: While for scalable vectors this is currently sufficient, this should
5005   // be replaced by a more detailed mechanism that filters out specific VFs,
5006   // instead of invalidating vectorization for a whole set of VFs based on the
5007   // MaxVF.
5008 
5009   // Disable scalable vectorization if the loop contains unsupported reductions.
5010   if (!canVectorizeReductions(MaxScalableVF)) {
5011     reportVectorizationInfo(
5012         "Scalable vectorization not supported for the reduction "
5013         "operations found in this loop.",
5014         "ScalableVFUnfeasible", ORE, TheLoop);
5015     return ElementCount::getScalable(0);
5016   }
5017 
5018   // Disable scalable vectorization if the loop contains any instructions
5019   // with element types not supported for scalable vectors.
5020   if (any_of(ElementTypesInLoop, [&](Type *Ty) {
5021         return !Ty->isVoidTy() &&
5022                !this->TTI.isElementTypeLegalForScalableVector(Ty);
5023       })) {
5024     reportVectorizationInfo("Scalable vectorization is not supported "
5025                             "for all element types found in this loop.",
5026                             "ScalableVFUnfeasible", ORE, TheLoop);
5027     return ElementCount::getScalable(0);
5028   }
5029 
5030   if (Legal->isSafeForAnyVectorWidth())
5031     return MaxScalableVF;
5032 
5033   // Limit MaxScalableVF by the maximum safe dependence distance.
5034   Optional<unsigned> MaxVScale = TTI.getMaxVScale();
5035   if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange))
5036     MaxVScale =
5037         TheFunction->getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax();
5038   MaxScalableVF = ElementCount::getScalable(
5039       MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);
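  // For example (illustrative numbers), with MaxSafeElements = 32 and a
  // maximum vscale of 16, the largest safe scalable VF is 32 / 16 = 2,
  // i.e. vscale x 2.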
5040   if (!MaxScalableVF)
5041     reportVectorizationInfo(
5042         "Max legal vector width too small, scalable vectorization "
5043         "unfeasible.",
5044         "ScalableVFUnfeasible", ORE, TheLoop);
5045 
5046   return MaxScalableVF;
5047 }
5048 
5049 FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF(
5050     unsigned ConstTripCount, ElementCount UserVF, bool FoldTailByMasking) {
5051   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
5052   unsigned SmallestType, WidestType;
5053   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
5054 
5055   // Get the maximum safe dependence distance in bits computed by LAA.
  // It is computed as MaxVF * sizeOf(type) * 8, where type is taken from
  // the memory access that is most restrictive (involved in the smallest
  // dependence distance).
5059   unsigned MaxSafeElements =
5060       PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType);
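  // For example (illustrative numbers), a max safe width of 256 bits with a
  // widest type of i32 gives MaxSafeElements = PowerOf2Floor(256 / 32) = 8.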
5061 
5062   auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements);
5063   auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements);
5064 
5065   LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF
5066                     << ".\n");
5067   LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF
5068                     << ".\n");
5069 
5070   // First analyze the UserVF, fall back if the UserVF should be ignored.
5071   if (UserVF) {
5072     auto MaxSafeUserVF =
5073         UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;
5074 
5075     if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) {
      // If `VF=vscale x N` is safe, then so is `VF=N`, since vscale >= 1.
5077       if (UserVF.isScalable())
5078         return FixedScalableVFPair(
5079             ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF);
5080       else
5081         return UserVF;
5082     }
5083 
5084     assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));
5085 
5086     // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it
5087     // is better to ignore the hint and let the compiler choose a suitable VF.
5088     if (!UserVF.isScalable()) {
5089       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5090                         << " is unsafe, clamping to max safe VF="
5091                         << MaxSafeFixedVF << ".\n");
5092       ORE->emit([&]() {
5093         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5094                                           TheLoop->getStartLoc(),
5095                                           TheLoop->getHeader())
5096                << "User-specified vectorization factor "
5097                << ore::NV("UserVectorizationFactor", UserVF)
5098                << " is unsafe, clamping to maximum safe vectorization factor "
5099                << ore::NV("VectorizationFactor", MaxSafeFixedVF);
5100       });
5101       return MaxSafeFixedVF;
5102     }
5103 
5104     if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) {
5105       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5106                         << " is ignored because scalable vectors are not "
5107                            "available.\n");
5108       ORE->emit([&]() {
5109         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5110                                           TheLoop->getStartLoc(),
5111                                           TheLoop->getHeader())
5112                << "User-specified vectorization factor "
5113                << ore::NV("UserVectorizationFactor", UserVF)
5114                << " is ignored because the target does not support scalable "
5115                   "vectors. The compiler will pick a more suitable value.";
5116       });
5117     } else {
5118       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5119                         << " is unsafe. Ignoring scalable UserVF.\n");
5120       ORE->emit([&]() {
5121         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5122                                           TheLoop->getStartLoc(),
5123                                           TheLoop->getHeader())
5124                << "User-specified vectorization factor "
5125                << ore::NV("UserVectorizationFactor", UserVF)
5126                << " is unsafe. Ignoring the hint to let the compiler pick a "
5127                   "more suitable value.";
5128       });
5129     }
5130   }
5131 
5132   LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
5133                     << " / " << WidestType << " bits.\n");
5134 
5135   FixedScalableVFPair Result(ElementCount::getFixed(1),
5136                              ElementCount::getScalable(0));
5137   if (auto MaxVF =
5138           getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
5139                                   MaxSafeFixedVF, FoldTailByMasking))
5140     Result.FixedVF = MaxVF;
5141 
5142   if (auto MaxVF =
5143           getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
5144                                   MaxSafeScalableVF, FoldTailByMasking))
5145     if (MaxVF.isScalable()) {
5146       Result.ScalableVF = MaxVF;
5147       LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
5148                         << "\n");
5149     }
5150 
5151   return Result;
5152 }
5153 
5154 FixedScalableVFPair
5155 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
5156   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to do this anyway, since the runtime check is
    // still likely to be dynamically uniform if the target can skip it.
5159     reportVectorizationFailure(
5160         "Not inserting runtime ptr check for divergent target",
5161         "runtime pointer checks needed. Not enabled for divergent target",
5162         "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
5163     return FixedScalableVFPair::getNone();
5164   }
5165 
5166   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5167   LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
5168   if (TC == 1) {
5169     reportVectorizationFailure("Single iteration (non) loop",
5170         "loop trip count is one, irrelevant for vectorization",
5171         "SingleIterationLoop", ORE, TheLoop);
5172     return FixedScalableVFPair::getNone();
5173   }
5174 
5175   switch (ScalarEpilogueStatus) {
5176   case CM_ScalarEpilogueAllowed:
5177     return computeFeasibleMaxVF(TC, UserVF, false);
5178   case CM_ScalarEpilogueNotAllowedUsePredicate:
5179     LLVM_FALLTHROUGH;
5180   case CM_ScalarEpilogueNotNeededUsePredicate:
5181     LLVM_DEBUG(
5182         dbgs() << "LV: vector predicate hint/switch found.\n"
5183                << "LV: Not allowing scalar epilogue, creating predicated "
5184                << "vector loop.\n");
5185     break;
5186   case CM_ScalarEpilogueNotAllowedLowTripLoop:
5187     // fallthrough as a special case of OptForSize
5188   case CM_ScalarEpilogueNotAllowedOptSize:
5189     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
5190       LLVM_DEBUG(
5191           dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5192     else
5193       LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5194                         << "count.\n");
5195 
    // Bail out if runtime checks are required; they are undesirable when
    // optimizing for size.
5198     if (runtimeChecksRequired())
5199       return FixedScalableVFPair::getNone();
5200 
5201     break;
5202   }
5203 
  // The only loops we can vectorize without a scalar epilogue are loops with
  // a bottom-test and a single exiting block. We'd have to handle the fact
5206   // that not every instruction executes on the last iteration.  This will
5207   // require a lane mask which varies through the vector loop body.  (TODO)
5208   if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5209     // If there was a tail-folding hint/switch, but we can't fold the tail by
5210     // masking, fallback to a vectorization with a scalar epilogue.
5211     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5212       LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5213                            "scalar epilogue instead.\n");
5214       ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5215       return computeFeasibleMaxVF(TC, UserVF, false);
5216     }
5217     return FixedScalableVFPair::getNone();
5218   }
5219 
  // Now try tail folding.
5221 
5222   // Invalidate interleave groups that require an epilogue if we can't mask
5223   // the interleave-group.
5224   if (!useMaskedInterleavedAccesses(TTI)) {
5225     assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5226            "No decisions should have been taken at this point");
    // Note: There is no need to invalidate any cost modeling decisions here,
    // as none were taken so far.
5229     InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5230   }
5231 
5232   FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF, true);
5233   // Avoid tail folding if the trip count is known to be a multiple of any VF
5234   // we chose.
5235   // FIXME: The condition below pessimises the case for fixed-width vectors,
5236   // when scalable VFs are also candidates for vectorization.
5237   if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) {
5238     ElementCount MaxFixedVF = MaxFactors.FixedVF;
5239     assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) &&
5240            "MaxFixedVF must be a power of 2");
5241     unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC
5242                                    : MaxFixedVF.getFixedValue();
5243     ScalarEvolution *SE = PSE.getSE();
5244     const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
5245     const SCEV *ExitCount = SE->getAddExpr(
5246         BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
5247     const SCEV *Rem = SE->getURemExpr(
5248         SE->applyLoopGuards(ExitCount, TheLoop),
5249         SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
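    // For example (illustrative numbers), with a trip count of 16,
    // MaxFixedVF = 4 and UserIC = 2, the remainder 16 urem 8 is 0, so no
    // tail remains and tail folding is unnecessary.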
5250     if (Rem->isZero()) {
5251       // Accept MaxFixedVF if we do not have a tail.
5252       LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
5253       return MaxFactors;
5254     }
5255   }
5256 
  // For scalable vectors, don't use tail folding for low trip counts or when
  // optimizing for code size, unless the user has explicitly requested it.
5260   if (ScalarEpilogueStatus != CM_ScalarEpilogueNotNeededUsePredicate &&
5261       ScalarEpilogueStatus != CM_ScalarEpilogueNotAllowedUsePredicate &&
5262       MaxFactors.ScalableVF.isVector())
5263     MaxFactors.ScalableVF = ElementCount::getScalable(0);
5264 
5265   // If we don't know the precise trip count, or if the trip count that we
5266   // found modulo the vectorization factor is not zero, try to fold the tail
5267   // by masking.
5268   // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
5269   if (Legal->prepareToFoldTailByMasking()) {
5270     FoldTailByMasking = true;
5271     return MaxFactors;
5272   }
5273 
5274   // If there was a tail-folding hint/switch, but we can't fold the tail by
5275   // masking, fallback to a vectorization with a scalar epilogue.
5276   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5277     LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5278                          "scalar epilogue instead.\n");
5279     ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5280     return MaxFactors;
5281   }
5282 
5283   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
5284     LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
5285     return FixedScalableVFPair::getNone();
5286   }
5287 
5288   if (TC == 0) {
5289     reportVectorizationFailure(
5290         "Unable to calculate the loop count due to complex control flow",
5291         "unable to calculate the loop count due to complex control flow",
5292         "UnknownLoopCountComplexCFG", ORE, TheLoop);
5293     return FixedScalableVFPair::getNone();
5294   }
5295 
5296   reportVectorizationFailure(
5297       "Cannot optimize for size and vectorize at the same time.",
5298       "cannot optimize for size and vectorize at the same time. "
5299       "Enable vectorization of this loop with '#pragma clang loop "
5300       "vectorize(enable)' when compiling with -Os/-Oz",
5301       "NoTailLoopWithOptForSize", ORE, TheLoop);
5302   return FixedScalableVFPair::getNone();
5303 }
5304 
5305 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
5306     unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType,
5307     const ElementCount &MaxSafeVF, bool FoldTailByMasking) {
5308   bool ComputeScalableMaxVF = MaxSafeVF.isScalable();
5309   TypeSize WidestRegister = TTI.getRegisterBitWidth(
5310       ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
5311                            : TargetTransformInfo::RGK_FixedWidthVector);
5312 
5313   // Convenience function to return the minimum of two ElementCounts.
5314   auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) {
5315     assert((LHS.isScalable() == RHS.isScalable()) &&
5316            "Scalable flags must match");
5317     return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS;
5318   };
5319 
5320   // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
  // Note that both WidestRegister and WidestType may not be powers of 2.
5322   auto MaxVectorElementCount = ElementCount::get(
5323       PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType),
5324       ComputeScalableMaxVF);
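  // For example (illustrative), a 128-bit widest register and a widest type
  // of i32 give PowerOf2Floor(128 / 32) = 4 lanes, i.e. VF = 4 for fixed
  // vectors or VF = vscale x 4 for scalable ones.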
5325   MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
5326   LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
5327                     << (MaxVectorElementCount * WidestType) << " bits.\n");
5328 
5329   if (!MaxVectorElementCount) {
5330     LLVM_DEBUG(dbgs() << "LV: The target has no "
5331                       << (ComputeScalableMaxVF ? "scalable" : "fixed")
5332                       << " vector registers.\n");
5333     return ElementCount::getFixed(1);
5334   }
5335 
5336   const auto TripCountEC = ElementCount::getFixed(ConstTripCount);
5337   if (ConstTripCount &&
5338       ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) &&
5339       (!FoldTailByMasking || isPowerOf2_32(ConstTripCount))) {
5340     // If loop trip count (TC) is known at compile time there is no point in
5341     // choosing VF greater than TC (as done in the loop below). Select maximum
5342     // power of two which doesn't exceed TC.
5343     // If MaxVectorElementCount is scalable, we only fall back on a fixed VF
5344     // when the TC is less than or equal to the known number of lanes.
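    // For example (illustrative), a constant trip count of 10 with a larger
    // maximum VF clamps the chosen VF to PowerOf2Floor(10) = 8.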
5345     auto ClampedConstTripCount = PowerOf2Floor(ConstTripCount);
5346     LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to maximum power of two not "
5347                          "exceeding the constant trip count: "
5348                       << ClampedConstTripCount << "\n");
5349     return ElementCount::getFixed(ClampedConstTripCount);
5350   }
5351 
5352   ElementCount MaxVF = MaxVectorElementCount;
5353   if (TTI.shouldMaximizeVectorBandwidth() ||
5354       (MaximizeBandwidth && isScalarEpilogueAllowed())) {
5355     auto MaxVectorElementCountMaxBW = ElementCount::get(
5356         PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType),
5357         ComputeScalableMaxVF);
5358     MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
5359 
5360     // Collect all viable vectorization factors larger than the default MaxVF
5361     // (i.e. MaxVectorElementCount).
5362     SmallVector<ElementCount, 8> VFs;
5363     for (ElementCount VS = MaxVectorElementCount * 2;
5364          ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2)
5365       VFs.push_back(VS);
5366 
5367     // For each VF calculate its register usage.
5368     auto RUs = calculateRegisterUsage(VFs);
5369 
5370     // Select the largest VF which doesn't require more registers than existing
5371     // ones.
5372     for (int i = RUs.size() - 1; i >= 0; --i) {
5373       bool Selected = true;
5374       for (auto &pair : RUs[i].MaxLocalUsers) {
5375         unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5376         if (pair.second > TargetNumRegisters)
5377           Selected = false;
5378       }
5379       if (Selected) {
5380         MaxVF = VFs[i];
5381         break;
5382       }
5383     }
5384     if (ElementCount MinVF =
5385             TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) {
5386       if (ElementCount::isKnownLT(MaxVF, MinVF)) {
5387         LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
5388                           << ") with target's minimum: " << MinVF << '\n');
5389         MaxVF = MinVF;
5390       }
5391     }
5392   }
5393   return MaxVF;
5394 }
5395 
5396 Optional<unsigned> LoopVectorizationCostModel::getVScaleForTuning() const {
5397   if (TheFunction->hasFnAttribute(Attribute::VScaleRange)) {
5398     auto Attr = TheFunction->getFnAttribute(Attribute::VScaleRange);
5399     auto Min = Attr.getVScaleRangeMin();
5400     auto Max = Attr.getVScaleRangeMax();
5401     if (Max && Min == Max)
5402       return Max;
5403   }
5404 
5405   return TTI.getVScaleForTuning();
5406 }
5407 
5408 bool LoopVectorizationCostModel::isMoreProfitable(
5409     const VectorizationFactor &A, const VectorizationFactor &B) const {
5410   InstructionCost CostA = A.Cost;
5411   InstructionCost CostB = B.Cost;
5412 
5413   unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop);
5414 
5415   if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking &&
5416       MaxTripCount) {
5417     // If we are folding the tail and the trip count is a known (possibly small)
5418     // constant, the trip count will be rounded up to an integer number of
5419     // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF),
5420     // which we compare directly. When not folding the tail, the total cost will
5421     // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is
5422     // approximated with the per-lane cost below instead of using the tripcount
5423     // as here.
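    // For example (illustrative numbers), with MaxTripCount = 10, VF=4 needs
    // ceil(10/4) = 3 vector iterations while VF=8 needs ceil(10/8) = 2, so a
    // moderately more expensive VF=8 body can still win overall.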
5424     auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue());
5425     auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue());
5426     return RTCostA < RTCostB;
5427   }
5428 
5429   // Improve estimate for the vector width if it is scalable.
5430   unsigned EstimatedWidthA = A.Width.getKnownMinValue();
5431   unsigned EstimatedWidthB = B.Width.getKnownMinValue();
5432   if (Optional<unsigned> VScale = getVScaleForTuning()) {
5433     if (A.Width.isScalable())
5434       EstimatedWidthA *= VScale.getValue();
5435     if (B.Width.isScalable())
5436       EstimatedWidthB *= VScale.getValue();
5437   }
5438 
5439   // Assume vscale may be larger than 1 (or the value being tuned for),
5440   // so that scalable vectorization is slightly favorable over fixed-width
5441   // vectorization.
5442   if (A.Width.isScalable() && !B.Width.isScalable())
5443     return (CostA * B.Width.getFixedValue()) <= (CostB * EstimatedWidthA);
5444 
5445   // To avoid the need for FP division:
5446   //      (CostA / A.Width) < (CostB / B.Width)
5447   // <=>  (CostA * B.Width) < (CostB * A.Width)
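  // For example (illustrative numbers), CostA = 8 at width 4 versus
  // CostB = 10 at width 8: 8 * 8 = 64 is not less than 10 * 4 = 40, so the
  // wider factor B (1.25 per lane versus 2 per lane) is preferred.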
5448   return (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA);
5449 }
5450 
5451 VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor(
5452     const ElementCountSet &VFCandidates) {
5453   InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first;
5454   LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
5455   assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
5456   assert(VFCandidates.count(ElementCount::getFixed(1)) &&
5457          "Expected Scalar VF to be a candidate");
5458 
5459   const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost);
5460   VectorizationFactor ChosenFactor = ScalarCost;
5461 
5462   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
5463   if (ForceVectorization && VFCandidates.size() > 1) {
5464     // Ignore scalar width, because the user explicitly wants vectorization.
5465     // Initialize cost to max so that VF = 2 is, at least, chosen during cost
5466     // evaluation.
5467     ChosenFactor.Cost = InstructionCost::getMax();
5468   }
5469 
5470   SmallVector<InstructionVFPair> InvalidCosts;
5471   for (const auto &i : VFCandidates) {
5472     // The cost for scalar VF=1 is already calculated, so ignore it.
5473     if (i.isScalar())
5474       continue;
5475 
5476     VectorizationCostTy C = expectedCost(i, &InvalidCosts);
5477     VectorizationFactor Candidate(i, C.first);
5478 
5479 #ifndef NDEBUG
5480     unsigned AssumedMinimumVscale = 1;
5481     if (Optional<unsigned> VScale = getVScaleForTuning())
5482       AssumedMinimumVscale = VScale.getValue();
5483     unsigned Width =
5484         Candidate.Width.isScalable()
5485             ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale
5486             : Candidate.Width.getFixedValue();
5487     LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
5488                       << " costs: " << (Candidate.Cost / Width));
5489     if (i.isScalable())
5490       LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of "
5491                         << AssumedMinimumVscale << ")");
5492     LLVM_DEBUG(dbgs() << ".\n");
5493 #endif
5494 
5495     if (!C.second && !ForceVectorization) {
5496       LLVM_DEBUG(
5497           dbgs() << "LV: Not considering vector loop of width " << i
5498                  << " because it will not generate any vector instructions.\n");
5499       continue;
5500     }
5501 
    // If profitable, add it to the ProfitableVFs list.
5503     if (isMoreProfitable(Candidate, ScalarCost))
5504       ProfitableVFs.push_back(Candidate);
5505 
5506     if (isMoreProfitable(Candidate, ChosenFactor))
5507       ChosenFactor = Candidate;
5508   }
5509 
5510   // Emit a report of VFs with invalid costs in the loop.
5511   if (!InvalidCosts.empty()) {
5512     // Group the remarks per instruction, keeping the instruction order from
5513     // InvalidCosts.
5514     std::map<Instruction *, unsigned> Numbering;
5515     unsigned I = 0;
5516     for (auto &Pair : InvalidCosts)
5517       if (!Numbering.count(Pair.first))
5518         Numbering[Pair.first] = I++;
5519 
5520     // Sort the list, first on instruction(number) then on VF.
5521     llvm::sort(InvalidCosts,
5522                [&Numbering](InstructionVFPair &A, InstructionVFPair &B) {
5523                  if (Numbering[A.first] != Numbering[B.first])
5524                    return Numbering[A.first] < Numbering[B.first];
5525                  ElementCountComparator ECC;
5526                  return ECC(A.second, B.second);
5527                });
5528 
5529     // For a list of ordered instruction-vf pairs:
5530     //   [(load, vf1), (load, vf2), (store, vf1)]
5531     // Group the instructions together to emit separate remarks for:
5532     //   load  (vf1, vf2)
5533     //   store (vf1)
5534     auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts);
5535     auto Subset = ArrayRef<InstructionVFPair>();
5536     do {
5537       if (Subset.empty())
5538         Subset = Tail.take_front(1);
5539 
5540       Instruction *I = Subset.front().first;
5541 
5542       // If the next instruction is different, or if there are no other pairs,
5543       // emit a remark for the collated subset. e.g.
      //   [(load, vf1), (load, vf2)]
      // to emit:
      //  remark: invalid costs for 'load' at VF=(vf1, vf2)
5547       if (Subset == Tail || Tail[Subset.size()].first != I) {
5548         std::string OutString;
5549         raw_string_ostream OS(OutString);
5550         assert(!Subset.empty() && "Unexpected empty range");
5551         OS << "Instruction with invalid costs prevented vectorization at VF=(";
5552         for (auto &Pair : Subset)
5553           OS << (Pair.second == Subset.front().second ? "" : ", ")
5554              << Pair.second;
5555         OS << "):";
5556         if (auto *CI = dyn_cast<CallInst>(I))
5557           OS << " call to " << CI->getCalledFunction()->getName();
5558         else
5559           OS << " " << I->getOpcodeName();
5560         OS.flush();
5561         reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I);
5562         Tail = Tail.drop_front(Subset.size());
5563         Subset = {};
5564       } else
5565         // Grow the subset by one element
5566         Subset = Tail.take_front(Subset.size() + 1);
5567     } while (!Tail.empty());
5568   }
5569 
5570   if (!EnableCondStoresVectorization && NumPredStores) {
5571     reportVectorizationFailure("There are conditional stores.",
5572         "store that is conditionally executed prevents vectorization",
5573         "ConditionalStore", ORE, TheLoop);
5574     ChosenFactor = ScalarCost;
5575   }
5576 
5577   LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() &&
5578                  ChosenFactor.Cost >= ScalarCost.Cost) dbgs()
5579              << "LV: Vectorization seems to be not beneficial, "
5580              << "but was forced by a user.\n");
5581   LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n");
5582   return ChosenFactor;
5583 }
5584 
5585 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization(
5586     const Loop &L, ElementCount VF) const {
  // Cross-iteration phis such as reductions need special handling and are
  // currently unsupported.
5589   if (any_of(L.getHeader()->phis(),
5590              [&](PHINode &Phi) { return Legal->isFirstOrderRecurrence(&Phi); }))
5591     return false;
5592 
5593   // Phis with uses outside of the loop require special handling and are
5594   // currently unsupported.
5595   for (auto &Entry : Legal->getInductionVars()) {
5596     // Look for uses of the value of the induction at the last iteration.
5597     Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch());
5598     for (User *U : PostInc->users())
5599       if (!L.contains(cast<Instruction>(U)))
5600         return false;
    // Look for uses of the penultimate value of the induction.
5602     for (User *U : Entry.first->users())
5603       if (!L.contains(cast<Instruction>(U)))
5604         return false;
5605   }
5606 
5607   // Induction variables that are widened require special handling that is
5608   // currently not supported.
5609   if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
5610         return !(this->isScalarAfterVectorization(Entry.first, VF) ||
5611                  this->isProfitableToScalarize(Entry.first, VF));
5612       }))
5613     return false;
5614 
  // Epilogue vectorization code has not been audited to ensure it handles
  // non-latch exits properly. It may be fine, but it needs to be audited and
  // tested.
5618   if (L.getExitingBlock() != L.getLoopLatch())
5619     return false;
5620 
5621   return true;
5622 }
5623 
5624 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
5625     const ElementCount VF) const {
5626   // FIXME: We need a much better cost-model to take different parameters such
5627   // as register pressure, code size increase and cost of extra branches into
5628   // account. For now we apply a very crude heuristic and only consider loops
5629   // with vectorization factors larger than a certain value.
  // We also consider epilogue vectorization unprofitable for targets that
  // don't consider interleaving beneficial (e.g. MVE).
5632   if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
5633     return false;
5634   // FIXME: We should consider changing the threshold for scalable
5635   // vectors to take VScaleForTuning into account.
5636   if (VF.getKnownMinValue() >= EpilogueVectorizationMinVF)
5637     return true;
5638   return false;
5639 }
5640 
5641 VectorizationFactor
5642 LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
5643     const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
5644   VectorizationFactor Result = VectorizationFactor::Disabled();
5645   if (!EnableEpilogueVectorization) {
5646     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
5647     return Result;
5648   }
5649 
5650   if (!isScalarEpilogueAllowed()) {
5651     LLVM_DEBUG(
5652         dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
5653                   "allowed.\n";);
5654     return Result;
5655   }
5656 
5657   // Not really a cost consideration, but check for unsupported cases here to
5658   // simplify the logic.
5659   if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
5660     LLVM_DEBUG(
5661         dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
5662                   "not a supported candidate.\n";);
5663     return Result;
5664   }
5665 
5666   if (EpilogueVectorizationForceVF > 1) {
5667     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
    ElementCount ForcedEC =
        ElementCount::getFixed(EpilogueVectorizationForceVF);
5669     if (LVP.hasPlanWithVF(ForcedEC))
5670       return {ForcedEC, 0};
5671     else {
5672       LLVM_DEBUG(
5673           dbgs()
5674               << "LEV: Epilogue vectorization forced factor is not viable.\n";);
5675       return Result;
5676     }
5677   }
5678 
5679   if (TheLoop->getHeader()->getParent()->hasOptSize() ||
5680       TheLoop->getHeader()->getParent()->hasMinSize()) {
5681     LLVM_DEBUG(
5682         dbgs()
5683             << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
5684     return Result;
5685   }
5686 
5687   if (!isEpilogueVectorizationProfitable(MainLoopVF)) {
5688     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for "
5689                          "this loop\n");
5690     return Result;
5691   }
5692 
5693   // If MainLoopVF = vscale x 2, and vscale is expected to be 4, then we know
5694   // the main loop handles 8 lanes per iteration. We could still benefit from
5695   // vectorizing the epilogue loop with VF=4.
5696   ElementCount EstimatedRuntimeVF = MainLoopVF;
5697   if (MainLoopVF.isScalable()) {
5698     EstimatedRuntimeVF = ElementCount::getFixed(MainLoopVF.getKnownMinValue());
5699     if (Optional<unsigned> VScale = getVScaleForTuning())
5700       EstimatedRuntimeVF *= VScale.getValue();
5701   }
5702 
5703   for (auto &NextVF : ProfitableVFs)
5704     if (((!NextVF.Width.isScalable() && MainLoopVF.isScalable() &&
5705           ElementCount::isKnownLT(NextVF.Width, EstimatedRuntimeVF)) ||
5706          ElementCount::isKnownLT(NextVF.Width, MainLoopVF)) &&
5707         (Result.Width.isScalar() || isMoreProfitable(NextVF, Result)) &&
5708         LVP.hasPlanWithVF(NextVF.Width))
5709       Result = NextVF;
5710 
5711   if (Result != VectorizationFactor::Disabled())
5712     LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
5713                       << Result.Width << "\n";);
5714   return Result;
5715 }
5716 
5717 std::pair<unsigned, unsigned>
5718 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
5719   unsigned MinWidth = -1U;
5720   unsigned MaxWidth = 8;
5721   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5722   // For in-loop reductions, no element types are added to ElementTypesInLoop
5723   // if there are no loads/stores in the loop. In this case, check through the
5724   // reduction variables to determine the maximum width.
5725   if (ElementTypesInLoop.empty() && !Legal->getReductionVars().empty()) {
5726     // Reset MaxWidth so that we can find the smallest type used by recurrences
5727     // in the loop.
5728     MaxWidth = -1U;
5729     for (auto &PhiDescriptorPair : Legal->getReductionVars()) {
5730       const RecurrenceDescriptor &RdxDesc = PhiDescriptorPair.second;
5731       // When finding the min width used by the recurrence we need to account
5732       // for casts on the input operands of the recurrence.
5733       MaxWidth = std::min<unsigned>(
5734           MaxWidth, std::min<unsigned>(
5735                         RdxDesc.getMinWidthCastToRecurrenceTypeInBits(),
5736                         RdxDesc.getRecurrenceType()->getScalarSizeInBits()));
5737     }
5738   } else {
5739     for (Type *T : ElementTypesInLoop) {
5740       MinWidth = std::min<unsigned>(
5741           MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
5742       MaxWidth = std::max<unsigned>(
5743           MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
5744     }
5745   }
5746   return {MinWidth, MaxWidth};
5747 }
5748 
5749 void LoopVectorizationCostModel::collectElementTypesForWidening() {
5750   ElementTypesInLoop.clear();
5751   // For each block.
5752   for (BasicBlock *BB : TheLoop->blocks()) {
5753     // For each instruction in the loop.
5754     for (Instruction &I : BB->instructionsWithoutDebug()) {
5755       Type *T = I.getType();
5756 
5757       // Skip ignored values.
5758       if (ValuesToIgnore.count(&I))
5759         continue;
5760 
5761       // Only examine Loads, Stores and PHINodes.
5762       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
5763         continue;
5764 
5765       // Examine PHI nodes that are reduction variables. Update the type to
5766       // account for the recurrence type.
5767       if (auto *PN = dyn_cast<PHINode>(&I)) {
5768         if (!Legal->isReductionVariable(PN))
5769           continue;
5770         const RecurrenceDescriptor &RdxDesc =
5771             Legal->getReductionVars().find(PN)->second;
5772         if (PreferInLoopReductions || useOrderedReductions(RdxDesc) ||
5773             TTI.preferInLoopReduction(RdxDesc.getOpcode(),
5774                                       RdxDesc.getRecurrenceType(),
5775                                       TargetTransformInfo::ReductionFlags()))
5776           continue;
5777         T = RdxDesc.getRecurrenceType();
5778       }
5779 
5780       // Examine the stored values.
5781       if (auto *ST = dyn_cast<StoreInst>(&I))
5782         T = ST->getValueOperand()->getType();
5783 
5784       assert(T->isSized() &&
5785              "Expected the load/store/recurrence type to be sized");
5786 
5787       ElementTypesInLoop.insert(T);
5788     }
5789   }
5790 }
5791 
5792 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
5793                                                            unsigned LoopCost) {
5794   // -- The interleave heuristics --
5795   // We interleave the loop in order to expose ILP and reduce the loop overhead.
5796   // There are many micro-architectural considerations that we can't predict
5797   // at this level. For example, frontend pressure (on decode or fetch) due to
5798   // code size, or the number and capabilities of the execution ports.
5799   //
5800   // We use the following heuristics to select the interleave count:
5801   // 1. If the code has reductions, then we interleave to break the cross
5802   // iteration dependency.
5803   // 2. If the loop is really small, then we interleave to reduce the loop
5804   // overhead.
5805   // 3. We don't interleave if we think that we will spill registers to memory
5806   // due to the increased register pressure.
5807 
5808   if (!isScalarEpilogueAllowed())
5809     return 1;
5810 
  // The maximum safe dependence distance has already been consumed when
  // picking the VF; interleaving further could violate it.
5812   if (Legal->getMaxSafeDepDistBytes() != -1U)
5813     return 1;
5814 
5815   auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
5816   const bool HasReductions = !Legal->getReductionVars().empty();
  // Do not interleave loops with a relatively small known or estimated trip
  // count. But we will interleave when InterleaveSmallLoopScalarReduction is
  // enabled and the code has scalar reductions (HasReductions && VF == 1),
  // because under those conditions interleaving can expose ILP and break
  // cross-iteration dependences for reductions.
5822   if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
5823       !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
5824     return 1;
5825 
5826   RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these counts below, so assume that we have at least one
  // instruction that uses at least one register.
5829   for (auto& pair : R.MaxLocalUsers) {
5830     pair.second = std::max(pair.second, 1U);
5831   }
5832 
5833   // We calculate the interleave count using the following formula.
5834   // Subtract the number of loop invariants from the number of available
5835   // registers. These registers are used by all of the interleaved instances.
5836   // Next, divide the remaining registers by the number of registers that is
5837   // required by the loop, in order to estimate how many parallel instances
  // fit without causing spills. All of this is rounded down if necessary to
  // be a power of two. We want a power-of-two interleave count to simplify
  // any addressing operations and alignment considerations.
  // A power-of-two interleave count also ensures that the induction variable
  // of the vector loop wraps to zero when the tail is folded by masking; this
  // currently happens when OptForSize, in which case IC is set to 1 above.
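  // For example (illustrative numbers), with 32 registers in a class, 4 loop
  // invariants, and a maximum local usage of 7 registers, the estimate is
  // PowerOf2Floor((32 - 4) / 7) = PowerOf2Floor(4) = 4 interleaved instances.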
5844   unsigned IC = UINT_MAX;
5845 
5846   for (auto& pair : R.MaxLocalUsers) {
5847     unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
    LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
                      << " registers of "
                      << TTI.getRegisterClassName(pair.first)
                      << " register class\n");
5851     if (VF.isScalar()) {
5852       if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
5853         TargetNumRegisters = ForceTargetNumScalarRegs;
5854     } else {
5855       if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
5856         TargetNumRegisters = ForceTargetNumVectorRegs;
5857     }
5858     unsigned MaxLocalUsers = pair.second;
5859     unsigned LoopInvariantRegs = 0;
5860     if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
5861       LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
5862 
    unsigned TmpIC =
        PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers);
5864     // Don't count the induction variable as interleaved.
5865     if (EnableIndVarRegisterHeur) {
5866       TmpIC =
5867           PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
5868                         std::max(1U, (MaxLocalUsers - 1)));
5869     }
5870 
5871     IC = std::min(IC, TmpIC);
5872   }
5873 
5874   // Clamp the interleave ranges to reasonable counts.
5875   unsigned MaxInterleaveCount =
5876       TTI.getMaxInterleaveFactor(VF.getKnownMinValue());
5877 
5878   // Check if the user has overridden the max.
5879   if (VF.isScalar()) {
5880     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
5881       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
5882   } else {
5883     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
5884       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
5885   }
5886 
  // If the trip count is a known or estimated compile-time constant, limit
  // the interleave count to at most the trip count divided by VF, ensuring
  // the result is at least 1.
5890   //
5891   // For scalable vectors we can't know if interleaving is beneficial. It may
5892   // not be beneficial for small loops if none of the lanes in the second vector
5893   // iterations is enabled. However, for larger loops, there is likely to be a
5894   // similar benefit as for fixed-width vectors. For now, we choose to leave
5895   // the InterleaveCount as if vscale is '1', although if some information about
5896   // the vector is known (e.g. min vector size), we can make a better decision.
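  // For example (illustrative numbers), a best known trip count of 20 with
  // VF = 4 caps MaxInterleaveCount at 20 / 4 = 5, subject to the target
  // maximum above.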
5897   if (BestKnownTC) {
5898     MaxInterleaveCount =
5899         std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
5900     // Make sure MaxInterleaveCount is greater than 0.
5901     MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
5902   }
5903 
5904   assert(MaxInterleaveCount > 0 &&
5905          "Maximum interleave count must be greater than 0");
5906 
  // Clamp the calculated IC to be between 1 and the max interleave count
  // that the target and trip count allow.
5909   if (IC > MaxInterleaveCount)
5910     IC = MaxInterleaveCount;
5911   else
5912     // Make sure IC is greater than 0.
5913     IC = std::max(1u, IC);
5914 
5915   assert(IC > 0 && "Interleave count must be greater than 0.");
5916 
5917   // If we did not calculate the cost for VF (because the user selected the VF)
5918   // then we calculate the cost of VF here.
5919   if (LoopCost == 0) {
5920     InstructionCost C = expectedCost(VF).first;
5921     assert(C.isValid() && "Expected to have chosen a VF with valid cost");
5922     LoopCost = *C.getValue();
5923   }
5924 
5925   assert(LoopCost && "Non-zero loop cost expected");
5926 
5927   // Interleave if we vectorized this loop and there is a reduction that could
5928   // benefit from interleaving.
5929   if (VF.isVector() && HasReductions) {
5930     LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
5931     return IC;
5932   }
5933 
5934   // For any scalar loop that either requires runtime checks or predication we
5935   // are better off leaving this to the unroller. Note that if we've already
5936   // vectorized the loop we will have done the runtime check and so interleaving
5937   // won't require further checks.
5938   bool ScalarInterleavingRequiresPredication =
5939       (VF.isScalar() && any_of(TheLoop->blocks(), [this](BasicBlock *BB) {
5940          return Legal->blockNeedsPredication(BB);
5941        }));
5942   bool ScalarInterleavingRequiresRuntimePointerCheck =
5943       (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
5944 
5945   // We want to interleave small loops in order to reduce the loop overhead and
5946   // potentially expose ILP opportunities.
5947   LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
5948                     << "LV: IC is " << IC << '\n'
5949                     << "LV: VF is " << VF << '\n');
5950   const bool AggressivelyInterleaveReductions =
5951       TTI.enableAggressiveInterleaving(HasReductions);
5952   if (!ScalarInterleavingRequiresRuntimePointerCheck &&
5953       !ScalarInterleavingRequiresPredication && LoopCost < SmallLoopCost) {
    // We assume that the loop overhead costs 1, and we use the cost model to
    // estimate the cost of the loop body; we interleave until the loop
    // overhead is about 5% of the total cost.
5957     unsigned SmallIC =
5958         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
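    // For example (illustrative numbers), if SmallLoopCost were 20 and
    // LoopCost = 3, the cap would be PowerOf2Floor(20 / 3) = 4 interleaved
    // copies.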
5959 
5960     // Interleave until store/load ports (estimated by max interleave count) are
5961     // saturated.
5962     unsigned NumStores = Legal->getNumStores();
5963     unsigned NumLoads = Legal->getNumLoads();
5964     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
5965     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
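    // Illustrative example (values assumed): with IC = 8, 2 stores and 3
    // loads, StoresIC = 8 / 2 = 4 and LoadsIC = 8 / 3 = 2; the larger of the
    // two is used below if it exceeds SmallIC.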
5966 
5967     // There is little point in interleaving for reductions containing selects
5968     // and compares when VF=1 since it may just create more overhead than it's
5969     // worth for loops with small trip counts. This is because we still have to
5970     // do the final reduction after the loop.
5971     bool HasSelectCmpReductions =
5972         HasReductions &&
5973         any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
5974           const RecurrenceDescriptor &RdxDesc = Reduction.second;
5975           return RecurrenceDescriptor::isSelectCmpRecurrenceKind(
5976               RdxDesc.getRecurrenceKind());
5977         });
5978     if (HasSelectCmpReductions) {
5979       LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n");
5980       return 1;
5981     }
5982 
5983     // If we have a scalar reduction (vector reductions are already dealt with
5984     // by this point), we can increase the critical path length if the loop
5985     // we're interleaving is inside another loop. For tree-wise reductions
5986     // set the limit to 2, and for ordered reductions it's best to disable
5987     // interleaving entirely.
5988     if (HasReductions && TheLoop->getLoopDepth() > 1) {
5989       bool HasOrderedReductions =
5990           any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
5991             const RecurrenceDescriptor &RdxDesc = Reduction.second;
5992             return RdxDesc.isOrdered();
5993           });
5994       if (HasOrderedReductions) {
5995         LLVM_DEBUG(
5996             dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
5997         return 1;
5998       }
5999 
6000       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
6001       SmallIC = std::min(SmallIC, F);
6002       StoresIC = std::min(StoresIC, F);
6003       LoadsIC = std::min(LoadsIC, F);
6004     }
6005 
6006     if (EnableLoadStoreRuntimeInterleave &&
6007         std::max(StoresIC, LoadsIC) > SmallIC) {
6008       LLVM_DEBUG(
6009           dbgs() << "LV: Interleaving to saturate store or load ports.\n");
6010       return std::max(StoresIC, LoadsIC);
6011     }
6012 
6013     // If there are scalar reductions and TTI has enabled aggressive
6014     // interleaving for reductions, we will interleave to expose ILP.
6015     if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
6016         AggressivelyInterleaveReductions) {
6017       LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
      // Interleave no less than SmallIC but not as aggressively as the
      // normal IC, to satisfy the rare situation when resources are too
      // limited.
6020       return std::max(IC / 2, SmallIC);
6021     } else {
6022       LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
6023       return SmallIC;
6024     }
6025   }
6026 
6027   // Interleave if this is a large loop (small loops are already dealt with by
6028   // this point) that could benefit from interleaving.
6029   if (AggressivelyInterleaveReductions) {
6030     LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6031     return IC;
6032   }
6033 
6034   LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
6035   return 1;
6036 }
6037 
6038 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
6039 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is
  // a very rough estimate. We scan the loop in topological order and assign
  // a number to each instruction. We use RPO to ensure that defs are met
  // before their users. We assume that each instruction that has in-loop
  // users starts an interval. We record every time that an in-loop value is
  // used, so we have a list of the first and last occurrences of each
  // instruction. Next, we transpose this data structure into a multi-map
  // that holds the list of intervals that *end* at a specific location. This
  // multi-map allows us to perform a linear search. We scan the instructions
  // linearly and record each time that a new interval starts, by placing it
  // in a set. If we find this value in the multi-map then we remove it from
  // the set. The max register usage is the maximum size of the set.
  // We also search for instructions that are defined outside the loop, but
  // are used inside the loop. We need this number separately from the
  // max-interval usage number because, when we unroll, loop-invariant values
  // do not consume more registers.
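  //
  // Illustrative example (names assumed): for a body containing
  //   %t0 = add %a, %b
  //   %t1 = mul %t0, %c
  //   %t2 = add %t1, %a
  // the interval for %t0 ends where %t1 is defined, while %a stays live
  // until %t2; the peak usage is the largest number of intervals open at
  // any single point.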
6057   LoopBlocksDFS DFS(TheLoop);
6058   DFS.perform(LI);
6059 
6060   RegisterUsage RU;
6061 
6062   // Each 'key' in the map opens a new interval. The values
6063   // of the map are the index of the 'last seen' usage of the
6064   // instruction that is the key.
6065   using IntervalMap = DenseMap<Instruction *, unsigned>;
6066 
6067   // Maps instruction to its index.
6068   SmallVector<Instruction *, 64> IdxToInstr;
6069   // Marks the end of each interval.
6070   IntervalMap EndPoint;
  // Saves the set of instructions that are used inside the loop.
6072   SmallPtrSet<Instruction *, 8> Ends;
6073   // Saves the list of values that are used in the loop but are
6074   // defined outside the loop, such as arguments and constants.
6075   SmallPtrSet<Value *, 8> LoopInvariants;
6076 
6077   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
6078     for (Instruction &I : BB->instructionsWithoutDebug()) {
6079       IdxToInstr.push_back(&I);
6080 
6081       // Save the end location of each USE.
6082       for (Value *U : I.operands()) {
6083         auto *Instr = dyn_cast<Instruction>(U);
6084 
6085         // Ignore non-instruction values such as arguments, constants, etc.
6086         if (!Instr)
6087           continue;
6088 
6089         // If this instruction is outside the loop then record it and continue.
6090         if (!TheLoop->contains(Instr)) {
6091           LoopInvariants.insert(Instr);
6092           continue;
6093         }
6094 
6095         // Overwrite previous end points.
6096         EndPoint[Instr] = IdxToInstr.size();
6097         Ends.insert(Instr);
6098       }
6099     }
6100   }
6101 
6102   // Saves the list of intervals that end with the index in 'key'.
6103   using InstrList = SmallVector<Instruction *, 2>;
6104   DenseMap<unsigned, InstrList> TransposeEnds;
6105 
6106   // Transpose the EndPoints to a list of values that end at each index.
6107   for (auto &Interval : EndPoint)
6108     TransposeEnds[Interval.second].push_back(Interval.first);
6109 
6110   SmallPtrSet<Instruction *, 8> OpenIntervals;
6111   SmallVector<RegisterUsage, 8> RUs(VFs.size());
6112   SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());
6113 
6114   LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
6115 
6116   // A lambda that gets the register usage for the given type and VF.
6117   const auto &TTICapture = TTI;
6118   auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned {
6119     if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty))
6120       return 0;
6121     InstructionCost::CostType RegUsage =
6122         *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue();
6123     assert(RegUsage >= 0 && RegUsage <= std::numeric_limits<unsigned>::max() &&
6124            "Nonsensical values for register usage.");
6125     return RegUsage;
6126   };
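  // For instance (target behavior assumed), GetRegUsage with Ty = i32 and
  // VF = 4 queries the usage of <4 x i32>, which a target with 128-bit
  // vector registers would typically report as one register.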
6127 
6128   for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
6129     Instruction *I = IdxToInstr[i];
6130 
6131     // Remove all of the instructions that end at this location.
6132     InstrList &List = TransposeEnds[i];
6133     for (Instruction *ToRemove : List)
6134       OpenIntervals.erase(ToRemove);
6135 
6136     // Ignore instructions that are never used within the loop.
6137     if (!Ends.count(I))
6138       continue;
6139 
6140     // Skip ignored values.
6141     if (ValuesToIgnore.count(I))
6142       continue;
6143 
6144     // For each VF find the maximum usage of registers.
6145     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
6146       // Count the number of live intervals.
6147       SmallMapVector<unsigned, unsigned, 4> RegUsage;
6148 
      if (VFs[j].isScalar()) {
        for (auto *Inst : OpenIntervals) {
          unsigned ClassID =
              TTI.getRegisterClassForType(false, Inst->getType());
          // The map's operator[] default-constructs missing entries to zero,
          // so a plain increment handles both new and existing classes.
          RegUsage[ClassID] += 1;
        }
      } else {
        collectUniformsAndScalars(VFs[j]);
        for (auto *Inst : OpenIntervals) {
          // Skip ignored values for VF > 1.
          if (VecValuesToIgnore.count(Inst))
            continue;
          if (isScalarAfterVectorization(Inst, VFs[j])) {
            unsigned ClassID =
                TTI.getRegisterClassForType(false, Inst->getType());
            RegUsage[ClassID] += 1;
          } else {
            unsigned ClassID =
                TTI.getRegisterClassForType(true, Inst->getType());
            RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
          }
        }
      }
6178 
      for (auto &pair : RegUsage)
        MaxUsages[j][pair.first] =
            std::max(MaxUsages[j][pair.first], pair.second);
6185     }
6186 
6187     LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
6188                       << OpenIntervals.size() << '\n');
6189 
6190     // Add the current instruction to the list of open intervals.
6191     OpenIntervals.insert(I);
6192   }
6193 
6194   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
6195     SmallMapVector<unsigned, unsigned, 4> Invariant;
6196 
6197     for (auto Inst : LoopInvariants) {
6198       unsigned Usage =
6199           VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
6200       unsigned ClassID =
6201           TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
      Invariant[ClassID] += Usage;
6206     }
6207 
6208     LLVM_DEBUG({
6209       dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
6210       dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
6211              << " item\n";
6212       for (const auto &pair : MaxUsages[i]) {
6213         dbgs() << "LV(REG): RegisterClass: "
6214                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6215                << " registers\n";
6216       }
6217       dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
6218              << " item\n";
6219       for (const auto &pair : Invariant) {
6220         dbgs() << "LV(REG): RegisterClass: "
6221                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6222                << " registers\n";
6223       }
6224     });
6225 
6226     RU.LoopInvariantRegs = Invariant;
6227     RU.MaxLocalUsers = MaxUsages[i];
6228     RUs[i] = RU;
6229   }
6230 
6231   return RUs;
6232 }
6233 
6234 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I,
6235                                                            ElementCount VF) {
  // TODO: Cost model for emulated masked load/store is completely
  // broken. This hack guides the cost model to use an artificially
  // high enough value to practically disable vectorization with such
  // operations, except where the previously deployed legality hack allowed
  // using very low cost values. This is to avoid regressions coming simply
  // from moving the "masked load/store" check from legality to the cost
  // model. Masked load/gather emulation was previously never allowed, while
  // a limited amount of masked store/scatter emulation was.
6244   assert(isPredicatedInst(I, VF) && "Expecting a scalar emulated instruction");
6245   return isa<LoadInst>(I) ||
6246          (isa<StoreInst>(I) &&
6247           NumPredStores > NumberOfStoresToPredicate);
6248 }
6249 
6250 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
6251   // If we aren't vectorizing the loop, or if we've already collected the
6252   // instructions to scalarize, there's nothing to do. Collection may already
6253   // have occurred if we have a user-selected VF and are now computing the
6254   // expected cost for interleaving.
6255   if (VF.isScalar() || VF.isZero() ||
6256       InstsToScalarize.find(VF) != InstsToScalarize.end())
6257     return;
6258 
  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
6260   // not profitable to scalarize any instructions, the presence of VF in the
6261   // map will indicate that we've analyzed it already.
6262   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
6263 
6264   // Find all the instructions that are scalar with predication in the loop and
6265   // determine if it would be better to not if-convert the blocks they are in.
6266   // If so, we also record the instructions to scalarize.
6267   for (BasicBlock *BB : TheLoop->blocks()) {
6268     if (!blockNeedsPredicationForAnyReason(BB))
6269       continue;
6270     for (Instruction &I : *BB)
6271       if (isScalarWithPredication(&I, VF)) {
6272         ScalarCostsTy ScalarCosts;
6273         // Do not apply discount if scalable, because that would lead to
6274         // invalid scalarization costs.
6275         // Do not apply discount logic if hacked cost is needed
6276         // for emulated masked memrefs.
6277         if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I, VF) &&
6278             computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
6279           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
6280         // Remember that BB will remain after vectorization.
6281         PredicatedBBsAfterVectorization.insert(BB);
6282       }
6283   }
6284 }
6285 
6286 int LoopVectorizationCostModel::computePredInstDiscount(
6287     Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
6288   assert(!isUniformAfterVectorization(PredInst, VF) &&
6289          "Instruction marked uniform-after-vectorization will be predicated");
6290 
6291   // Initialize the discount to zero, meaning that the scalar version and the
6292   // vector version cost the same.
6293   InstructionCost Discount = 0;
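  // Illustrative example (costs assumed): if the chain's vector cost adds up
  // to 10 and its probability-scaled scalar cost to 7, the discount is 3;
  // the caller treats any non-negative discount as a reason to scalarize.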
6294 
6295   // Holds instructions to analyze. The instructions we visit are mapped in
6296   // ScalarCosts. Those instructions are the ones that would be scalarized if
6297   // we find that the scalar version costs less.
6298   SmallVector<Instruction *, 8> Worklist;
6299 
6300   // Returns true if the given instruction can be scalarized.
6301   auto canBeScalarized = [&](Instruction *I) -> bool {
6302     // We only attempt to scalarize instructions forming a single-use chain
6303     // from the original predicated block that would otherwise be vectorized.
6304     // Although not strictly necessary, we give up on instructions we know will
6305     // already be scalar to avoid traversing chains that are unlikely to be
6306     // beneficial.
6307     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
6308         isScalarAfterVectorization(I, VF))
6309       return false;
6310 
6311     // If the instruction is scalar with predication, it will be analyzed
6312     // separately. We ignore it within the context of PredInst.
6313     if (isScalarWithPredication(I, VF))
6314       return false;
6315 
6316     // If any of the instruction's operands are uniform after vectorization,
6317     // the instruction cannot be scalarized. This prevents, for example, a
6318     // masked load from being scalarized.
6319     //
6320     // We assume we will only emit a value for lane zero of an instruction
6321     // marked uniform after vectorization, rather than VF identical values.
6322     // Thus, if we scalarize an instruction that uses a uniform, we would
6323     // create uses of values corresponding to the lanes we aren't emitting code
6324     // for. This behavior can be changed by allowing getScalarValue to clone
6325     // the lane zero values for uniforms rather than asserting.
6326     for (Use &U : I->operands())
6327       if (auto *J = dyn_cast<Instruction>(U.get()))
6328         if (isUniformAfterVectorization(J, VF))
6329           return false;
6330 
6331     // Otherwise, we can scalarize the instruction.
6332     return true;
6333   };
6334 
6335   // Compute the expected cost discount from scalarizing the entire expression
6336   // feeding the predicated instruction. We currently only consider expressions
6337   // that are single-use instruction chains.
6338   Worklist.push_back(PredInst);
6339   while (!Worklist.empty()) {
6340     Instruction *I = Worklist.pop_back_val();
6341 
6342     // If we've already analyzed the instruction, there's nothing to do.
6343     if (ScalarCosts.find(I) != ScalarCosts.end())
6344       continue;
6345 
6346     // Compute the cost of the vector instruction. Note that this cost already
6347     // includes the scalarization overhead of the predicated instruction.
6348     InstructionCost VectorCost = getInstructionCost(I, VF).first;
6349 
6350     // Compute the cost of the scalarized instruction. This cost is the cost of
6351     // the instruction as if it wasn't if-converted and instead remained in the
6352     // predicated block. We will scale this cost by block probability after
6353     // computing the scalarization overhead.
6354     InstructionCost ScalarCost =
6355         VF.getFixedValue() *
6356         getInstructionCost(I, ElementCount::getFixed(1)).first;
6357 
6358     // Compute the scalarization overhead of needed insertelement instructions
6359     // and phi nodes.
6360     if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) {
6361       ScalarCost += TTI.getScalarizationOverhead(
6362           cast<VectorType>(ToVectorTy(I->getType(), VF)),
6363           APInt::getAllOnes(VF.getFixedValue()), true, false);
6364       ScalarCost +=
6365           VF.getFixedValue() *
6366           TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput);
6367     }
6368 
6369     // Compute the scalarization overhead of needed extractelement
6370     // instructions. For each of the instruction's operands, if the operand can
6371     // be scalarized, add it to the worklist; otherwise, account for the
6372     // overhead.
6373     for (Use &U : I->operands())
6374       if (auto *J = dyn_cast<Instruction>(U.get())) {
6375         assert(VectorType::isValidElementType(J->getType()) &&
6376                "Instruction has non-scalar type");
6377         if (canBeScalarized(J))
6378           Worklist.push_back(J);
6379         else if (needsExtract(J, VF)) {
6380           ScalarCost += TTI.getScalarizationOverhead(
6381               cast<VectorType>(ToVectorTy(J->getType(), VF)),
6382               APInt::getAllOnes(VF.getFixedValue()), false, true);
6383         }
6384       }
6385 
6386     // Scale the total scalar cost by block probability.
6387     ScalarCost /= getReciprocalPredBlockProb();
6388 
6389     // Compute the discount. A non-negative discount means the vector version
6390     // of the instruction costs more, and scalarizing would be beneficial.
6391     Discount += VectorCost - ScalarCost;
6392     ScalarCosts[I] = ScalarCost;
6393   }
6394 
6395   return *Discount.getValue();
6396 }
6397 
6398 LoopVectorizationCostModel::VectorizationCostTy
6399 LoopVectorizationCostModel::expectedCost(
6400     ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) {
6401   VectorizationCostTy Cost;
6402 
6403   // For each block.
6404   for (BasicBlock *BB : TheLoop->blocks()) {
6405     VectorizationCostTy BlockCost;
6406 
6407     // For each instruction in the old loop.
6408     for (Instruction &I : BB->instructionsWithoutDebug()) {
6409       // Skip ignored values.
6410       if (ValuesToIgnore.count(&I) ||
6411           (VF.isVector() && VecValuesToIgnore.count(&I)))
6412         continue;
6413 
6414       VectorizationCostTy C = getInstructionCost(&I, VF);
6415 
6416       // Check if we should override the cost.
6417       if (C.first.isValid() &&
6418           ForceTargetInstructionCost.getNumOccurrences() > 0)
6419         C.first = InstructionCost(ForceTargetInstructionCost);
6420 
6421       // Keep a list of instructions with invalid costs.
6422       if (Invalid && !C.first.isValid())
6423         Invalid->emplace_back(&I, VF);
6424 
6425       BlockCost.first += C.first;
6426       BlockCost.second |= C.second;
6427       LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
6428                         << " for VF " << VF << " For instruction: " << I
6429                         << '\n');
6430     }
6431 
6432     // If we are vectorizing a predicated block, it will have been
6433     // if-converted. This means that the block's instructions (aside from
6434     // stores and instructions that may divide by zero) will now be
6435     // unconditionally executed. For the scalar case, we may not always execute
6436     // the predicated block, if it is an if-else block. Thus, scale the block's
6437     // cost by the probability of executing it. blockNeedsPredication from
6438     // Legal is used so as to not include all blocks in tail folded loops.
6439     if (VF.isScalar() && Legal->blockNeedsPredication(BB))
6440       BlockCost.first /= getReciprocalPredBlockProb();
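    // For example, assuming getReciprocalPredBlockProb() returns 2 (i.e. a
    // predicated block is expected to execute every other iteration), a
    // predicated block of cost 8 contributes 4 to the scalar loop cost.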
6441 
6442     Cost.first += BlockCost.first;
6443     Cost.second |= BlockCost.second;
6444   }
6445 
6446   return Cost;
6447 }
6448 
/// Gets the address access SCEV after verifying that the access pattern is
/// loop invariant except for the induction variable dependence.
///
/// This SCEV can be sent to the Target in order to estimate the address
/// calculation cost.
6454 static const SCEV *getAddressAccessSCEV(
6455               Value *Ptr,
6456               LoopVectorizationLegality *Legal,
6457               PredicatedScalarEvolution &PSE,
6458               const Loop *TheLoop) {
6459 
6460   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
6461   if (!Gep)
6462     return nullptr;
6463 
6464   // We are looking for a gep with all loop invariant indices except for one
6465   // which should be an induction variable.
6466   auto SE = PSE.getSE();
6467   unsigned NumOperands = Gep->getNumOperands();
6468   for (unsigned i = 1; i < NumOperands; ++i) {
6469     Value *Opd = Gep->getOperand(i);
6470     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
6471         !Legal->isInductionVariable(Opd))
6472       return nullptr;
6473   }
6474 
  // Now we know we have a GEP ptr, %inv, %ind, %inv. Return the Ptr SCEV.
6476   return PSE.getSCEV(Ptr);
6477 }
6478 
6479 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
6480   return Legal->hasStride(I->getOperand(0)) ||
6481          Legal->hasStride(I->getOperand(1));
6482 }
6483 
6484 InstructionCost
6485 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
6486                                                         ElementCount VF) {
6487   assert(VF.isVector() &&
6488          "Scalarization cost of instruction implies vectorization.");
6489   if (VF.isScalable())
6490     return InstructionCost::getInvalid();
6491 
6492   Type *ValTy = getLoadStoreType(I);
6493   auto SE = PSE.getSE();
6494 
6495   unsigned AS = getLoadStoreAddressSpace(I);
6496   Value *Ptr = getLoadStorePointerOperand(I);
6497   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
6498   // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost`
6499   //       that it is being called from this specific place.
6500 
  // Figure out whether the access is strided and get the stride value, if it
  // is known at compile time.
6503   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
6504 
6505   // Get the cost of the scalar memory instruction and address computation.
6506   InstructionCost Cost =
6507       VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
6508 
6509   // Don't pass *I here, since it is scalar but will actually be part of a
6510   // vectorized loop where the user of it is a vectorized instruction.
6511   const Align Alignment = getLoadStoreAlignment(I);
6512   Cost += VF.getKnownMinValue() *
6513           TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
6514                               AS, TTI::TCK_RecipThroughput);
6515 
6516   // Get the overhead of the extractelement and insertelement instructions
6517   // we might create due to scalarization.
6518   Cost += getScalarizationOverhead(I, VF);
6519 
6520   // If we have a predicated load/store, it will need extra i1 extracts and
6521   // conditional branches, but may not be executed for each vector lane. Scale
6522   // the cost by the probability of executing the predicated block.
6523   if (isPredicatedInst(I, VF)) {
6524     Cost /= getReciprocalPredBlockProb();
6525 
6526     // Add the cost of an i1 extract and a branch
6527     auto *Vec_i1Ty =
6528         VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF);
6529     Cost += TTI.getScalarizationOverhead(
6530         Vec_i1Ty, APInt::getAllOnes(VF.getKnownMinValue()),
6531         /*Insert=*/false, /*Extract=*/true);
6532     Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput);
6533 
6534     if (useEmulatedMaskMemRefHack(I, VF))
6535       // Artificially setting to a high enough value to practically disable
6536       // vectorization with such operations.
6537       Cost = 3000000;
6538   }
6539 
6540   return Cost;
6541 }
6542 
6543 InstructionCost
6544 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
6545                                                     ElementCount VF) {
6546   Type *ValTy = getLoadStoreType(I);
6547   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6548   Value *Ptr = getLoadStorePointerOperand(I);
6549   unsigned AS = getLoadStoreAddressSpace(I);
6550   int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr);
6551   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6552 
6553   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6554          "Stride should be 1 or -1 for consecutive memory access");
6555   const Align Alignment = getLoadStoreAlignment(I);
6556   InstructionCost Cost = 0;
6557   if (Legal->isMaskRequired(I))
6558     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6559                                       CostKind);
6560   else
6561     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6562                                 CostKind, I);
6563 
6564   bool Reverse = ConsecutiveStride < 0;
6565   if (Reverse)
6566     Cost +=
6567         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
6568   return Cost;
6569 }
6570 
6571 InstructionCost
6572 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
6573                                                 ElementCount VF) {
6574   assert(Legal->isUniformMemOp(*I));
6575 
6576   Type *ValTy = getLoadStoreType(I);
6577   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6578   const Align Alignment = getLoadStoreAlignment(I);
6579   unsigned AS = getLoadStoreAddressSpace(I);
6580   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6581   if (isa<LoadInst>(I)) {
6582     return TTI.getAddressComputationCost(ValTy) +
6583            TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
6584                                CostKind) +
6585            TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
6586   }
6587   StoreInst *SI = cast<StoreInst>(I);
6588 
6589   bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
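  // A store of a loop-invariant value needs no extract; otherwise the last
  // lane (VF - 1) of the vectorized value must be extracted and stored with
  // a scalar store, which is what the cost expression below models.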
6590   return TTI.getAddressComputationCost(ValTy) +
6591          TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
6592                              CostKind) +
6593          (isLoopInvariantStoreValue
6594               ? 0
6595               : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
6596                                        VF.getKnownMinValue() - 1));
6597 }
6598 
6599 InstructionCost
6600 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
6601                                                  ElementCount VF) {
6602   Type *ValTy = getLoadStoreType(I);
6603   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6604   const Align Alignment = getLoadStoreAlignment(I);
6605   const Value *Ptr = getLoadStorePointerOperand(I);
6606 
6607   return TTI.getAddressComputationCost(VectorTy) +
6608          TTI.getGatherScatterOpCost(
6609              I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
6610              TargetTransformInfo::TCK_RecipThroughput, I);
6611 }
6612 
6613 InstructionCost
6614 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
6615                                                    ElementCount VF) {
6616   // TODO: Once we have support for interleaving with scalable vectors
6617   // we can calculate the cost properly here.
6618   if (VF.isScalable())
6619     return InstructionCost::getInvalid();
6620 
6621   Type *ValTy = getLoadStoreType(I);
6622   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6623   unsigned AS = getLoadStoreAddressSpace(I);
6624 
6625   auto Group = getInterleavedAccessGroup(I);
  assert(Group && "Failed to get an interleaved access group.");
6627 
6628   unsigned InterleaveFactor = Group->getFactor();
6629   auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
6630 
6631   // Holds the indices of existing members in the interleaved group.
6632   SmallVector<unsigned, 4> Indices;
6633   for (unsigned IF = 0; IF < InterleaveFactor; IF++)
6634     if (Group->getMember(IF))
6635       Indices.push_back(IF);
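  // For example (group shape assumed), a factor-3 group with members at
  // positions 0 and 2 yields Indices = {0, 2}; the missing member at
  // position 1 is a gap, which for stores forces masking below.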
6636 
6637   // Calculate the cost of the whole interleaved group.
6638   bool UseMaskForGaps =
6639       (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) ||
6640       (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()));
6641   InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
6642       I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
6643       AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);
6644 
6645   if (Group->isReverse()) {
6646     // TODO: Add support for reversed masked interleaved access.
6647     assert(!Legal->isMaskRequired(I) &&
6648            "Reverse masked interleaved access not supported.");
6649     Cost +=
6650         Group->getNumMembers() *
6651         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
6652   }
6653   return Cost;
6654 }
6655 
6656 Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost(
6657     Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
6658   using namespace llvm::PatternMatch;
  // Early exit if there are no in-loop reductions.
6660   if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
6661     return None;
6662   auto *VectorTy = cast<VectorType>(Ty);
6663 
  // We are looking for one of the following patterns, taking the minimal
  // acceptable cost:
  //  reduce(mul(ext(A), ext(B))) or
  //  reduce(mul(A, B)) or
  //  reduce(ext(A)) or
  //  reduce(A).
  // The basic idea is that we walk down the tree, finding the root reduction
  // instruction in InLoopReductionImmediateChains. From there we find the
  // pattern of mul/ext and test the cost of the entire pattern vs the cost
  // of its components. If the reduction cost is lower, we return it for the
  // reduction instruction and 0 for the other instructions in the pattern.
  // If it is not, we return None, indicating that the original cost model
  // should be used.
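  //
  // Illustrative example (IR names assumed): in a chain computing
  //   %s += mul(sext(%a), sext(%b))
  // starting from either sext or from the mul, RetI walks down to the add
  // feeding the reduction, and the whole pattern is costed against a single
  // extended multiply-add reduction.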
6676   Instruction *RetI = I;
6677   if (match(RetI, m_ZExtOrSExt(m_Value()))) {
6678     if (!RetI->hasOneUser())
6679       return None;
6680     RetI = RetI->user_back();
6681   }
6682   if (match(RetI, m_Mul(m_Value(), m_Value())) &&
6683       RetI->user_back()->getOpcode() == Instruction::Add) {
6684     if (!RetI->hasOneUser())
6685       return None;
6686     RetI = RetI->user_back();
6687   }
6688 
  // Test if the found instruction is a reduction, and if not return None so
  // that the caller falls back to the original cost modeling.
6691   if (!InLoopReductionImmediateChains.count(RetI))
6692     return None;
6693 
6694   // Find the reduction this chain is a part of and calculate the basic cost of
6695   // the reduction on its own.
6696   Instruction *LastChain = InLoopReductionImmediateChains[RetI];
6697   Instruction *ReductionPhi = LastChain;
6698   while (!isa<PHINode>(ReductionPhi))
6699     ReductionPhi = InLoopReductionImmediateChains[ReductionPhi];
6700 
6701   const RecurrenceDescriptor &RdxDesc =
6702       Legal->getReductionVars().find(cast<PHINode>(ReductionPhi))->second;
6703 
6704   InstructionCost BaseCost = TTI.getArithmeticReductionCost(
6705       RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind);
6706 
6707   // For a call to the llvm.fmuladd intrinsic we need to add the cost of a
6708   // normal fmul instruction to the cost of the fadd reduction.
6709   if (RdxDesc.getRecurrenceKind() == RecurKind::FMulAdd)
6710     BaseCost +=
6711         TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind);
6712 
6713   // If we're using ordered reductions then we can just return the base cost
6714   // here, since getArithmeticReductionCost calculates the full ordered
6715   // reduction cost when FP reassociation is not allowed.
6716   if (useOrderedReductions(RdxDesc))
6717     return BaseCost;
6718 
6719   // Get the operand that was not the reduction chain and match it to one of the
6720   // patterns, returning the better cost if it is found.
6721   Instruction *RedOp = RetI->getOperand(1) == LastChain
6722                            ? dyn_cast<Instruction>(RetI->getOperand(0))
6723                            : dyn_cast<Instruction>(RetI->getOperand(1));
6724 
6725   VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
6726 
6727   Instruction *Op0, *Op1;
6728   if (RedOp &&
6729       match(RedOp,
6730             m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) &&
6731       match(Op0, m_ZExtOrSExt(m_Value())) &&
6732       Op0->getOpcode() == Op1->getOpcode() &&
6733       Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
6734       !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) &&
6735       (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) {
6736 
    // Matched reduce(ext(mul(ext(A), ext(B))))
6738     // Note that the extend opcodes need to all match, or if A==B they will have
6739     // been converted to zext(mul(sext(A), sext(A))) as it is known positive,
6740     // which is equally fine.
6741     bool IsUnsigned = isa<ZExtInst>(Op0);
6742     auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
6743     auto *MulType = VectorType::get(Op0->getType(), VectorTy);
6744 
6745     InstructionCost ExtCost =
6746         TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType,
6747                              TTI::CastContextHint::None, CostKind, Op0);
6748     InstructionCost MulCost =
6749         TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind);
6750     InstructionCost Ext2Cost =
6751         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType,
6752                              TTI::CastContextHint::None, CostKind, RedOp);
6753 
6754     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6755         /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6756         CostKind);
6757 
6758     if (RedCost.isValid() &&
6759         RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost)
6760       return I == RetI ? RedCost : 0;
6761   } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) &&
6762              !TheLoop->isLoopInvariant(RedOp)) {
6763     // Matched reduce(ext(A))
6764     bool IsUnsigned = isa<ZExtInst>(RedOp);
6765     auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
6766     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6767         /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6768         CostKind);
6769 
6770     InstructionCost ExtCost =
6771         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
6772                              TTI::CastContextHint::None, CostKind, RedOp);
6773     if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
6774       return I == RetI ? RedCost : 0;
6775   } else if (RedOp &&
6776              match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) {
6777     if (match(Op0, m_ZExtOrSExt(m_Value())) &&
6778         Op0->getOpcode() == Op1->getOpcode() &&
6779         !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
6780       bool IsUnsigned = isa<ZExtInst>(Op0);
6781       Type *Op0Ty = Op0->getOperand(0)->getType();
6782       Type *Op1Ty = Op1->getOperand(0)->getType();
6783       Type *LargestOpTy =
6784           Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty
6785                                                                     : Op0Ty;
6786       auto *ExtType = VectorType::get(LargestOpTy, VectorTy);
6787 
6788       // Matched reduce(mul(ext(A), ext(B))), where the two ext may be of
6789       // different sizes. We take the largest type as the ext to reduce, and add
6790       // the remaining cost as, for example reduce(mul(ext(ext(A)), ext(B))).
6791       InstructionCost ExtCost0 = TTI.getCastInstrCost(
6792           Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy),
6793           TTI::CastContextHint::None, CostKind, Op0);
6794       InstructionCost ExtCost1 = TTI.getCastInstrCost(
6795           Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy),
6796           TTI::CastContextHint::None, CostKind, Op1);
6797       InstructionCost MulCost =
6798           TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
6799 
6800       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6801           /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6802           CostKind);
6803       InstructionCost ExtraExtCost = 0;
6804       if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) {
6805         Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1;
6806         ExtraExtCost = TTI.getCastInstrCost(
6807             ExtraExtOp->getOpcode(), ExtType,
6808             VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy),
6809             TTI::CastContextHint::None, CostKind, ExtraExtOp);
6810       }
6811 
6812       if (RedCost.isValid() &&
6813           (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost))
6814         return I == RetI ? RedCost : 0;
6815     } else if (!match(I, m_ZExtOrSExt(m_Value()))) {
6816       // Matched reduce(mul())
6817       InstructionCost MulCost =
6818           TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
6819 
6820       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6821           /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy,
6822           CostKind);
6823 
6824       if (RedCost.isValid() && RedCost < MulCost + BaseCost)
6825         return I == RetI ? RedCost : 0;
6826     }
6827   }
6828 
6829   return I == RetI ? Optional<InstructionCost>(BaseCost) : None;
6830 }
6831 
6832 InstructionCost
6833 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
6834                                                      ElementCount VF) {
6835   // Calculate scalar cost only. Vectorization cost should be ready at this
6836   // moment.
6837   if (VF.isScalar()) {
6838     Type *ValTy = getLoadStoreType(I);
6839     const Align Alignment = getLoadStoreAlignment(I);
6840     unsigned AS = getLoadStoreAddressSpace(I);
6841 
6842     return TTI.getAddressComputationCost(ValTy) +
6843            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
6844                                TTI::TCK_RecipThroughput, I);
6845   }
6846   return getWideningCost(I, VF);
6847 }
6848 
6849 LoopVectorizationCostModel::VectorizationCostTy
6850 LoopVectorizationCostModel::getInstructionCost(Instruction *I,
6851                                                ElementCount VF) {
6852   // If we know that this instruction will remain uniform, check the cost of
6853   // the scalar version.
6854   if (isUniformAfterVectorization(I, VF))
6855     VF = ElementCount::getFixed(1);
6856 
6857   if (VF.isVector() && isProfitableToScalarize(I, VF))
6858     return VectorizationCostTy(InstsToScalarize[VF][I], false);
6859 
6860   // Forced scalars do not have any scalarization overhead.
6861   auto ForcedScalar = ForcedScalars.find(VF);
6862   if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
6863     auto InstSet = ForcedScalar->second;
6864     if (InstSet.count(I))
6865       return VectorizationCostTy(
6866           (getInstructionCost(I, ElementCount::getFixed(1)).first *
6867            VF.getKnownMinValue()),
6868           false);
6869   }
6870 
6871   Type *VectorTy;
6872   InstructionCost C = getInstructionCost(I, VF, VectorTy);
6873 
6874   bool TypeNotScalarized = false;
6875   if (VF.isVector() && VectorTy->isVectorTy()) {
6876     unsigned NumParts = TTI.getNumberOfParts(VectorTy);
6877     if (NumParts)
6878       TypeNotScalarized = NumParts < VF.getKnownMinValue();
6879     else
6880       C = InstructionCost::getInvalid();
6881   }
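  // For example (legalization assumed), if VectorTy is <8 x i32> and the
  // target splits it into two registers, NumParts = 2 < 8 and the type
  // counts as not scalarized; NumParts == 0 means the target cannot
  // legalize the type at all, so the cost is marked invalid.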
6882   return VectorizationCostTy(C, TypeNotScalarized);
6883 }
6884 
6885 InstructionCost
6886 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
6887                                                      ElementCount VF) const {
6888 
6889   // There is no mechanism yet to create a scalable scalarization loop,
6890   // so this is currently Invalid.
6891   if (VF.isScalable())
6892     return InstructionCost::getInvalid();
6893 
6894   if (VF.isScalar())
6895     return 0;
6896 
6897   InstructionCost Cost = 0;
6898   Type *RetTy = ToVectorTy(I->getType(), VF);
6899   if (!RetTy->isVoidTy() &&
6900       (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
6901     Cost += TTI.getScalarizationOverhead(
6902         cast<VectorType>(RetTy), APInt::getAllOnes(VF.getKnownMinValue()), true,
6903         false);
6904 
6905   // Some targets keep addresses scalar.
6906   if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
6907     return Cost;
6908 
6909   // Some targets support efficient element stores.
6910   if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
6911     return Cost;
6912 
6913   // Collect operands to consider.
6914   CallInst *CI = dyn_cast<CallInst>(I);
6915   Instruction::op_range Ops = CI ? CI->args() : I->operands();
6916 
6917   // Skip operands that do not require extraction/scalarization and do not incur
6918   // any overhead.
6919   SmallVector<Type *> Tys;
6920   for (auto *V : filterExtractingOperands(Ops, VF))
6921     Tys.push_back(MaybeVectorizeType(V->getType(), VF));
6922   return Cost + TTI.getOperandsScalarizationOverhead(
6923                     filterExtractingOperands(Ops, VF), Tys);
6924 }
6925 
6926 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
6927   if (VF.isScalar())
6928     return;
6929   NumPredStores = 0;
6930   for (BasicBlock *BB : TheLoop->blocks()) {
6931     // For each instruction in the old loop.
6932     for (Instruction &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
6934       if (!Ptr)
6935         continue;
6936 
6937       // TODO: We should generate better code and update the cost model for
6938       // predicated uniform stores. Today they are treated as any other
6939       // predicated store (see added test cases in
6940       // invariant-store-vectorization.ll).
6941       if (isa<StoreInst>(&I) && isScalarWithPredication(&I, VF))
6942         NumPredStores++;
6943 
6944       if (Legal->isUniformMemOp(I)) {
6945         // TODO: Avoid replicating loads and stores instead of
6946         // relying on instcombine to remove them.
6947         // Load: Scalar load + broadcast
6948         // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
6949         InstructionCost Cost;
6950         if (isa<StoreInst>(&I) && VF.isScalable() &&
6951             isLegalGatherOrScatter(&I, VF)) {
6952           Cost = getGatherScatterCost(&I, VF);
6953           setWideningDecision(&I, VF, CM_GatherScatter, Cost);
6954         } else {
6955           assert((isa<LoadInst>(&I) || !VF.isScalable()) &&
6956                  "Cannot yet scalarize uniform stores");
6957           Cost = getUniformMemOpCost(&I, VF);
6958           setWideningDecision(&I, VF, CM_Scalarize, Cost);
6959         }
6960         continue;
6961       }
6962 
6963       // We assume that widening is the best solution when possible.
6964       if (memoryInstructionCanBeWidened(&I, VF)) {
6965         InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
6966         int ConsecutiveStride = Legal->isConsecutivePtr(
6967             getLoadStoreType(&I), getLoadStorePointerOperand(&I));
6968         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6969                "Expected consecutive stride.");
6970         InstWidening Decision =
6971             ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
6972         setWideningDecision(&I, VF, Decision, Cost);
6973         continue;
6974       }
6975 
6976       // Choose between Interleaving, Gather/Scatter or Scalarization.
6977       InstructionCost InterleaveCost = InstructionCost::getInvalid();
6978       unsigned NumAccesses = 1;
6979       if (isAccessInterleaved(&I)) {
6980         auto Group = getInterleavedAccessGroup(&I);
        assert(Group && "Failed to get an interleaved access group.");
6982 
6983         // Make one decision for the whole group.
6984         if (getWideningDecision(&I, VF) != CM_Unknown)
6985           continue;
6986 
6987         NumAccesses = Group->getNumMembers();
6988         if (interleavedAccessCanBeWidened(&I, VF))
6989           InterleaveCost = getInterleaveGroupCost(&I, VF);
6990       }
6991 
6992       InstructionCost GatherScatterCost =
6993           isLegalGatherOrScatter(&I, VF)
6994               ? getGatherScatterCost(&I, VF) * NumAccesses
6995               : InstructionCost::getInvalid();
6996 
6997       InstructionCost ScalarizationCost =
6998           getMemInstScalarizationCost(&I, VF) * NumAccesses;
6999 
7000       // Choose better solution for the current VF,
7001       // write down this decision and use it during vectorization.
7002       InstructionCost Cost;
7003       InstWidening Decision;
7004       if (InterleaveCost <= GatherScatterCost &&
7005           InterleaveCost < ScalarizationCost) {
7006         Decision = CM_Interleave;
7007         Cost = InterleaveCost;
7008       } else if (GatherScatterCost < ScalarizationCost) {
7009         Decision = CM_GatherScatter;
7010         Cost = GatherScatterCost;
7011       } else {
7012         Decision = CM_Scalarize;
7013         Cost = ScalarizationCost;
7014       }
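      // Illustrative example (costs assumed): with InterleaveCost = 6,
      // GatherScatterCost = 8 and ScalarizationCost = 12, the first branch
      // selects CM_Interleave; an invalid cost compares as more expensive
      // than any valid one, so unavailable strategies never win here.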
      // If the instruction belongs to an interleave group, the whole group
      // receives the same decision. The whole group receives the cost, but
      // the cost will actually be assigned to one instruction.
7018       if (auto Group = getInterleavedAccessGroup(&I))
7019         setWideningDecision(Group, VF, Decision, Cost);
7020       else
7021         setWideningDecision(&I, VF, Decision, Cost);
7022     }
7023   }
7024 
7025   // Make sure that any load of address and any other address computation
7026   // remains scalar unless there is gather/scatter support. This avoids
7027   // inevitable extracts into address registers, and also has the benefit of
7028   // activating LSR more, since that pass can't optimize vectorized
7029   // addresses.
7030   if (TTI.prefersVectorizedAddressing())
7031     return;
7032 
7033   // Start with all scalar pointer uses.
7034   SmallPtrSet<Instruction *, 8> AddrDefs;
7035   for (BasicBlock *BB : TheLoop->blocks())
7036     for (Instruction &I : *BB) {
7037       Instruction *PtrDef =
7038         dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
7039       if (PtrDef && TheLoop->contains(PtrDef) &&
7040           getWideningDecision(&I, VF) != CM_GatherScatter)
7041         AddrDefs.insert(PtrDef);
7042     }
7043 
7044   // Add all instructions used to generate the addresses.
7045   SmallVector<Instruction *, 4> Worklist;
7046   append_range(Worklist, AddrDefs);
7047   while (!Worklist.empty()) {
7048     Instruction *I = Worklist.pop_back_val();
7049     for (auto &Op : I->operands())
7050       if (auto *InstOp = dyn_cast<Instruction>(Op))
7051         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
7052             AddrDefs.insert(InstOp).second)
7053           Worklist.push_back(InstOp);
7054   }
7055 
7056   for (auto *I : AddrDefs) {
7057     if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by
      // cost functions, but since this involves the task of finding out if
      // the loaded register is involved in an address computation, it is
      // instead changed here when we know this is the case.
7062       InstWidening Decision = getWideningDecision(I, VF);
7063       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
7064         // Scalarize a widened load of address.
7065         setWideningDecision(
7066             I, VF, CM_Scalarize,
7067             (VF.getKnownMinValue() *
7068              getMemoryInstructionCost(I, ElementCount::getFixed(1))));
7069       else if (auto Group = getInterleavedAccessGroup(I)) {
7070         // Scalarize an interleave group of address loads.
7071         for (unsigned I = 0; I < Group->getFactor(); ++I) {
7072           if (Instruction *Member = Group->getMember(I))
7073             setWideningDecision(
7074                 Member, VF, CM_Scalarize,
7075                 (VF.getKnownMinValue() *
7076                  getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
7077         }
7078       }
7079     } else
      // Make sure I gets scalarized and receives a cost estimate without
      // scalarization overhead.
7082       ForcedScalars[VF].insert(I);
7083   }
7084 }
7085 
7086 InstructionCost
7087 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
7088                                                Type *&VectorTy) {
7089   Type *RetTy = I->getType();
7090   if (canTruncateToMinimalBitwidth(I, VF))
7091     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
7092   auto SE = PSE.getSE();
7093   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
7094 
7095   auto hasSingleCopyAfterVectorization = [this](Instruction *I,
7096                                                 ElementCount VF) -> bool {
7097     if (VF.isScalar())
7098       return true;
7099 
7100     auto Scalarized = InstsToScalarize.find(VF);
7101     assert(Scalarized != InstsToScalarize.end() &&
7102            "VF not yet analyzed for scalarization profitability");
7103     return !Scalarized->second.count(I) &&
7104            llvm::all_of(I->users(), [&](User *U) {
7105              auto *UI = cast<Instruction>(U);
7106              return !Scalarized->second.count(UI);
7107            });
7108   };
7109   (void) hasSingleCopyAfterVectorization;
7110 
7111   if (isScalarAfterVectorization(I, VF)) {
7112     // With the exception of GEPs and PHIs, after scalarization there should
7113     // only be one copy of the instruction generated in the loop. This is
7114     // because the VF is either 1, or any instructions that need scalarizing
    // have already been dealt with by the time we get here. As a result,
7116     // it means we don't have to multiply the instruction cost by VF.
7117     assert(I->getOpcode() == Instruction::GetElementPtr ||
7118            I->getOpcode() == Instruction::PHI ||
7119            (I->getOpcode() == Instruction::BitCast &&
7120             I->getType()->isPointerTy()) ||
7121            hasSingleCopyAfterVectorization(I, VF));
7122     VectorTy = RetTy;
7123   } else
7124     VectorTy = ToVectorTy(RetTy, VF);
7125 
7126   // TODO: We need to estimate the cost of intrinsic calls.
7127   switch (I->getOpcode()) {
7128   case Instruction::GetElementPtr:
7129     // We mark this instruction as zero-cost because the cost of GEPs in
7130     // vectorized code depends on whether the corresponding memory instruction
7131     // is scalarized or not. Therefore, we handle GEPs with the memory
7132     // instruction cost.
7133     return 0;
7134   case Instruction::Br: {
    // In cases of scalarized and predicated instructions, there will be VF
    // predicated blocks in the vectorized loop. Each branch around these
    // blocks also requires an extract of its vector compare i1 element.
7138     bool ScalarPredicatedBB = false;
7139     BranchInst *BI = cast<BranchInst>(I);
7140     if (VF.isVector() && BI->isConditional() &&
7141         (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
7142          PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
7143       ScalarPredicatedBB = true;
7144 
7145     if (ScalarPredicatedBB) {
      // Not possible to scalarize a scalable vector with predicated
      // instructions.
7147       if (VF.isScalable())
7148         return InstructionCost::getInvalid();
7149       // Return cost for branches around scalarized and predicated blocks.
7150       auto *Vec_i1Ty =
7151           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
7152       return (
7153           TTI.getScalarizationOverhead(
7154               Vec_i1Ty, APInt::getAllOnes(VF.getFixedValue()), false, true) +
7155           (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue()));
7156     } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
7157       // The back-edge branch will remain, as will all scalar branches.
7158       return TTI.getCFInstrCost(Instruction::Br, CostKind);
7159     else
7160       // This branch will be eliminated by if-conversion.
7161       return 0;
7162     // Note: We currently assume zero cost for an unconditional branch inside
7163     // a predicated block since it will become a fall-through, although we
7164     // may decide in the future to call TTI for all branches.
7165   }
7166   case Instruction::PHI: {
7167     auto *Phi = cast<PHINode>(I);
7168 
7169     // First-order recurrences are replaced by vector shuffles inside the loop.
7170     // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
7171     if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi))
7172       return TTI.getShuffleCost(
7173           TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy),
7174           None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1));
7175 
7176     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
7177     // converted into select instructions. We require N - 1 selects per phi
7178     // node, where N is the number of incoming values.
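    // For example, a phi merging values from three predecessors is lowered
    // to two select instructions.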
7179     if (VF.isVector() && Phi->getParent() != TheLoop->getHeader())
7180       return (Phi->getNumIncomingValues() - 1) *
7181              TTI.getCmpSelInstrCost(
7182                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
7183                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
7184                  CmpInst::BAD_ICMP_PREDICATE, CostKind);
7185 
7186     return TTI.getCFInstrCost(Instruction::PHI, CostKind);
7187   }
7188   case Instruction::UDiv:
7189   case Instruction::SDiv:
7190   case Instruction::URem:
7191   case Instruction::SRem:
7192     // If we have a predicated instruction, it may not be executed for each
7193     // vector lane. Get the scalarization cost and scale this amount by the
7194     // probability of executing the predicated block. If the instruction is not
7195     // predicated, we fall through to the next case.
7196     if (VF.isVector() && isScalarWithPredication(I, VF)) {
7197       InstructionCost Cost = 0;
7198 
7199       // These instructions have a non-void type, so account for the phi nodes
7200       // that we will create. This cost is likely to be zero. The phi node
7201       // cost, if any, should be scaled by the block probability because it
7202       // models a copy at the end of each predicated block.
7203       Cost += VF.getKnownMinValue() *
7204               TTI.getCFInstrCost(Instruction::PHI, CostKind);
7205 
7206       // The cost of the non-predicated instruction.
7207       Cost += VF.getKnownMinValue() *
7208               TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);
7209 
7210       // The cost of insertelement and extractelement instructions needed for
7211       // scalarization.
7212       Cost += getScalarizationOverhead(I, VF);
7213 
7214       // Scale the cost by the probability of executing the predicated blocks.
7215       // This assumes the predicated block for each vector lane is equally
7216       // likely.
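      // For example, if the total cost computed above is 8 and the
      // reciprocal block probability is 2 (i.e. each predicated block is
      // assumed to execute half the time), the returned cost is 4.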
7217       return Cost / getReciprocalPredBlockProb();
7218     }
7219     LLVM_FALLTHROUGH;
7220   case Instruction::Add:
7221   case Instruction::FAdd:
7222   case Instruction::Sub:
7223   case Instruction::FSub:
7224   case Instruction::Mul:
7225   case Instruction::FMul:
7226   case Instruction::FDiv:
7227   case Instruction::FRem:
7228   case Instruction::Shl:
7229   case Instruction::LShr:
7230   case Instruction::AShr:
7231   case Instruction::And:
7232   case Instruction::Or:
7233   case Instruction::Xor: {
    // Since we will replace the stride by 1, the multiplication should go away.
7235     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
7236       return 0;
7237 
7238     // Detect reduction patterns
7239     if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7240       return *RedCost;
7241 
    // Certain instructions can be cheaper to vectorize if they have a constant
    // second vector operand. One example of this is shifts on x86.
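    // For example, 'shl <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>' can
    // typically use a shift-by-immediate encoding, whereas a shift by a
    // variable vector amount may be more expensive on some subtargets
    // (an illustrative example, not a guarantee for any target).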
7244     Value *Op2 = I->getOperand(1);
7245     TargetTransformInfo::OperandValueProperties Op2VP;
7246     TargetTransformInfo::OperandValueKind Op2VK =
7247         TTI.getOperandInfo(Op2, Op2VP);
7248     if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
7249       Op2VK = TargetTransformInfo::OK_UniformValue;
7250 
7251     SmallVector<const Value *, 4> Operands(I->operand_values());
7252     return TTI.getArithmeticInstrCost(
7253         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7254         Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
7255   }
7256   case Instruction::FNeg: {
7257     return TTI.getArithmeticInstrCost(
7258         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7259         TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None,
7260         TargetTransformInfo::OP_None, I->getOperand(0), I);
7261   }
7262   case Instruction::Select: {
7263     SelectInst *SI = cast<SelectInst>(I);
7264     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
7265     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
7266 
7267     const Value *Op0, *Op1;
7268     using namespace llvm::PatternMatch;
7269     if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
7270                         match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
7271       // select x, y, false --> x & y
7272       // select x, true, y --> x | y
7273       TTI::OperandValueProperties Op1VP = TTI::OP_None;
7274       TTI::OperandValueProperties Op2VP = TTI::OP_None;
7275       TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP);
7276       TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP);
      assert(Op0->getType()->getScalarSizeInBits() == 1 &&
             Op1->getType()->getScalarSizeInBits() == 1);
7279 
7280       SmallVector<const Value *, 2> Operands{Op0, Op1};
7281       return TTI.getArithmeticInstrCost(
7282           match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And, VectorTy,
7283           CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I);
7284     }
7285 
7286     Type *CondTy = SI->getCondition()->getType();
7287     if (!ScalarCond)
7288       CondTy = VectorType::get(CondTy, VF);
7289 
7290     CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
7291     if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition()))
7292       Pred = Cmp->getPredicate();
7293     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, Pred,
7294                                   CostKind, I);
7295   }
7296   case Instruction::ICmp:
7297   case Instruction::FCmp: {
7298     Type *ValTy = I->getOperand(0)->getType();
7299     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
7300     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
7301       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
7302     VectorTy = ToVectorTy(ValTy, VF);
7303     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr,
7304                                   cast<CmpInst>(I)->getPredicate(), CostKind,
7305                                   I);
7306   }
7307   case Instruction::Store:
7308   case Instruction::Load: {
7309     ElementCount Width = VF;
7310     if (Width.isVector()) {
7311       InstWidening Decision = getWideningDecision(I, Width);
7312       assert(Decision != CM_Unknown &&
7313              "CM decision should be taken at this point");
7314       if (Decision == CM_Scalarize)
7315         Width = ElementCount::getFixed(1);
7316     }
7317     VectorTy = ToVectorTy(getLoadStoreType(I), Width);
7318     return getMemoryInstructionCost(I, VF);
7319   }
7320   case Instruction::BitCast:
7321     if (I->getType()->isPointerTy())
7322       return 0;
7323     LLVM_FALLTHROUGH;
7324   case Instruction::ZExt:
7325   case Instruction::SExt:
7326   case Instruction::FPToUI:
7327   case Instruction::FPToSI:
7328   case Instruction::FPExt:
7329   case Instruction::PtrToInt:
7330   case Instruction::IntToPtr:
7331   case Instruction::SIToFP:
7332   case Instruction::UIToFP:
7333   case Instruction::Trunc:
7334   case Instruction::FPTrunc: {
7335     // Computes the CastContextHint from a Load/Store instruction.
7336     auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
7337       assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
7338              "Expected a load or a store!");
7339 
7340       if (VF.isScalar() || !TheLoop->contains(I))
7341         return TTI::CastContextHint::Normal;
7342 
7343       switch (getWideningDecision(I, VF)) {
7344       case LoopVectorizationCostModel::CM_GatherScatter:
7345         return TTI::CastContextHint::GatherScatter;
7346       case LoopVectorizationCostModel::CM_Interleave:
7347         return TTI::CastContextHint::Interleave;
7348       case LoopVectorizationCostModel::CM_Scalarize:
7349       case LoopVectorizationCostModel::CM_Widen:
7350         return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked
7351                                         : TTI::CastContextHint::Normal;
7352       case LoopVectorizationCostModel::CM_Widen_Reverse:
7353         return TTI::CastContextHint::Reversed;
7354       case LoopVectorizationCostModel::CM_Unknown:
7355         llvm_unreachable("Instr did not go through cost modelling?");
7356       }
7357 
7358       llvm_unreachable("Unhandled case!");
7359     };
7360 
7361     unsigned Opcode = I->getOpcode();
7362     TTI::CastContextHint CCH = TTI::CastContextHint::None;
    // For Trunc/FPTrunc, the context is the only user, which must be a
    // StoreInst.
7364     if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
7365       if (I->hasOneUse())
7366         if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
7367           CCH = ComputeCCH(Store);
7368     }
    // For ZExt/SExt/FPExt, the context is the operand, which must be a
    // LoadInst.
7370     else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
7371              Opcode == Instruction::FPExt) {
7372       if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
7373         CCH = ComputeCCH(Load);
7374     }
7375 
7376     // We optimize the truncation of induction variables having constant
7377     // integer steps. The cost of these truncations is the same as the scalar
7378     // operation.
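    // For example, 'trunc i64 %iv to i32' of an induction with a constant
    // step can be handled by generating the vector induction directly in
    // i32 (an illustrative sketch).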
7379     if (isOptimizableIVTruncate(I, VF)) {
7380       auto *Trunc = cast<TruncInst>(I);
7381       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
7382                                   Trunc->getSrcTy(), CCH, CostKind, Trunc);
7383     }
7384 
7385     // Detect reduction patterns
7386     if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7387       return *RedCost;
7388 
7389     Type *SrcScalarTy = I->getOperand(0)->getType();
7390     Type *SrcVecTy =
7391         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
7392     if (canTruncateToMinimalBitwidth(I, VF)) {
7393       // This cast is going to be shrunk. This may remove the cast or it might
      // turn it into a slightly different cast. For example, if MinBW == 16,
7395       // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
7396       //
7397       // Calculate the modified src and dest types.
7398       Type *MinVecTy = VectorTy;
7399       if (Opcode == Instruction::Trunc) {
7400         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
7401         VectorTy =
7402             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7403       } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
7404         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
7405         VectorTy =
7406             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7407       }
7408     }
7409 
7410     return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
7411   }
7412   case Instruction::Call: {
7413     if (RecurrenceDescriptor::isFMulAddIntrinsic(I))
7414       if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7415         return *RedCost;
7416     bool NeedToScalarize;
7417     CallInst *CI = cast<CallInst>(I);
7418     InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
7419     if (getVectorIntrinsicIDForCall(CI, TLI)) {
7420       InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF);
7421       return std::min(CallCost, IntrinsicCost);
7422     }
7423     return CallCost;
7424   }
7425   case Instruction::ExtractValue:
7426     return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput);
7427   case Instruction::Alloca:
7428     // We cannot easily widen alloca to a scalable alloca, as
7429     // the result would need to be a vector of pointers.
7430     if (VF.isScalable())
7431       return InstructionCost::getInvalid();
7432     LLVM_FALLTHROUGH;
7433   default:
7434     // This opcode is unknown. Assume that it is the same as 'mul'.
7435     return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
7436   } // end of switch.
7437 }
7438 
7439 char LoopVectorize::ID = 0;
7440 
7441 static const char lv_name[] = "Loop Vectorization";
7442 
7443 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
7444 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
7445 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
7446 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
7447 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
7448 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
7449 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
7450 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
7451 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
7452 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
7453 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
7454 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
7455 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
7456 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
7457 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
7458 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
7459 
7460 namespace llvm {
7461 
7462 Pass *createLoopVectorizePass() { return new LoopVectorize(); }
7463 
7464 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
7465                               bool VectorizeOnlyWhenForced) {
7466   return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
7467 }
7468 
7469 } // end namespace llvm
7470 
7471 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
7472   // Check if the pointer operand of a load or store instruction is
7473   // consecutive.
7474   if (auto *Ptr = getLoadStorePointerOperand(Inst))
7475     return Legal->isConsecutivePtr(getLoadStoreType(Inst), Ptr);
7476   return false;
7477 }
7478 
7479 void LoopVectorizationCostModel::collectValuesToIgnore() {
7480   // Ignore ephemeral values.
7481   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
7482 
7483   // Ignore type-promoting instructions we identified during reduction
7484   // detection.
7485   for (auto &Reduction : Legal->getReductionVars()) {
7486     const RecurrenceDescriptor &RedDes = Reduction.second;
7487     const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
7488     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7489   }
7490   // Ignore type-casting instructions we identified during induction
7491   // detection.
7492   for (auto &Induction : Legal->getInductionVars()) {
7493     const InductionDescriptor &IndDes = Induction.second;
7494     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
7495     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7496   }
7497 }
7498 
7499 void LoopVectorizationCostModel::collectInLoopReductions() {
7500   for (auto &Reduction : Legal->getReductionVars()) {
7501     PHINode *Phi = Reduction.first;
7502     const RecurrenceDescriptor &RdxDesc = Reduction.second;
7503 
7504     // We don't collect reductions that are type promoted (yet).
7505     if (RdxDesc.getRecurrenceType() != Phi->getType())
7506       continue;
7507 
7508     // If the target would prefer this reduction to happen "in-loop", then we
7509     // want to record it as such.
7510     unsigned Opcode = RdxDesc.getOpcode();
7511     if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) &&
7512         !TTI.preferInLoopReduction(Opcode, Phi->getType(),
7513                                    TargetTransformInfo::ReductionFlags()))
7514       continue;
7515 
7516     // Check that we can correctly put the reductions into the loop, by
7517     // finding the chain of operations that leads from the phi to the loop
7518     // exit value.
7519     SmallVector<Instruction *, 4> ReductionOperations =
7520         RdxDesc.getReductionOpChain(Phi, TheLoop);
7521     bool InLoop = !ReductionOperations.empty();
7522     if (InLoop) {
7523       InLoopReductionChains[Phi] = ReductionOperations;
7524       // Add the elements to InLoopReductionImmediateChains for cost modelling.
7525       Instruction *LastChain = Phi;
7526       for (auto *I : ReductionOperations) {
7527         InLoopReductionImmediateChains[I] = LastChain;
7528         LastChain = I;
7529       }
7530     }
7531     LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
7532                       << " reduction for phi: " << *Phi << "\n");
7533   }
7534 }
7535 
// TODO: we could return a pair of values that specify the max VF and
// min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
// `buildVPlans(VF, VF)`. We cannot do that yet because VPlan at the moment
// doesn't have a cost model that can choose which plan to execute if
// more than one is generated.
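// For example, with a 256-bit widest vector register and a widest scalar
// type of 32 bits, this returns a VF of 8 (an illustrative computation).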
7541 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
7542                                  LoopVectorizationCostModel &CM) {
7543   unsigned WidestType;
7544   std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
7545   return WidestVectorRegBits / WidestType;
7546 }
7547 
7548 VectorizationFactor
7549 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
7550   assert(!UserVF.isScalable() && "scalable vectors not yet supported");
7551   ElementCount VF = UserVF;
  // Outer loop handling: outer loops may require CFG and instruction-level
  // transformations before even evaluating whether vectorization is
  // profitable. Since we cannot modify the incoming IR, we need to build
  // VPlan upfront in the vectorization pipeline.
7556   if (!OrigLoop->isInnermost()) {
7557     // If the user doesn't provide a vectorization factor, determine a
7558     // reasonable one.
7559     if (UserVF.isZero()) {
7560       VF = ElementCount::getFixed(determineVPlanVF(
7561           TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
7562               .getFixedSize(),
7563           CM));
7564       LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
7565 
7566       // Make sure we have a VF > 1 for stress testing.
7567       if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
7568         LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
7569                           << "overriding computed VF.\n");
7570         VF = ElementCount::getFixed(4);
7571       }
7572     }
7573     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7574     assert(isPowerOf2_32(VF.getKnownMinValue()) &&
7575            "VF needs to be a power of two");
7576     LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
7577                       << "VF " << VF << " to build VPlans.\n");
7578     buildVPlans(VF, VF);
7579 
7580     // For VPlan build stress testing, we bail out after VPlan construction.
7581     if (VPlanBuildStressTest)
7582       return VectorizationFactor::Disabled();
7583 
7584     return {VF, 0 /*Cost*/};
7585   }
7586 
7587   LLVM_DEBUG(
7588       dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
7589                 "VPlan-native path.\n");
7590   return VectorizationFactor::Disabled();
7591 }
7592 
7593 Optional<VectorizationFactor>
7594 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
7595   assert(OrigLoop->isInnermost() && "Inner loop expected.");
7596   FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
  if (!MaxFactors) // Cases that should not be vectorized nor interleaved.
7598     return None;
7599 
  // Invalidate interleave groups if all blocks of the loop will be predicated.
7601   if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) &&
7602       !useMaskedInterleavedAccesses(*TTI)) {
7603     LLVM_DEBUG(
7604         dbgs()
7605         << "LV: Invalidate all interleaved groups due to fold-tail by masking "
7606            "which requires masked-interleaved support.\n");
7607     if (CM.InterleaveInfo.invalidateGroups())
7608       // Invalidating interleave groups also requires invalidating all decisions
7609       // based on them, which includes widening decisions and uniform and scalar
7610       // values.
7611       CM.invalidateCostModelingDecisions();
7612   }
7613 
7614   ElementCount MaxUserVF =
7615       UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF;
7616   bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF);
7617   if (!UserVF.isZero() && UserVFIsLegal) {
7618     assert(isPowerOf2_32(UserVF.getKnownMinValue()) &&
7619            "VF needs to be a power of two");
7620     // Collect the instructions (and their associated costs) that will be more
7621     // profitable to scalarize.
7622     if (CM.selectUserVectorizationFactor(UserVF)) {
7623       LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
7624       CM.collectInLoopReductions();
7625       buildVPlansWithVPRecipes(UserVF, UserVF);
7626       LLVM_DEBUG(printPlans(dbgs()));
7627       return {{UserVF, 0}};
7628     } else
7629       reportVectorizationInfo("UserVF ignored because of invalid costs.",
7630                               "InvalidCost", ORE, OrigLoop);
7631   }
7632 
7633   // Populate the set of Vectorization Factor Candidates.
7634   ElementCountSet VFCandidates;
7635   for (auto VF = ElementCount::getFixed(1);
7636        ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2)
7637     VFCandidates.insert(VF);
7638   for (auto VF = ElementCount::getScalable(1);
7639        ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2)
7640     VFCandidates.insert(VF);
7641 
7642   for (const auto &VF : VFCandidates) {
7643     // Collect Uniform and Scalar instructions after vectorization with VF.
7644     CM.collectUniformsAndScalars(VF);
7645 
7646     // Collect the instructions (and their associated costs) that will be more
7647     // profitable to scalarize.
7648     if (VF.isVector())
7649       CM.collectInstsToScalarize(VF);
7650   }
7651 
7652   CM.collectInLoopReductions();
7653   buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF);
7654   buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF);
7655 
7656   LLVM_DEBUG(printPlans(dbgs()));
7657   if (!MaxFactors.hasVector())
7658     return VectorizationFactor::Disabled();
7659 
7660   // Select the optimal vectorization factor.
7661   auto SelectedVF = CM.selectVectorizationFactor(VFCandidates);
7662 
7663   // Check if it is profitable to vectorize with runtime checks.
7664   unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks();
7665   if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) {
7666     bool PragmaThresholdReached =
7667         NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
7668     bool ThresholdReached =
7669         NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
7670     if ((ThresholdReached && !Hints.allowReordering()) ||
7671         PragmaThresholdReached) {
7672       ORE->emit([&]() {
7673         return OptimizationRemarkAnalysisAliasing(
7674                    DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(),
7675                    OrigLoop->getHeader())
7676                << "loop not vectorized: cannot prove it is safe to reorder "
7677                   "memory operations";
7678       });
7679       LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
7680       Hints.emitRemarkWithHints();
7681       return VectorizationFactor::Disabled();
7682     }
7683   }
7684   return SelectedVF;
7685 }
7686 
7687 VPlan &LoopVectorizationPlanner::getBestPlanFor(ElementCount VF) const {
7688   assert(count_if(VPlans,
7689                   [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) ==
7690              1 &&
         "Expected a single VPlan for the best VF.");
7692 
7693   for (const VPlanPtr &Plan : VPlans) {
7694     if (Plan->hasVF(VF))
7695       return *Plan.get();
7696   }
7697   llvm_unreachable("No plan found!");
7698 }
7699 
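// Add loop metadata that disables runtime unrolling, unless unroll metadata
// is already present. The resulting loop ID looks roughly like this
// (an illustrative sketch):
//   !0 = distinct !{!0, ..., !1}
//   !1 = !{!"llvm.loop.unroll.runtime.disable"}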
7700 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
7701   SmallVector<Metadata *, 4> MDs;
7702   // Reserve first location for self reference to the LoopID metadata node.
7703   MDs.push_back(nullptr);
7704   bool IsUnrollMetadata = false;
7705   MDNode *LoopID = L->getLoopID();
7706   if (LoopID) {
7707     // First find existing loop unrolling disable metadata.
7708     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
7709       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
7710       if (MD) {
7711         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
7712         IsUnrollMetadata =
7713             S && S->getString().startswith("llvm.loop.unroll.disable");
7714       }
7715       MDs.push_back(LoopID->getOperand(i));
7716     }
7717   }
7718 
7719   if (!IsUnrollMetadata) {
7720     // Add runtime unroll disable metadata.
7721     LLVMContext &Context = L->getHeader()->getContext();
7722     SmallVector<Metadata *, 1> DisableOperands;
7723     DisableOperands.push_back(
7724         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
7725     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
7726     MDs.push_back(DisableNode);
7727     MDNode *NewLoopID = MDNode::get(Context, MDs);
7728     // Set operand 0 to refer to the loop id itself.
7729     NewLoopID->replaceOperandWith(0, NewLoopID);
7730     L->setLoopID(NewLoopID);
7731   }
7732 }
7733 
7734 void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF,
7735                                            VPlan &BestVPlan,
7736                                            InnerLoopVectorizer &ILV,
7737                                            DominatorTree *DT) {
  LLVM_DEBUG(dbgs() << "Executing best plan with VF=" << BestVF
                    << ", UF=" << BestUF << '\n');
7740 
7741   // Perform the actual loop transformation.
7742 
7743   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
7744   VPTransformState State{BestVF, BestUF, LI, DT, ILV.Builder, &ILV, &BestVPlan};
7745   Value *CanonicalIVStartValue;
7746   std::tie(State.CFG.PrevBB, CanonicalIVStartValue) =
7747       ILV.createVectorizedLoopSkeleton();
7748   ILV.collectPoisonGeneratingRecipes(State);
7749 
7750   ILV.printDebugTracesAtStart();
7751 
7752   //===------------------------------------------------===//
7753   //
  // Notice: any optimization or new instruction that goes
7755   // into the code below should also be implemented in
7756   // the cost-model.
7757   //
7758   //===------------------------------------------------===//
7759 
7760   // 2. Copy and widen instructions from the old loop into the new loop.
7761   BestVPlan.prepareToExecute(ILV.getOrCreateTripCount(nullptr),
7762                              ILV.getOrCreateVectorTripCount(nullptr),
7763                              CanonicalIVStartValue, State);
7764   BestVPlan.execute(&State);
7765 
7766   // Keep all loop hints from the original loop on the vector loop (we'll
7767   // replace the vectorizer-specific hints below).
7768   MDNode *OrigLoopID = OrigLoop->getLoopID();
7769 
7770   Optional<MDNode *> VectorizedLoopID =
7771       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
7772                                       LLVMLoopVectorizeFollowupVectorized});
7773 
7774   Loop *L = LI->getLoopFor(State.CFG.PrevBB);
7775   if (VectorizedLoopID.hasValue())
7776     L->setLoopID(VectorizedLoopID.getValue());
7777   else {
7778     // Keep all loop hints from the original loop on the vector loop (we'll
7779     // replace the vectorizer-specific hints below).
7780     if (MDNode *LID = OrigLoop->getLoopID())
7781       L->setLoopID(LID);
7782 
7783     LoopVectorizeHints Hints(L, true, *ORE);
7784     Hints.setAlreadyVectorized();
7785   }
7786   // Disable runtime unrolling when vectorizing the epilogue loop.
7787   if (CanonicalIVStartValue)
7788     AddRuntimeUnrollDisableMetaData(L);
7789 
7790   // 3. Fix the vectorized code: take care of header phi's, live-outs,
7791   //    predication, updating analyses.
7792   ILV.fixVectorizedLoop(State);
7793 
7794   ILV.printDebugTracesAtEnd();
7795 }
7796 
7797 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
7798 void LoopVectorizationPlanner::printPlans(raw_ostream &O) {
7799   for (const auto &Plan : VPlans)
7800     if (PrintVPlansInDotFormat)
7801       Plan->printDOT(O);
7802     else
7803       Plan->print(O);
7804 }
7805 #endif
7806 
7807 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
7808     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
7809 
  // We create new control-flow for the vectorized loop, so an original exit
  // condition will be dead after vectorization if it is only used by the
  // terminator.
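  // For example, in:
  //   %exitcond = icmp eq i64 %iv.next, %n
  //   br i1 %exitcond, label %exit, label %loop
  // the compare is only used by the branch and becomes dead (an
  // illustrative sketch).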
7813   SmallVector<BasicBlock*> ExitingBlocks;
7814   OrigLoop->getExitingBlocks(ExitingBlocks);
7815   for (auto *BB : ExitingBlocks) {
7816     auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
7817     if (!Cmp || !Cmp->hasOneUse())
7818       continue;
7819 
7820     // TODO: we should introduce a getUniqueExitingBlocks on Loop
7821     if (!DeadInstructions.insert(Cmp).second)
7822       continue;
7823 
    // An operand of the icmp is often a dead trunc, used by IndUpdate.
7825     // TODO: can recurse through operands in general
7826     for (Value *Op : Cmp->operands()) {
7827       if (isa<TruncInst>(Op) && Op->hasOneUse())
        DeadInstructions.insert(cast<Instruction>(Op));
7829     }
7830   }
7831 
7832   // We create new "steps" for induction variable updates to which the original
7833   // induction variables map. An original update instruction will be dead if
7834   // all its users except the induction variable are dead.
7835   auto *Latch = OrigLoop->getLoopLatch();
7836   for (auto &Induction : Legal->getInductionVars()) {
7837     PHINode *Ind = Induction.first;
7838     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
7839 
    // If the tail is to be folded by masking, the primary induction variable,
    // if it exists, isn't dead: it will be used for masking. Don't kill it.
7842     if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction())
7843       continue;
7844 
7845     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
7846           return U == Ind || DeadInstructions.count(cast<Instruction>(U));
7847         }))
7848       DeadInstructions.insert(IndUpdate);
7849   }
7850 }
7851 
7852 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
7853 
7854 //===--------------------------------------------------------------------===//
7855 // EpilogueVectorizerMainLoop
7856 //===--------------------------------------------------------------------===//
7857 
7858 /// This function is partially responsible for generating the control flow
7859 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
7860 std::pair<BasicBlock *, Value *>
7861 EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
7862   MDNode *OrigLoopID = OrigLoop->getLoopID();
7863   Loop *Lp = createVectorLoopSkeleton("");
7864 
7865   // Generate the code to check the minimum iteration count of the vector
7866   // epilogue (see below).
7867   EPI.EpilogueIterationCountCheck =
7868       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true);
7869   EPI.EpilogueIterationCountCheck->setName("iter.check");
7870 
7871   // Generate the code to check any assumptions that we've made for SCEV
7872   // expressions.
7873   EPI.SCEVSafetyCheck = emitSCEVChecks(LoopScalarPreHeader);
7874 
7875   // Generate the code that checks at runtime if arrays overlap. We put the
7876   // checks into a separate block to make the more common case of few elements
7877   // faster.
7878   EPI.MemSafetyCheck = emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
7879 
7880   // Generate the iteration count check for the main loop, *after* the check
7881   // for the epilogue loop, so that the path-length is shorter for the case
  // that goes directly through the vector epilogue. The longer path-length for
  // the main loop is compensated for by the gain from vectorizing the larger
7884   // trip count. Note: the branch will get updated later on when we vectorize
7885   // the epilogue.
7886   EPI.MainLoopIterationCountCheck =
7887       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false);
7888 
7889   // Generate the induction variable.
7890   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
7891   EPI.VectorTripCount = CountRoundDown;
7892   createHeaderBranch(Lp);
7893 
7894   // Skip induction resume value creation here because they will be created in
7895   // the second pass. If we created them here, they wouldn't be used anyway,
  // because the VPlan in the second pass still contains the inductions from the
7897   // original loop.
7898 
7899   return {completeLoopSkeleton(Lp, OrigLoopID), nullptr};
7900 }
7901 
7902 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
7903   LLVM_DEBUG({
7904     dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
7905            << "Main Loop VF:" << EPI.MainLoopVF
7906            << ", Main Loop UF:" << EPI.MainLoopUF
7907            << ", Epilogue Loop VF:" << EPI.EpilogueVF
7908            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
7909   });
7910 }
7911 
7912 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
7913   DEBUG_WITH_TYPE(VerboseDebug, {
7914     dbgs() << "intermediate fn:\n"
7915            << *OrigLoop->getHeader()->getParent() << "\n";
7916   });
7917 }
7918 
7919 BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(
7920     Loop *L, BasicBlock *Bypass, bool ForEpilogue) {
7921   assert(L && "Expected valid Loop.");
7922   assert(Bypass && "Expected valid bypass basic block.");
7923   ElementCount VFactor = ForEpilogue ? EPI.EpilogueVF : VF;
7924   unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF;
7925   Value *Count = getOrCreateTripCount(L);
7926   // Reuse existing vector loop preheader for TC checks.
7927   // Note that new preheader block is generated for vector loop.
7928   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
7929   IRBuilder<> Builder(TCCheckBlock->getTerminator());
7930 
7931   // Generate code to check if the loop's trip count is less than VF * UF of the
7932   // main vector loop.
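  // Using ULE when a scalar epilogue is required ensures that a trip count
  // equal to VF * UF still takes the bypass, leaving at least one iteration
  // for the scalar remainder loop.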
7933   auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF) ?
7934       ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
7935 
7936   Value *CheckMinIters = Builder.CreateICmp(
7937       P, Count, createStepForVF(Builder, Count->getType(), VFactor, UFactor),
7938       "min.iters.check");
7939 
7940   if (!ForEpilogue)
7941     TCCheckBlock->setName("vector.main.loop.iter.check");
7942 
7943   // Create new preheader for vector loop.
7944   LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
7945                                    DT, LI, nullptr, "vector.ph");
7946 
7947   if (ForEpilogue) {
7948     assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
7949                                  DT->getNode(Bypass)->getIDom()) &&
7950            "TC check is expected to dominate Bypass");
7951 
7952     // Update dominator for Bypass & LoopExit.
7953     DT->changeImmediateDominator(Bypass, TCCheckBlock);
7954     if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF))
7955       // For loops with multiple exits, there's no edge from the middle block
7956       // to exit blocks (as the epilogue must run) and thus no need to update
7957       // the immediate dominator of the exit blocks.
7958       DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
7959 
7960     LoopBypassBlocks.push_back(TCCheckBlock);
7961 
7962     // Save the trip count so we don't have to regenerate it in the
7963     // vec.epilog.iter.check. This is safe to do because the trip count
7964     // generated here dominates the vector epilog iter check.
7965     EPI.TripCount = Count;
7966   }
7967 
7968   ReplaceInstWithInst(
7969       TCCheckBlock->getTerminator(),
7970       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
7971 
7972   return TCCheckBlock;
7973 }
7974 
7975 //===--------------------------------------------------------------------===//
7976 // EpilogueVectorizerEpilogueLoop
7977 //===--------------------------------------------------------------------===//
7978 
7979 /// This function is partially responsible for generating the control flow
7980 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
7981 std::pair<BasicBlock *, Value *>
7982 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
7983   MDNode *OrigLoopID = OrigLoop->getLoopID();
7984   Loop *Lp = createVectorLoopSkeleton("vec.epilog.");
7985 
  // Now, compare the remaining count; if there aren't enough iterations to
  // execute the vectorized epilogue, skip to the scalar part.
7988   BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader;
7989   VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check");
7990   LoopVectorPreHeader =
7991       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
7992                  LI, nullptr, "vec.epilog.ph");
7993   emitMinimumVectorEpilogueIterCountCheck(LoopScalarPreHeader,
7994                                           VecEpilogueIterationCountCheck);
7995 
7996   // Adjust the control flow taking the state info from the main loop
7997   // vectorization into account.
7998   assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck &&
7999          "expected this to be saved from the previous pass.");
8000   EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith(
8001       VecEpilogueIterationCountCheck, LoopVectorPreHeader);
8002 
8003   DT->changeImmediateDominator(LoopVectorPreHeader,
8004                                EPI.MainLoopIterationCountCheck);
8005 
8006   EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
8007       VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8008 
8009   if (EPI.SCEVSafetyCheck)
8010     EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith(
8011         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8012   if (EPI.MemSafetyCheck)
8013     EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith(
8014         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8015 
8016   DT->changeImmediateDominator(
8017       VecEpilogueIterationCountCheck,
8018       VecEpilogueIterationCountCheck->getSinglePredecessor());
8019 
8020   DT->changeImmediateDominator(LoopScalarPreHeader,
8021                                EPI.EpilogueIterationCountCheck);
8022   if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF))
8023     // If there is an epilogue which must run, there's no edge from the
    // middle block to exit blocks and thus no need to update the immediate
8025     // dominator of the exit blocks.
8026     DT->changeImmediateDominator(LoopExitBlock,
8027                                  EPI.EpilogueIterationCountCheck);
8028 
8029   // Keep track of bypass blocks, as they feed start values to the induction
8030   // phis in the scalar loop preheader.
8031   if (EPI.SCEVSafetyCheck)
8032     LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck);
8033   if (EPI.MemSafetyCheck)
8034     LoopBypassBlocks.push_back(EPI.MemSafetyCheck);
8035   LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck);
8036 
8037   // The vec.epilog.iter.check block may contain Phi nodes from reductions which
8038   // merge control-flow from the latch block and the middle block. Update the
8039   // incoming values here and move the Phi into the preheader.
8040   SmallVector<PHINode *, 4> PhisInBlock;
8041   for (PHINode &Phi : VecEpilogueIterationCountCheck->phis())
8042     PhisInBlock.push_back(&Phi);
8043 
8044   for (PHINode *Phi : PhisInBlock) {
8045     Phi->replaceIncomingBlockWith(
8046         VecEpilogueIterationCountCheck->getSinglePredecessor(),
8047         VecEpilogueIterationCountCheck);
8048     Phi->removeIncomingValue(EPI.EpilogueIterationCountCheck);
8049     if (EPI.SCEVSafetyCheck)
8050       Phi->removeIncomingValue(EPI.SCEVSafetyCheck);
8051     if (EPI.MemSafetyCheck)
8052       Phi->removeIncomingValue(EPI.MemSafetyCheck);
8053     Phi->moveBefore(LoopVectorPreHeader->getFirstNonPHI());
8054   }
8055 
8056   // Generate a resume induction for the vector epilogue and put it in the
  // vector epilogue preheader.
8058   Type *IdxTy = Legal->getWidestInductionType();
8059   PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
8060                                          LoopVectorPreHeader->getFirstNonPHI());
8061   EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
8062   EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
8063                            EPI.MainLoopIterationCountCheck);
8064 
8065   // Generate the induction variable.
8066   createHeaderBranch(Lp);
8067 
8068   // Generate induction resume values. These variables save the new starting
8069   // indexes for the scalar loop. They are used to test if there are any tail
8070   // iterations left once the vector loop has completed.
8071   // Note that when the vectorized epilogue is skipped due to iteration count
8072   // check, then the resume value for the induction variable comes from
8073   // the trip count of the main vector loop, hence passing the AdditionalBypass
8074   // argument.
8075   createInductionResumeValues(Lp, {VecEpilogueIterationCountCheck,
8076                                    EPI.VectorTripCount} /* AdditionalBypass */);
8077 
8078   return {completeLoopSkeleton(Lp, OrigLoopID), EPResumeVal};
8079 }
8080 
8081 BasicBlock *
8082 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
8083     BasicBlock *Bypass, BasicBlock *Insert) {
8084 
8085   assert(EPI.TripCount &&
         "Expected trip count to have been saved in the first pass.");
8087   assert(
8088       (!isa<Instruction>(EPI.TripCount) ||
8089        DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
8090       "saved trip count does not dominate insertion point.");
8091   Value *TC = EPI.TripCount;
8092   IRBuilder<> Builder(Insert->getTerminator());
8093   Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");
8094 
8095   // Generate code to check if the loop's trip count is less than VF * UF of the
8096   // vector epilogue loop.
8097   auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ?
8098       ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
8099 
8100   Value *CheckMinIters =
8101       Builder.CreateICmp(P, Count,
8102                          createStepForVF(Builder, Count->getType(),
8103                                          EPI.EpilogueVF, EPI.EpilogueUF),
8104                          "min.epilog.iters.check");
8105 
8106   ReplaceInstWithInst(
8107       Insert->getTerminator(),
8108       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8109 
8110   LoopBypassBlocks.push_back(Insert);
8111   return Insert;
8112 }
8113 
8114 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
8115   LLVM_DEBUG({
8116     dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
8117            << "Epilogue Loop VF:" << EPI.EpilogueVF
8118            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8119   });
8120 }
8121 
8122 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
8123   DEBUG_WITH_TYPE(VerboseDebug, {
8124     dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n";
8125   });
8126 }
8127 
8128 bool LoopVectorizationPlanner::getDecisionAndClampRange(
8129     const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
8130   assert(!Range.isEmpty() && "Trying to test an empty VF range.");
8131   bool PredicateAtRangeStart = Predicate(Range.Start);
8132 
8133   for (ElementCount TmpVF = Range.Start * 2;
8134        ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
8135     if (Predicate(TmpVF) != PredicateAtRangeStart) {
8136       Range.End = TmpVF;
8137       break;
8138     }
8139 
8140   return PredicateAtRangeStart;
8141 }
8142 
8143 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
8144 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
8145 /// of VF's starting at a given VF and extending it as much as possible. Each
8146 /// vectorization decision can potentially shorten this sub-range during
8147 /// buildVPlan().
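/// For example, with MinVF = 2 and MaxVF = 16, this may build one VPlan
/// covering {2, 4} and a second covering {8, 16}, depending on where the
/// vectorization decisions start to diverge (an illustrative split).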
8148 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
8149                                            ElementCount MaxVF) {
8150   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8151   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8152     VFRange SubRange = {VF, MaxVFPlusOne};
8153     VPlans.push_back(buildVPlan(SubRange));
8154     VF = SubRange.End;
8155   }
8156 }
8157 
8158 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
8159                                          VPlanPtr &Plan) {
8160   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
8161 
8162   // Look for cached value.
8163   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
8164   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
8165   if (ECEntryIt != EdgeMaskCache.end())
8166     return ECEntryIt->second;
8167 
8168   VPValue *SrcMask = createBlockInMask(Src, Plan);
8169 
8170   // The terminator has to be a branch inst!
8171   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
8172   assert(BI && "Unexpected terminator found");
8173 
8174   if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
8175     return EdgeMaskCache[Edge] = SrcMask;
8176 
8177   // If source is an exiting block, we know the exit edge is dynamically dead
8178   // in the vector loop, and thus we don't need to restrict the mask.  Avoid
8179   // adding uses of an otherwise potentially dead instruction.
8180   if (OrigLoop->isLoopExiting(Src))
8181     return EdgeMaskCache[Edge] = SrcMask;
8182 
8183   VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition());
8184   assert(EdgeMask && "No Edge Mask found for condition");
8185 
8186   if (BI->getSuccessor(0) != Dst)
8187     EdgeMask = Builder.createNot(EdgeMask, BI->getDebugLoc());
8188 
8189   if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND.
8190     // The condition is 'SrcMask && EdgeMask', which is equivalent to
8191     // 'select i1 SrcMask, i1 EdgeMask, i1 false'.
8192     // The select version does not introduce new UB if SrcMask is false and
8193     // EdgeMask is poison. Using 'and' here introduces undefined behavior.
8194     VPValue *False = Plan->getOrAddVPValue(
8195         ConstantInt::getFalse(BI->getCondition()->getType()));
8196     EdgeMask =
8197         Builder.createSelect(SrcMask, EdgeMask, False, BI->getDebugLoc());
8198   }
8199 
8200   return EdgeMaskCache[Edge] = EdgeMask;
8201 }
8202 
8203 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
8204   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
8205 
8206   // Look for cached value.
8207   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
8208   if (BCEntryIt != BlockMaskCache.end())
8209     return BCEntryIt->second;
8210 
8211   // All-one mask is modelled as no-mask following the convention for masked
8212   // load/store/gather/scatter. Initialize BlockMask to no-mask.
8213   VPValue *BlockMask = nullptr;
8214 
8215   if (OrigLoop->getHeader() == BB) {
8216     if (!CM.blockNeedsPredicationForAnyReason(BB))
8217       return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
8218 
8219     // Introduce the early-exit compare IV <= BTC to form header block mask.
8220     // This is used instead of IV < TC because TC may wrap, unlike BTC. Start by
8221     // constructing the desired canonical IV in the header block as its first
8222     // non-phi instructions.
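    // For example, with a trip count of 10 (BTC = 9) and VF = 4, the third
    // vector iteration compares <i64 8, i64 9, i64 10, i64 11> ule 9 and
    // masks off the last two lanes (an illustrative sketch).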
8223     assert(CM.foldTailByMasking() && "must fold the tail");
8224     VPBasicBlock *HeaderVPBB = Plan->getEntry()->getEntryBasicBlock();
8225     auto NewInsertionPoint = HeaderVPBB->getFirstNonPhi();
8226     auto *IV = new VPWidenCanonicalIVRecipe(Plan->getCanonicalIV());
8227     HeaderVPBB->insert(IV, HeaderVPBB->getFirstNonPhi());
8228 
8229     VPBuilder::InsertPointGuard Guard(Builder);
8230     Builder.setInsertPoint(HeaderVPBB, NewInsertionPoint);
8231     if (CM.TTI.emitGetActiveLaneMask()) {
8232       VPValue *TC = Plan->getOrCreateTripCount();
8233       BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV, TC});
8234     } else {
8235       VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
8236       BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
8237     }
8238     return BlockMaskCache[BB] = BlockMask;
8239   }
8240 
8241   // This is the block mask. We OR all incoming edges.
8242   for (auto *Predecessor : predecessors(BB)) {
8243     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
8244     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
8245       return BlockMaskCache[BB] = EdgeMask;
8246 
8247     if (!BlockMask) { // BlockMask has its initialized nullptr value.
8248       BlockMask = EdgeMask;
8249       continue;
8250     }
8251 
8252     BlockMask = Builder.createOr(BlockMask, EdgeMask, {});
8253   }
8254 
8255   return BlockMaskCache[BB] = BlockMask;
8256 }
8257 
8258 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I,
8259                                                 ArrayRef<VPValue *> Operands,
8260                                                 VFRange &Range,
8261                                                 VPlanPtr &Plan) {
8262   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
8263          "Must be called with either a load or store");
8264 
8265   auto willWiden = [&](ElementCount VF) -> bool {
8266     if (VF.isScalar())
8267       return false;
8268     LoopVectorizationCostModel::InstWidening Decision =
8269         CM.getWideningDecision(I, VF);
8270     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
8271            "CM decision should be taken at this point.");
8272     if (Decision == LoopVectorizationCostModel::CM_Interleave)
8273       return true;
8274     if (CM.isScalarAfterVectorization(I, VF) ||
8275         CM.isProfitableToScalarize(I, VF))
8276       return false;
8277     return Decision != LoopVectorizationCostModel::CM_Scalarize;
8278   };
8279 
8280   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8281     return nullptr;
8282 
8283   VPValue *Mask = nullptr;
8284   if (Legal->isMaskRequired(I))
8285     Mask = createBlockInMask(I->getParent(), Plan);
8286 
8287   // Determine if the pointer operand of the access is either consecutive or
8288   // reverse consecutive.
8289   LoopVectorizationCostModel::InstWidening Decision =
8290       CM.getWideningDecision(I, Range.Start);
8291   bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse;
8292   bool Consecutive =
8293       Reverse || Decision == LoopVectorizationCostModel::CM_Widen;
8294 
8295   if (LoadInst *Load = dyn_cast<LoadInst>(I))
8296     return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask,
8297                                               Consecutive, Reverse);
8298 
8299   StoreInst *Store = cast<StoreInst>(I);
8300   return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0],
8301                                             Mask, Consecutive, Reverse);
8302 }
8303 
8304 static VPWidenIntOrFpInductionRecipe *
8305 createWidenInductionRecipe(PHINode *Phi, Instruction *PhiOrTrunc,
8306                            VPValue *Start, const InductionDescriptor &IndDesc,
8307                            LoopVectorizationCostModel &CM, ScalarEvolution &SE,
8308                            Loop &OrigLoop, VFRange &Range) {
8309   // Returns true if an instruction \p I should be scalarized instead of
8310   // vectorized for the chosen vectorization factor.
8311   auto ShouldScalarizeInstruction = [&CM](Instruction *I, ElementCount VF) {
8312     return CM.isScalarAfterVectorization(I, VF) ||
8313            CM.isProfitableToScalarize(I, VF);
8314   };
8315 
8316   bool NeedsScalarIV = LoopVectorizationPlanner::getDecisionAndClampRange(
8317       [&](ElementCount VF) {
8318         // Returns true if we should generate a scalar version of \p IV.
8319         if (ShouldScalarizeInstruction(PhiOrTrunc, VF))
8320           return true;
8321         auto isScalarInst = [&](User *U) -> bool {
8322           auto *I = cast<Instruction>(U);
8323           return OrigLoop.contains(I) && ShouldScalarizeInstruction(I, VF);
8324         };
8325         return any_of(PhiOrTrunc->users(), isScalarInst);
8326       },
8327       Range);
8328   bool NeedsScalarIVOnly = LoopVectorizationPlanner::getDecisionAndClampRange(
8329       [&](ElementCount VF) {
8330         return ShouldScalarizeInstruction(PhiOrTrunc, VF);
8331       },
8332       Range);
8333   assert(IndDesc.getStartValue() ==
8334          Phi->getIncomingValueForBlock(OrigLoop.getLoopPreheader()));
8335   assert(SE.isLoopInvariant(IndDesc.getStep(), &OrigLoop) &&
8336          "step must be loop invariant");
8337   if (auto *TruncI = dyn_cast<TruncInst>(PhiOrTrunc)) {
8338     return new VPWidenIntOrFpInductionRecipe(
8339         Phi, Start, IndDesc, TruncI, NeedsScalarIV, !NeedsScalarIVOnly, SE);
8340   }
8341   assert(isa<PHINode>(PhiOrTrunc) && "must be a phi node here");
8342   return new VPWidenIntOrFpInductionRecipe(Phi, Start, IndDesc, NeedsScalarIV,
8343                                            !NeedsScalarIVOnly, SE);
8344 }
8345 
8346 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionPHI(
8347     PHINode *Phi, ArrayRef<VPValue *> Operands, VFRange &Range) const {
8348 
8349   // Check if this is an integer or fp induction. If so, build the recipe that
8350   // produces its scalar and vector values.
8351   if (auto *II = Legal->getIntOrFpInductionDescriptor(Phi))
8352     return createWidenInductionRecipe(Phi, Phi, Operands[0], *II, CM,
8353                                       *PSE.getSE(), *OrigLoop, Range);
8354 
8355   return nullptr;
8356 }
8357 
8358 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate(
8359     TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range,
8360     VPlan &Plan) const {
8361   // Optimize the special case where the source is a constant integer
8362   // induction variable. Notice that we can only optimize the 'trunc' case
8363   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
8364   // (c) other casts depend on pointer size.
8365 
8366   // Determine whether \p K is a truncation based on an induction variable that
8367   // can be optimized.
8368   auto isOptimizableIVTruncate =
8369       [&](Instruction *K) -> std::function<bool(ElementCount)> {
8370     return [=](ElementCount VF) -> bool {
8371       return CM.isOptimizableIVTruncate(K, VF);
8372     };
8373   };
8374 
8375   if (LoopVectorizationPlanner::getDecisionAndClampRange(
8376           isOptimizableIVTruncate(I), Range)) {
8377 
8378     auto *Phi = cast<PHINode>(I->getOperand(0));
8379     const InductionDescriptor &II = *Legal->getIntOrFpInductionDescriptor(Phi);
8380     VPValue *Start = Plan.getOrAddVPValue(II.getStartValue());
8381     return createWidenInductionRecipe(Phi, I, Start, II, CM, *PSE.getSE(),
8382                                       *OrigLoop, Range);
8383   }
8384   return nullptr;
8385 }
8386 
8387 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi,
8388                                                 ArrayRef<VPValue *> Operands,
8389                                                 VPlanPtr &Plan) {
8390   // If all incoming values are equal, the incoming VPValue can be used directly
8391   // instead of creating a new VPBlendRecipe.
8392   VPValue *FirstIncoming = Operands[0];
8393   if (all_of(Operands, [FirstIncoming](const VPValue *Inc) {
8394         return FirstIncoming == Inc;
8395       })) {
8396     return Operands[0];
8397   }
8398 
8399   unsigned NumIncoming = Phi->getNumIncomingValues();
8400   // For in-loop reductions, we do not need to create an additional select.
8401   VPValue *InLoopVal = nullptr;
8402   for (unsigned In = 0; In < NumIncoming; In++) {
8403     PHINode *PhiOp =
8404         dyn_cast_or_null<PHINode>(Operands[In]->getUnderlyingValue());
8405     if (PhiOp && CM.isInLoopReduction(PhiOp)) {
8406       assert(!InLoopVal && "Found more than one in-loop reduction!");
8407       InLoopVal = Operands[In];
8408     }
8409   }
8410 
8411   assert((!InLoopVal || NumIncoming == 2) &&
8412          "Found an in-loop reduction for PHI with unexpected number of "
8413          "incoming values");
8414   if (InLoopVal)
8415     return Operands[Operands[0] == InLoopVal ? 1 : 0];
8416 
8417   // We know that all PHIs in non-header blocks are converted into selects, so
8418   // we don't have to worry about the insertion order and we can just use the
8419   // builder. At this point we generate the predication tree. There may be
8420   // duplications since this is a simple recursive scan, but future
8421   // optimizations will clean it up.
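  //
  // For example (schematic), a phi
  //   %p = phi [ %a, %bb1 ], [ %b, %bb2 ]
  // becomes a VPBlendRecipe whose operands are laid out as pairs of incoming
  // value and edge mask: blend %a/mask(bb1), %b/mask(bb2).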
8422   SmallVector<VPValue *, 2> OperandsWithMask;
8423 
8424   for (unsigned In = 0; In < NumIncoming; In++) {
    VPValue *EdgeMask =
        createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
8427     assert((EdgeMask || NumIncoming == 1) &&
8428            "Multiple predecessors with one having a full mask");
8429     OperandsWithMask.push_back(Operands[In]);
8430     if (EdgeMask)
8431       OperandsWithMask.push_back(EdgeMask);
8432   }
8433   return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask));
8434 }
8435 
8436 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
8437                                                    ArrayRef<VPValue *> Operands,
8438                                                    VFRange &Range) const {
8439 
8440   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8441       [this, CI](ElementCount VF) {
8442         return CM.isScalarWithPredication(CI, VF);
8443       },
8444       Range);
8445 
8446   if (IsPredicated)
8447     return nullptr;
8448 
8449   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8450   if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
8451              ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
8452              ID == Intrinsic::pseudoprobe ||
8453              ID == Intrinsic::experimental_noalias_scope_decl))
8454     return nullptr;
8455 
8456   auto willWiden = [&](ElementCount VF) -> bool {
8457     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    // The following case may be scalarized depending on the VF.
    // NeedToScalarize indicates whether a plain vector library call is
    // available for this VF; compare its cost against that of a vector
    // intrinsic call and widen using whichever option is viable.
8462     bool NeedToScalarize = false;
8463     InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
8464     InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0;
8465     bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
8466     return UseVectorIntrinsic || !NeedToScalarize;
8467   };
8468 
8469   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8470     return nullptr;
8471 
8472   ArrayRef<VPValue *> Ops = Operands.take_front(CI->arg_size());
8473   return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end()));
8474 }
8475 
8476 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
8477   assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
8478          !isa<StoreInst>(I) && "Instruction should have been handled earlier");
8479   // Instruction should be widened, unless it is scalar after vectorization,
8480   // scalarization is profitable or it is predicated.
8481   auto WillScalarize = [this, I](ElementCount VF) -> bool {
8482     return CM.isScalarAfterVectorization(I, VF) ||
8483            CM.isProfitableToScalarize(I, VF) ||
8484            CM.isScalarWithPredication(I, VF);
8485   };
8486   return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
8487                                                              Range);
8488 }
8489 
8490 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
8491                                            ArrayRef<VPValue *> Operands) const {
8492   auto IsVectorizableOpcode = [](unsigned Opcode) {
8493     switch (Opcode) {
8494     case Instruction::Add:
8495     case Instruction::And:
8496     case Instruction::AShr:
8497     case Instruction::BitCast:
8498     case Instruction::FAdd:
8499     case Instruction::FCmp:
8500     case Instruction::FDiv:
8501     case Instruction::FMul:
8502     case Instruction::FNeg:
8503     case Instruction::FPExt:
8504     case Instruction::FPToSI:
8505     case Instruction::FPToUI:
8506     case Instruction::FPTrunc:
8507     case Instruction::FRem:
8508     case Instruction::FSub:
8509     case Instruction::ICmp:
8510     case Instruction::IntToPtr:
8511     case Instruction::LShr:
8512     case Instruction::Mul:
8513     case Instruction::Or:
8514     case Instruction::PtrToInt:
8515     case Instruction::SDiv:
8516     case Instruction::Select:
8517     case Instruction::SExt:
8518     case Instruction::Shl:
8519     case Instruction::SIToFP:
8520     case Instruction::SRem:
8521     case Instruction::Sub:
8522     case Instruction::Trunc:
8523     case Instruction::UDiv:
8524     case Instruction::UIToFP:
8525     case Instruction::URem:
8526     case Instruction::Xor:
8527     case Instruction::ZExt:
8528       return true;
8529     }
8530     return false;
8531   };
8532 
8533   if (!IsVectorizableOpcode(I->getOpcode()))
8534     return nullptr;
8535 
8536   // Success: widen this instruction.
8537   return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end()));
8538 }
8539 
8540 void VPRecipeBuilder::fixHeaderPhis() {
8541   BasicBlock *OrigLatch = OrigLoop->getLoopLatch();
8542   for (VPHeaderPHIRecipe *R : PhisToFix) {
8543     auto *PN = cast<PHINode>(R->getUnderlyingValue());
8544     VPRecipeBase *IncR =
8545         getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch)));
8546     R->addOperand(IncR->getVPSingleValue());
8547   }
8548 }
8549 
8550 VPBasicBlock *VPRecipeBuilder::handleReplication(
8551     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
8552     VPlanPtr &Plan) {
8553   bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
8554       [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
8555       Range);
8556 
8557   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8558       [&](ElementCount VF) { return CM.isPredicatedInst(I, VF, IsUniform); },
8559       Range);
8560 
8561   // Even if the instruction is not marked as uniform, there are certain
8562   // intrinsic calls that can be effectively treated as such, so we check for
8563   // them here. Conservatively, we only do this for scalable vectors, since
8564   // for fixed-width VFs we can always fall back on full scalarization.
8565   if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) {
8566     switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
8567     case Intrinsic::assume:
8568     case Intrinsic::lifetime_start:
8569     case Intrinsic::lifetime_end:
      // For scalable vectors, if one of the operands is variant then we still
      // want to mark the call as uniform, which will generate one instruction
      // for just the first lane of the vector. We can't scalarize the call in
      // the same way as for fixed-width vectors because we don't know how many
      // lanes there are.
8575       //
8576       // The reasons for doing it this way for scalable vectors are:
8577       //   1. For the assume intrinsic generating the instruction for the first
      //      lane is still better than not generating any at all. For
8579       //      example, the input may be a splat across all lanes.
8580       //   2. For the lifetime start/end intrinsics the pointer operand only
8581       //      does anything useful when the input comes from a stack object,
8582       //      which suggests it should always be uniform. For non-stack objects
8583       //      the effect is to poison the object, which still allows us to
8584       //      remove the call.
8585       IsUniform = true;
8586       break;
8587     default:
8588       break;
8589     }
8590   }
8591 
8592   auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
8593                                        IsUniform, IsPredicated);
8594   setRecipe(I, Recipe);
8595   Plan->addVPValue(I, Recipe);
8596 
8597   // Find if I uses a predicated instruction. If so, it will use its scalar
8598   // value. Avoid hoisting the insert-element which packs the scalar value into
8599   // a vector value, as that happens iff all users use the vector value.
8600   for (VPValue *Op : Recipe->operands()) {
8601     auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef());
8602     if (!PredR)
8603       continue;
8604     auto *RepR =
8605         cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef());
8606     assert(RepR->isPredicated() &&
8607            "expected Replicate recipe to be predicated");
8608     RepR->setAlsoPack(false);
8609   }
8610 
  // Finalize the recipe for Instr, handling first the case where it is not
  // predicated.
8612   if (!IsPredicated) {
8613     LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
8614     VPBB->appendRecipe(Recipe);
8615     return VPBB;
8616   }
8617   LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
8618 
8619   VPBlockBase *SingleSucc = VPBB->getSingleSuccessor();
8620   assert(SingleSucc && "VPBB must have a single successor when handling "
8621                        "predicated replication.");
8622   VPBlockUtils::disconnectBlocks(VPBB, SingleSucc);
8623   // Record predicated instructions for above packing optimizations.
8624   VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
8625   VPBlockUtils::insertBlockAfter(Region, VPBB);
8626   auto *RegSucc = new VPBasicBlock();
8627   VPBlockUtils::insertBlockAfter(RegSucc, Region);
8628   VPBlockUtils::connectBlocks(RegSucc, SingleSucc);
8629   return RegSucc;
8630 }
8631 
8632 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
8633                                                       VPRecipeBase *PredRecipe,
8634                                                       VPlanPtr &Plan) {
8635   // Instructions marked for predication are replicated and placed under an
8636   // if-then construct to prevent side-effects.
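  //
  // Schematically, the region built below is a triangle:
  //
  //   pred.<opcode>.entry       (VPBranchOnMaskRecipe on the block-in mask)
  //      |        \
  //      |    pred.<opcode>.if  (the replicated, predicated recipe)
  //      |        /
  //   pred.<opcode>.continue    (VPPredInstPHIRecipe merging the result, if
  //                              the instruction produces one)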
8637 
8638   // Generate recipes to compute the block mask for this region.
8639   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
8640 
8641   // Build the triangular if-then region.
8642   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
8643   assert(Instr->getParent() && "Predicated instruction not in any basic block");
8644   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
8645   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
8646   auto *PHIRecipe = Instr->getType()->isVoidTy()
8647                         ? nullptr
8648                         : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr));
8649   if (PHIRecipe) {
8650     Plan->removeVPValueFor(Instr);
8651     Plan->addVPValue(Instr, PHIRecipe);
8652   }
8653   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
8654   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
8655   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
8656 
8657   // Note: first set Entry as region entry and then connect successors starting
8658   // from it in order, to propagate the "parent" of each VPBasicBlock.
8659   VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
8660   VPBlockUtils::connectBlocks(Pred, Exit);
8661 
8662   return Region;
8663 }
8664 
8665 VPRecipeOrVPValueTy
8666 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
8667                                         ArrayRef<VPValue *> Operands,
8668                                         VFRange &Range, VPlanPtr &Plan) {
8669   // First, check for specific widening recipes that deal with calls, memory
8670   // operations, inductions and Phi nodes.
8671   if (auto *CI = dyn_cast<CallInst>(Instr))
8672     return toVPRecipeResult(tryToWidenCall(CI, Operands, Range));
8673 
8674   if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8675     return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan));
8676 
8677   VPRecipeBase *Recipe;
8678   if (auto Phi = dyn_cast<PHINode>(Instr)) {
8679     if (Phi->getParent() != OrigLoop->getHeader())
8680       return tryToBlend(Phi, Operands, Plan);
8681     if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands, Range)))
8682       return toVPRecipeResult(Recipe);
8683 
8684     VPHeaderPHIRecipe *PhiRecipe = nullptr;
8685     if (Legal->isReductionVariable(Phi) || Legal->isFirstOrderRecurrence(Phi)) {
8686       VPValue *StartV = Operands[0];
8687       if (Legal->isReductionVariable(Phi)) {
8688         const RecurrenceDescriptor &RdxDesc =
8689             Legal->getReductionVars().find(Phi)->second;
8690         assert(RdxDesc.getRecurrenceStartValue() ==
8691                Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8692         PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV,
8693                                              CM.isInLoopReduction(Phi),
8694                                              CM.useOrderedReductions(RdxDesc));
8695       } else {
8696         PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV);
8697       }
8698 
8699       // Record the incoming value from the backedge, so we can add the incoming
8700       // value from the backedge after all recipes have been created.
8701       recordRecipeOf(cast<Instruction>(
8702           Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch())));
8703       PhisToFix.push_back(PhiRecipe);
8704     } else {
8705       // TODO: record backedge value for remaining pointer induction phis.
8706       assert(Phi->getType()->isPointerTy() &&
8707              "only pointer phis should be handled here");
8708       assert(Legal->getInductionVars().count(Phi) &&
8709              "Not an induction variable");
8710       InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
8711       VPValue *Start = Plan->getOrAddVPValue(II.getStartValue());
8712       PhiRecipe = new VPWidenPHIRecipe(Phi, Start);
8713     }
8714 
8715     return toVPRecipeResult(PhiRecipe);
8716   }
8717 
8718   if (isa<TruncInst>(Instr) &&
8719       (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands,
8720                                                Range, *Plan)))
8721     return toVPRecipeResult(Recipe);
8722 
8723   if (!shouldWiden(Instr, Range))
8724     return nullptr;
8725 
8726   if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
8727     return toVPRecipeResult(new VPWidenGEPRecipe(
8728         GEP, make_range(Operands.begin(), Operands.end()), OrigLoop));
8729 
8730   if (auto *SI = dyn_cast<SelectInst>(Instr)) {
8731     bool InvariantCond =
8732         PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
8733     return toVPRecipeResult(new VPWidenSelectRecipe(
8734         *SI, make_range(Operands.begin(), Operands.end()), InvariantCond));
8735   }
8736 
8737   return toVPRecipeResult(tryToWiden(Instr, Operands));
8738 }
8739 
8740 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8741                                                         ElementCount MaxVF) {
8742   assert(OrigLoop->isInnermost() && "Inner loop expected.");
8743 
8744   // Collect instructions from the original loop that will become trivially dead
8745   // in the vectorized loop. We don't need to vectorize these instructions. For
8746   // example, original induction update instructions can become dead because we
8747   // separately emit induction "steps" when generating code for the new loop.
8748   // Similarly, we create a new latch condition when setting up the structure
8749   // of the new loop, so the old one can become dead.
8750   SmallPtrSet<Instruction *, 4> DeadInstructions;
8751   collectTriviallyDeadInstructions(DeadInstructions);
8752 
8753   // Add assume instructions we need to drop to DeadInstructions, to prevent
8754   // them from being added to the VPlan.
  // TODO: We only need to drop assumes in blocks that get flattened. If the
8756   // control flow is preserved, we should keep them.
8757   auto &ConditionalAssumes = Legal->getConditionalAssumes();
8758   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
8759 
8760   MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
8761   // Dead instructions do not need sinking. Remove them from SinkAfter.
8762   for (Instruction *I : DeadInstructions)
8763     SinkAfter.erase(I);
8764 
  // Cannot sink instructions after dead instructions (there won't be any
  // recipes for them). Instead, redirect the sink target to the closest
  // preceding live instruction.
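  //
  // For example, if the recorded sink target is itself a dead induction update
  //   %iv.next = add i64 %iv, 1    ; dead, induction steps are emitted
  //                                ; separately
  // the walk below moves the target up to the nearest live instruction.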
8767   for (auto &P : Legal->getSinkAfter()) {
8768     Instruction *SinkTarget = P.second;
8769     Instruction *FirstInst = &*SinkTarget->getParent()->begin();
8770     (void)FirstInst;
8771     while (DeadInstructions.contains(SinkTarget)) {
8772       assert(
8773           SinkTarget != FirstInst &&
8774           "Must find a live instruction (at least the one feeding the "
8775           "first-order recurrence PHI) before reaching beginning of the block");
8776       SinkTarget = SinkTarget->getPrevNode();
8777       assert(SinkTarget != P.first &&
8778              "sink source equals target, no sinking required");
8779     }
8780     P.second = SinkTarget;
8781   }
8782 
8783   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8784   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8785     VFRange SubRange = {VF, MaxVFPlusOne};
8786     VPlans.push_back(
8787         buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
8788     VF = SubRange.End;
8789   }
8790 }
8791 
8792 // Add a VPCanonicalIVPHIRecipe starting at 0 to the header, a
8793 // CanonicalIVIncrement{NUW} VPInstruction to increment it by VF * UF and a
8794 // BranchOnCount VPInstruction to the latch.
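//
// Schematically, the added recipes are:
//   vector loop header:
//     EMIT %iv = CANONICAL-INDUCTION (start 0)
//   ...
//   vector loop latch:
//     EMIT %iv.next = %iv + (VF * UF)
//     EMIT branch-on-count %iv.next, vector-trip-count
// (Recipe spellings here are illustrative, not exact VPlan printing.)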
8795 static void addCanonicalIVRecipes(VPlan &Plan, Type *IdxTy, DebugLoc DL,
8796                                   bool HasNUW, bool IsVPlanNative) {
8797   Value *StartIdx = ConstantInt::get(IdxTy, 0);
8798   auto *StartV = Plan.getOrAddVPValue(StartIdx);
8799 
8800   auto *CanonicalIVPHI = new VPCanonicalIVPHIRecipe(StartV, DL);
8801   VPRegionBlock *TopRegion = Plan.getVectorLoopRegion();
8802   VPBasicBlock *Header = TopRegion->getEntryBasicBlock();
8803   if (IsVPlanNative)
8804     Header = cast<VPBasicBlock>(Header->getSingleSuccessor());
8805   Header->insert(CanonicalIVPHI, Header->begin());
8806 
8807   auto *CanonicalIVIncrement =
8808       new VPInstruction(HasNUW ? VPInstruction::CanonicalIVIncrementNUW
8809                                : VPInstruction::CanonicalIVIncrement,
8810                         {CanonicalIVPHI}, DL);
8811   CanonicalIVPHI->addOperand(CanonicalIVIncrement);
8812 
8813   VPBasicBlock *EB = TopRegion->getExitBasicBlock();
8814   if (IsVPlanNative) {
8815     EB = cast<VPBasicBlock>(EB->getSinglePredecessor());
8816     EB->setCondBit(nullptr);
8817   }
8818   EB->appendRecipe(CanonicalIVIncrement);
8819 
8820   auto *BranchOnCount =
8821       new VPInstruction(VPInstruction::BranchOnCount,
8822                         {CanonicalIVIncrement, &Plan.getVectorTripCount()}, DL);
8823   EB->appendRecipe(BranchOnCount);
8824 }
8825 
8826 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
8827     VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
8828     const MapVector<Instruction *, Instruction *> &SinkAfter) {
8829 
8830   SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
8831 
8832   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
8833 
8834   // ---------------------------------------------------------------------------
8835   // Pre-construction: record ingredients whose recipes we'll need to further
8836   // process after constructing the initial VPlan.
8837   // ---------------------------------------------------------------------------
8838 
8839   // Mark instructions we'll need to sink later and their targets as
8840   // ingredients whose recipe we'll need to record.
8841   for (auto &Entry : SinkAfter) {
8842     RecipeBuilder.recordRecipeOf(Entry.first);
8843     RecipeBuilder.recordRecipeOf(Entry.second);
8844   }
8845   for (auto &Reduction : CM.getInLoopReductionChains()) {
8846     PHINode *Phi = Reduction.first;
8847     RecurKind Kind =
8848         Legal->getReductionVars().find(Phi)->second.getRecurrenceKind();
8849     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
8850 
8851     RecipeBuilder.recordRecipeOf(Phi);
8852     for (auto &R : ReductionOperations) {
8853       RecipeBuilder.recordRecipeOf(R);
      // For min/max reductions, where we have a pair of icmp/select, we also
8855       // need to record the ICmp recipe, so it can be removed later.
8856       assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
8857              "Only min/max recurrences allowed for inloop reductions");
8858       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
8859         RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
8860     }
8861   }
8862 
8863   // For each interleave group which is relevant for this (possibly trimmed)
8864   // Range, add it to the set of groups to be later applied to the VPlan and add
8865   // placeholders for its members' Recipes which we'll be replacing with a
8866   // single VPInterleaveRecipe.
8867   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
8868     auto applyIG = [IG, this](ElementCount VF) -> bool {
8869       return (VF.isVector() && // Query is illegal for VF == 1
8870               CM.getWideningDecision(IG->getInsertPos(), VF) ==
8871                   LoopVectorizationCostModel::CM_Interleave);
8872     };
8873     if (!getDecisionAndClampRange(applyIG, Range))
8874       continue;
8875     InterleaveGroups.insert(IG);
8876     for (unsigned i = 0; i < IG->getFactor(); i++)
8877       if (Instruction *Member = IG->getMember(i))
8878         RecipeBuilder.recordRecipeOf(Member);
8879   };
8880 
8881   // ---------------------------------------------------------------------------
8882   // Build initial VPlan: Scan the body of the loop in a topological order to
8883   // visit each basic block after having visited its predecessor basic blocks.
8884   // ---------------------------------------------------------------------------
8885 
8886   // Create initial VPlan skeleton, with separate header and latch blocks.
8887   VPBasicBlock *HeaderVPBB = new VPBasicBlock();
8888   VPBasicBlock *LatchVPBB = new VPBasicBlock("vector.latch");
8889   VPBlockUtils::insertBlockAfter(LatchVPBB, HeaderVPBB);
8890   auto *TopRegion = new VPRegionBlock(HeaderVPBB, LatchVPBB, "vector loop");
8891   auto Plan = std::make_unique<VPlan>(TopRegion);
8892 
8893   Instruction *DLInst =
8894       getDebugLocFromInstOrOperands(Legal->getPrimaryInduction());
8895   addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(),
8896                         DLInst ? DLInst->getDebugLoc() : DebugLoc(),
8897                         !CM.foldTailByMasking(), false);
8898 
8899   // Scan the body of the loop in a topological order to visit each basic block
8900   // after having visited its predecessor basic blocks.
8901   LoopBlocksDFS DFS(OrigLoop);
8902   DFS.perform(LI);
8903 
8904   VPBasicBlock *VPBB = HeaderVPBB;
8905   SmallVector<VPWidenIntOrFpInductionRecipe *> InductionsToMove;
8906   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    // Relevant instructions from basic block BB will be grouped into VPRecipe
    // ingredients and will fill a new VPBasicBlock.
8909     unsigned VPBBsForBB = 0;
8910     VPBB->setName(BB->getName());
8911     Builder.setInsertPoint(VPBB);
8912 
8913     // Introduce each ingredient into VPlan.
    // TODO: Model and preserve debug intrinsics in VPlan.
8915     for (Instruction &I : BB->instructionsWithoutDebug()) {
8916       Instruction *Instr = &I;
8917 
8918       // First filter out irrelevant instructions, to ensure no recipes are
8919       // built for them.
8920       if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
8921         continue;
8922 
8923       SmallVector<VPValue *, 4> Operands;
8924       auto *Phi = dyn_cast<PHINode>(Instr);
8925       if (Phi && Phi->getParent() == OrigLoop->getHeader()) {
8926         Operands.push_back(Plan->getOrAddVPValue(
8927             Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())));
8928       } else {
8929         auto OpRange = Plan->mapToVPValues(Instr->operands());
8930         Operands = {OpRange.begin(), OpRange.end()};
8931       }
8932       if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe(
8933               Instr, Operands, Range, Plan)) {
8934         // If Instr can be simplified to an existing VPValue, use it.
8935         if (RecipeOrValue.is<VPValue *>()) {
8936           auto *VPV = RecipeOrValue.get<VPValue *>();
8937           Plan->addVPValue(Instr, VPV);
8938           // If the re-used value is a recipe, register the recipe for the
8939           // instruction, in case the recipe for Instr needs to be recorded.
8940           if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef()))
8941             RecipeBuilder.setRecipe(Instr, R);
8942           continue;
8943         }
8944         // Otherwise, add the new recipe.
8945         VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>();
8946         for (auto *Def : Recipe->definedValues()) {
8947           auto *UV = Def->getUnderlyingValue();
8948           Plan->addVPValue(UV, Def);
8949         }
8950 
8951         if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) &&
8952             HeaderVPBB->getFirstNonPhi() != VPBB->end()) {
8953           // Keep track of VPWidenIntOrFpInductionRecipes not in the phi section
8954           // of the header block. That can happen for truncates of induction
8955           // variables. Those recipes are moved to the phi section of the header
8956           // block after applying SinkAfter, which relies on the original
8957           // position of the trunc.
8958           assert(isa<TruncInst>(Instr));
8959           InductionsToMove.push_back(
8960               cast<VPWidenIntOrFpInductionRecipe>(Recipe));
8961         }
8962         RecipeBuilder.setRecipe(Instr, Recipe);
8963         VPBB->appendRecipe(Recipe);
8964         continue;
8965       }
8966 
      // Otherwise, if all widening options failed, the instruction is to be
      // replicated. This may create a successor for VPBB.
8969       VPBasicBlock *NextVPBB =
8970           RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan);
8971       if (NextVPBB != VPBB) {
8972         VPBB = NextVPBB;
8973         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
8974                                     : "");
8975       }
8976     }
8977 
8978     VPBlockUtils::insertBlockAfter(new VPBasicBlock(), VPBB);
8979     VPBB = cast<VPBasicBlock>(VPBB->getSingleSuccessor());
8980   }
8981 
8982   // Fold the last, empty block into its predecessor.
8983   VPBB = VPBlockUtils::tryToMergeBlockIntoPredecessor(VPBB);
8984   assert(VPBB && "expected to fold last (empty) block");
8985   // After here, VPBB should not be used.
8986   VPBB = nullptr;
8987 
8988   assert(isa<VPRegionBlock>(Plan->getEntry()) &&
8989          !Plan->getEntry()->getEntryBasicBlock()->empty() &&
8990          "entry block must be set to a VPRegionBlock having a non-empty entry "
8991          "VPBasicBlock");
8992   RecipeBuilder.fixHeaderPhis();
8993 
8994   // ---------------------------------------------------------------------------
8995   // Transform initial VPlan: Apply previously taken decisions, in order, to
8996   // bring the VPlan to its final state.
8997   // ---------------------------------------------------------------------------
8998 
8999   // Apply Sink-After legal constraints.
9000   auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * {
9001     auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent());
9002     if (Region && Region->isReplicator()) {
9003       assert(Region->getNumSuccessors() == 1 &&
9004              Region->getNumPredecessors() == 1 && "Expected SESE region!");
9005       assert(R->getParent()->size() == 1 &&
9006              "A recipe in an original replicator region must be the only "
9007              "recipe in its block");
9008       return Region;
9009     }
9010     return nullptr;
9011   };
9012   for (auto &Entry : SinkAfter) {
9013     VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
9014     VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
9015 
9016     auto *TargetRegion = GetReplicateRegion(Target);
9017     auto *SinkRegion = GetReplicateRegion(Sink);
9018     if (!SinkRegion) {
      // If the recipe being sunk is not in a replicate region, sink it
      // directly.
9020       if (TargetRegion) {
9021         // The target is in a replication region, make sure to move Sink to
9022         // the block after it, not into the replication region itself.
9023         VPBasicBlock *NextBlock =
9024             cast<VPBasicBlock>(TargetRegion->getSuccessors().front());
9025         Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi());
9026       } else
9027         Sink->moveAfter(Target);
9028       continue;
9029     }
9030 
    // The recipe being sunk is in a replicate region. Unhook the region from
    // the CFG.
9032     auto *SinkPred = SinkRegion->getSinglePredecessor();
9033     auto *SinkSucc = SinkRegion->getSingleSuccessor();
9034     VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion);
9035     VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc);
9036     VPBlockUtils::connectBlocks(SinkPred, SinkSucc);
9037 
9038     if (TargetRegion) {
9039       // The target recipe is also in a replicate region, move the sink region
9040       // after the target region.
9041       auto *TargetSucc = TargetRegion->getSingleSuccessor();
9042       VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc);
9043       VPBlockUtils::connectBlocks(TargetRegion, SinkRegion);
9044       VPBlockUtils::connectBlocks(SinkRegion, TargetSucc);
9045     } else {
      // The target is not in a replicate region. Move the whole sink region,
      // which should contain only a single recipe in its main block, to just
      // after the target recipe.
9049       auto *SplitBlock =
9050           Target->getParent()->splitAt(std::next(Target->getIterator()));
9051 
9052       auto *SplitPred = SplitBlock->getSinglePredecessor();
9053 
9054       VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock);
9055       VPBlockUtils::connectBlocks(SplitPred, SinkRegion);
9056       VPBlockUtils::connectBlocks(SinkRegion, SplitBlock);
9057     }
9058   }
9059 
9060   VPlanTransforms::removeRedundantCanonicalIVs(*Plan);
9061   VPlanTransforms::removeRedundantInductionCasts(*Plan);
9062 
9063   // Now that sink-after is done, move induction recipes for optimized truncates
9064   // to the phi section of the header block.
9065   for (VPWidenIntOrFpInductionRecipe *Ind : InductionsToMove)
9066     Ind->moveBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi());
9067 
9068   // Adjust the recipes for any inloop reductions.
9069   adjustRecipesForReductions(cast<VPBasicBlock>(TopRegion->getExit()), Plan,
9070                              RecipeBuilder, Range.Start);
9071 
9072   // Introduce a recipe to combine the incoming and previous values of a
9073   // first-order recurrence.
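  //
  // Conceptually, for a previous-iteration vector <p0, ..., pVF-1> and the
  // current vector <c0, ..., cVF-1>, the splice yields
  //   <pVF-1, c0, ..., cVF-2>
  // so each lane observes the value produced one iteration earlier.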
9074   for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) {
9075     auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R);
9076     if (!RecurPhi)
9077       continue;
9078 
9079     VPRecipeBase *PrevRecipe = RecurPhi->getBackedgeRecipe();
9080     VPBasicBlock *InsertBlock = PrevRecipe->getParent();
9081     auto *Region = GetReplicateRegion(PrevRecipe);
9082     if (Region)
9083       InsertBlock = cast<VPBasicBlock>(Region->getSingleSuccessor());
9084     if (Region || PrevRecipe->isPhi())
9085       Builder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi());
9086     else
9087       Builder.setInsertPoint(InsertBlock, std::next(PrevRecipe->getIterator()));
9088 
9089     auto *RecurSplice = cast<VPInstruction>(
9090         Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice,
9091                              {RecurPhi, RecurPhi->getBackedgeValue()}));
9092 
9093     RecurPhi->replaceAllUsesWith(RecurSplice);
9094     // Set the first operand of RecurSplice to RecurPhi again, after replacing
9095     // all users.
9096     RecurSplice->setOperand(0, RecurPhi);
9097   }
9098 
9099   // Interleave memory: for each Interleave Group we marked earlier as relevant
9100   // for this VPlan, replace the Recipes widening its memory instructions with a
9101   // single VPInterleaveRecipe at its insertion point.
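  //
  // For example (schematic), a factor-2 group of loads
  //   %a = A[2*i];  %b = A[2*i + 1]
  // is replaced by a single wide load at the group's insertion point, from
  // which both members' lanes are extracted.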
9102   for (auto IG : InterleaveGroups) {
9103     auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
9104         RecipeBuilder.getRecipe(IG->getInsertPos()));
9105     SmallVector<VPValue *, 4> StoredValues;
9106     for (unsigned i = 0; i < IG->getFactor(); ++i)
9107       if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) {
9108         auto *StoreR =
9109             cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI));
9110         StoredValues.push_back(StoreR->getStoredValue());
9111       }
9112 
9113     auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues,
9114                                         Recipe->getMask());
9115     VPIG->insertBefore(Recipe);
9116     unsigned J = 0;
9117     for (unsigned i = 0; i < IG->getFactor(); ++i)
9118       if (Instruction *Member = IG->getMember(i)) {
9119         if (!Member->getType()->isVoidTy()) {
9120           VPValue *OriginalV = Plan->getVPValue(Member);
9121           Plan->removeVPValueFor(Member);
9122           Plan->addVPValue(Member, VPIG->getVPValue(J));
9123           OriginalV->replaceAllUsesWith(VPIG->getVPValue(J));
9124           J++;
9125         }
9126         RecipeBuilder.getRecipe(Member)->eraseFromParent();
9127       }
9128   }
9129 
  // From this point onwards, VPlan-to-VPlan transformations may change the
  // plan in ways that make looking up VPValues by their original IR values
  // incorrect.
9132   Plan->disableValue2VPValue();
9133 
9134   VPlanTransforms::optimizeInductions(*Plan, *PSE.getSE());
9135   VPlanTransforms::sinkScalarOperands(*Plan);
9136   VPlanTransforms::mergeReplicateRegions(*Plan);
9137   VPlanTransforms::removeDeadRecipes(*Plan, *OrigLoop);
9138 
9139   std::string PlanName;
9140   raw_string_ostream RSO(PlanName);
9141   ElementCount VF = Range.Start;
9142   Plan->addVF(VF);
9143   RSO << "Initial VPlan for VF={" << VF;
9144   for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) {
9145     Plan->addVF(VF);
9146     RSO << "," << VF;
9147   }
9148   RSO << "},UF>=1";
9149   RSO.flush();
9150   Plan->setName(PlanName);
9151 
9152   // Fold Exit block into its predecessor if possible.
9153   // TODO: Fold block earlier once all VPlan transforms properly maintain a
9154   // VPBasicBlock as exit.
9155   VPBlockUtils::tryToMergeBlockIntoPredecessor(TopRegion->getExit());
9156 
9157   assert(VPlanVerifier::verifyPlanIsValid(*Plan) && "VPlan is invalid");
9158   return Plan;
9159 }
9160 
9161 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
  // Outer loop handling: outer loops may require CFG and instruction-level
9163   // transformations before even evaluating whether vectorization is profitable.
9164   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
9165   // the vectorization pipeline.
9166   assert(!OrigLoop->isInnermost());
9167   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
9168 
9169   // Create new empty VPlan
9170   auto Plan = std::make_unique<VPlan>();
9171 
9172   // Build hierarchical CFG
9173   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
9174   HCFGBuilder.buildHierarchicalCFG();
9175 
9176   for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
9177        VF *= 2)
9178     Plan->addVF(VF);
9179 
9180   if (EnableVPlanPredication) {
9181     VPlanPredicator VPP(*Plan);
9182     VPP.predicate();
9183 
9184     // Avoid running transformation to recipes until masked code generation in
9185     // VPlan-native path is in place.
9186     return Plan;
9187   }
9188 
9189   SmallPtrSet<Instruction *, 1> DeadInstructions;
9190   VPlanTransforms::VPInstructionsToVPRecipes(
9191       OrigLoop, Plan,
9192       [this](PHINode *P) { return Legal->getIntOrFpInductionDescriptor(P); },
9193       DeadInstructions, *PSE.getSE());
9194 
9195   addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(), DebugLoc(),
9196                         true, true);
9197   return Plan;
9198 }
9199 
// Adjust the recipes for reductions. For in-loop reductions the chain of
// instructions leading from the loop exit instruction to the phi needs to be
// converted to reductions, with one operand being vector and the other being
// the scalar reduction chain. For other reductions, a select is introduced
// between the phi and live-out recipes when folding the tail.
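//
// For example (schematic), for an in-loop integer add reduction, the widened
//   WIDEN %sum.next = add %vec.x, %sum
// is replaced below by
//   REDUCE %sum.next = %sum + reduce.add(%vec.x)
// keeping the scalar chain as one operand and reducing the vector operand.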
9205 void LoopVectorizationPlanner::adjustRecipesForReductions(
9206     VPBasicBlock *LatchVPBB, VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder,
9207     ElementCount MinVF) {
9208   for (auto &Reduction : CM.getInLoopReductionChains()) {
9209     PHINode *Phi = Reduction.first;
9210     const RecurrenceDescriptor &RdxDesc =
9211         Legal->getReductionVars().find(Phi)->second;
9212     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9213 
9214     if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc))
9215       continue;
9216 
    // ReductionOperations are ordered top-down from the phi's use to the
    // LoopExitValue. We keep track of the previous item (the Chain) to tell
9219     // which of the two operands will remain scalar and which will be reduced.
9220     // For minmax the chain will be the select instructions.
9221     Instruction *Chain = Phi;
9222     for (Instruction *R : ReductionOperations) {
9223       VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
9224       RecurKind Kind = RdxDesc.getRecurrenceKind();
9225 
9226       VPValue *ChainOp = Plan->getVPValue(Chain);
9227       unsigned FirstOpId;
9228       assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
9229              "Only min/max recurrences allowed for inloop reductions");
9230       // Recognize a call to the llvm.fmuladd intrinsic.
9231       bool IsFMulAdd = (Kind == RecurKind::FMulAdd);
9232       assert((!IsFMulAdd || RecurrenceDescriptor::isFMulAddIntrinsic(R)) &&
9233              "Expected instruction to be a call to the llvm.fmuladd intrinsic");
9234       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9235         assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
9236                "Expected to replace a VPWidenSelectSC");
9237         FirstOpId = 1;
9238       } else {
9239         assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe) ||
9240                 (IsFMulAdd && isa<VPWidenCallRecipe>(WidenRecipe))) &&
9241                "Expected to replace a VPWidenSC");
9242         FirstOpId = 0;
9243       }
9244       unsigned VecOpId =
9245           R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
9246       VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));
9247 
9248       auto *CondOp = CM.blockNeedsPredicationForAnyReason(R->getParent())
9249                          ? RecipeBuilder.createBlockInMask(R->getParent(), Plan)
9250                          : nullptr;
9251 
9252       if (IsFMulAdd) {
9253         // If the instruction is a call to the llvm.fmuladd intrinsic then we
9254         // need to create an fmul recipe to use as the vector operand for the
9255         // fadd reduction.
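        //
        // Schematically, fmuladd(%a, %b, %sum) becomes
        //   %fmul = fmul %a, %b
        //   REDUCE %sum.next = %sum + reduce.fadd(%fmul)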
9256         VPInstruction *FMulRecipe = new VPInstruction(
9257             Instruction::FMul, {VecOp, Plan->getVPValue(R->getOperand(1))});
9258         FMulRecipe->setFastMathFlags(R->getFastMathFlags());
9259         WidenRecipe->getParent()->insert(FMulRecipe,
9260                                          WidenRecipe->getIterator());
9261         VecOp = FMulRecipe;
9262       }
      VPReductionRecipe *RedRecipe =
          new VPReductionRecipe(&RdxDesc, R, ChainOp, VecOp, CondOp, TTI);
      WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
      Plan->removeVPValueFor(R);
      Plan->addVPValue(R, RedRecipe);
      WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator());
      WidenRecipe->eraseFromParent();
9271 
9272       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9273         VPRecipeBase *CompareRecipe =
9274             RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
9275         assert(isa<VPWidenRecipe>(CompareRecipe) &&
9276                "Expected to replace a VPWidenSC");
9277         assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 &&
9278                "Expected no remaining users");
9279         CompareRecipe->eraseFromParent();
9280       }
9281       Chain = R;
9282     }
9283   }
9284 
9285   // If tail is folded by masking, introduce selects between the phi
9286   // and the live-out instruction of each reduction, at the beginning of the
9287   // dedicated latch block.
9288   if (CM.foldTailByMasking()) {
9289     Builder.setInsertPoint(LatchVPBB, LatchVPBB->begin());
9290     for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) {
9291       VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
9292       if (!PhiR || PhiR->isInLoop())
9293         continue;
9294       VPValue *Cond =
9295           RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
9296       VPValue *Red = PhiR->getBackedgeValue();
9297       assert(cast<VPRecipeBase>(Red->getDef())->getParent() != LatchVPBB &&
9298              "reduction recipe must be defined before latch");
9299       Builder.createNaryOp(Instruction::Select, {Cond, Red, PhiR});
9300     }
9301   }
9302 }
9303 
9304 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
9305 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
9306                                VPSlotTracker &SlotTracker) const {
9307   O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
9308   IG->getInsertPos()->printAsOperand(O, false);
9309   O << ", ";
9310   getAddr()->printAsOperand(O, SlotTracker);
9311   VPValue *Mask = getMask();
9312   if (Mask) {
9313     O << ", ";
9314     Mask->printAsOperand(O, SlotTracker);
9315   }
9316 
9317   unsigned OpIdx = 0;
9318   for (unsigned i = 0; i < IG->getFactor(); ++i) {
9319     if (!IG->getMember(i))
9320       continue;
9321     if (getNumStoreOperands() > 0) {
9322       O << "\n" << Indent << "  store ";
9323       getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker);
9324       O << " to index " << i;
9325     } else {
9326       O << "\n" << Indent << "  ";
9327       getVPValue(OpIdx)->printAsOperand(O, SlotTracker);
9328       O << " = load from index " << i;
9329     }
9330     ++OpIdx;
9331   }
9332 }
9333 #endif
9334 
9335 void VPWidenCallRecipe::execute(VPTransformState &State) {
9336   State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this,
9337                                   *this, State);
9338 }
9339 
9340 void VPWidenSelectRecipe::execute(VPTransformState &State) {
9341   auto &I = *cast<SelectInst>(getUnderlyingInstr());
9342   State.ILV->setDebugLocFromInst(&I);
9343 
  // The condition can be loop invariant but still defined inside the
9345   // loop. This means that we can't just use the original 'cond' value.
9346   // We have to take the 'vectorized' value and pick the first lane.
9347   // Instcombine will make this a no-op.
9348   auto *InvarCond =
9349       InvariantCond ? State.get(getOperand(0), VPIteration(0, 0)) : nullptr;
9350 
9351   for (unsigned Part = 0; Part < State.UF; ++Part) {
9352     Value *Cond = InvarCond ? InvarCond : State.get(getOperand(0), Part);
9353     Value *Op0 = State.get(getOperand(1), Part);
9354     Value *Op1 = State.get(getOperand(2), Part);
9355     Value *Sel = State.Builder.CreateSelect(Cond, Op0, Op1);
9356     State.set(this, Sel, Part);
9357     State.ILV->addMetadata(Sel, &I);
9358   }
9359 }
9360 
9361 void VPWidenRecipe::execute(VPTransformState &State) {
9362   auto &I = *cast<Instruction>(getUnderlyingValue());
9363   auto &Builder = State.Builder;
9364   switch (I.getOpcode()) {
9365   case Instruction::Call:
9366   case Instruction::Br:
9367   case Instruction::PHI:
9368   case Instruction::GetElementPtr:
9369   case Instruction::Select:
9370     llvm_unreachable("This instruction is handled by a different recipe.");
9371   case Instruction::UDiv:
9372   case Instruction::SDiv:
9373   case Instruction::SRem:
9374   case Instruction::URem:
9375   case Instruction::Add:
9376   case Instruction::FAdd:
9377   case Instruction::Sub:
9378   case Instruction::FSub:
9379   case Instruction::FNeg:
9380   case Instruction::Mul:
9381   case Instruction::FMul:
9382   case Instruction::FDiv:
9383   case Instruction::FRem:
9384   case Instruction::Shl:
9385   case Instruction::LShr:
9386   case Instruction::AShr:
9387   case Instruction::And:
9388   case Instruction::Or:
9389   case Instruction::Xor: {
9390     // Just widen unops and binops.
9391     State.ILV->setDebugLocFromInst(&I);
9392 
9393     for (unsigned Part = 0; Part < State.UF; ++Part) {
9394       SmallVector<Value *, 2> Ops;
9395       for (VPValue *VPOp : operands())
9396         Ops.push_back(State.get(VPOp, Part));
9397 
9398       Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
9399 
9400       if (auto *VecOp = dyn_cast<Instruction>(V)) {
9401         VecOp->copyIRFlags(&I);
9402 
9403         // If the instruction is vectorized and was in a basic block that needed
9404         // predication, we can't propagate poison-generating flags (nuw/nsw,
9405         // exact, etc.). The control flow has been linearized and the
        // instruction is no longer guarded by the predicate, which could cause
        // the flag properties to no longer hold.
9408         if (State.MayGeneratePoisonRecipes.contains(this))
9409           VecOp->dropPoisonGeneratingFlags();
9410       }
9411 
9412       // Use this vector value for all users of the original instruction.
9413       State.set(this, V, Part);
9414       State.ILV->addMetadata(V, &I);
9415     }
9416 
9417     break;
9418   }
9419   case Instruction::ICmp:
9420   case Instruction::FCmp: {
9421     // Widen compares. Generate vector compares.
9422     bool FCmp = (I.getOpcode() == Instruction::FCmp);
9423     auto *Cmp = cast<CmpInst>(&I);
9424     State.ILV->setDebugLocFromInst(Cmp);
9425     for (unsigned Part = 0; Part < State.UF; ++Part) {
9426       Value *A = State.get(getOperand(0), Part);
9427       Value *B = State.get(getOperand(1), Part);
9428       Value *C = nullptr;
9429       if (FCmp) {
9430         // Propagate fast math flags.
9431         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
9432         Builder.setFastMathFlags(Cmp->getFastMathFlags());
9433         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
9434       } else {
9435         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
9436       }
9437       State.set(this, C, Part);
9438       State.ILV->addMetadata(C, &I);
9439     }
9440 
9441     break;
9442   }
9443 
9444   case Instruction::ZExt:
9445   case Instruction::SExt:
9446   case Instruction::FPToUI:
9447   case Instruction::FPToSI:
9448   case Instruction::FPExt:
9449   case Instruction::PtrToInt:
9450   case Instruction::IntToPtr:
9451   case Instruction::SIToFP:
9452   case Instruction::UIToFP:
9453   case Instruction::Trunc:
9454   case Instruction::FPTrunc:
9455   case Instruction::BitCast: {
9456     auto *CI = cast<CastInst>(&I);
9457     State.ILV->setDebugLocFromInst(CI);
9458 
    // Vectorize casts.
9460     Type *DestTy = (State.VF.isScalar())
9461                        ? CI->getType()
9462                        : VectorType::get(CI->getType(), State.VF);
9463 
9464     for (unsigned Part = 0; Part < State.UF; ++Part) {
9465       Value *A = State.get(getOperand(0), Part);
9466       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
9467       State.set(this, Cast, Part);
9468       State.ILV->addMetadata(Cast, &I);
9469     }
9470     break;
9471   }
9472   default:
9473     // This instruction is not vectorized by simple widening.
9474     LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
9475     llvm_unreachable("Unhandled instruction!");
9476   } // end of switch.
9477 }
9478 
9479 void VPWidenGEPRecipe::execute(VPTransformState &State) {
9480   auto *GEP = cast<GetElementPtrInst>(getUnderlyingInstr());
9481   // Construct a vector GEP by widening the operands of the scalar GEP as
9482   // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
9483   // results in a vector of pointers when at least one operand of the GEP
9484   // is vector-typed. Thus, to keep the representation compact, we only use
9485   // vector-typed operands for loop-varying values.
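  //
  // For example (schematic), a GEP with a loop-varying index widens to
  //   getelementptr T, T* %base, <VF x i64> %wide.index
  // producing a vector of pointers.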
9486 
9487   if (State.VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
9488     // If we are vectorizing, but the GEP has only loop-invariant operands,
9489     // the GEP we build (by only using vector-typed operands for
9490     // loop-varying values) would be a scalar pointer. Thus, to ensure we
9491     // produce a vector of pointers, we need to either arbitrarily pick an
9492     // operand to broadcast, or broadcast a clone of the original GEP.
9493     // Here, we broadcast a clone of the original.
9494     //
9495     // TODO: If at some point we decide to scalarize instructions having
9496     //       loop-invariant operands, this special case will no longer be
9497     //       required. We would add the scalarization decision to
9498     //       collectLoopScalars() and teach getVectorValue() to broadcast
9499     //       the lane-zero scalar value.
9500     auto *Clone = State.Builder.Insert(GEP->clone());
9501     for (unsigned Part = 0; Part < State.UF; ++Part) {
9502       Value *EntryPart = State.Builder.CreateVectorSplat(State.VF, Clone);
9503       State.set(this, EntryPart, Part);
9504       State.ILV->addMetadata(EntryPart, GEP);
9505     }
9506   } else {
9507     // If the GEP has at least one loop-varying operand, we are sure to
9508     // produce a vector of pointers. But if we are only unrolling, we want
9509     // to produce a scalar GEP for each unroll part. Thus, the GEP we
9510     // produce with the code below will be scalar (if VF == 1) or vector
9511     // (otherwise). Note that for the unroll-only case, we still maintain
9512     // values in the vector mapping with initVector, as we do for other
9513     // instructions.
9514     for (unsigned Part = 0; Part < State.UF; ++Part) {
9515       // The pointer operand of the new GEP. If it's loop-invariant, we
9516       // won't broadcast it.
9517       auto *Ptr = IsPtrLoopInvariant
9518                       ? State.get(getOperand(0), VPIteration(0, 0))
9519                       : State.get(getOperand(0), Part);
9520 
9521       // Collect all the indices for the new GEP. If any index is
9522       // loop-invariant, we won't broadcast it.
9523       SmallVector<Value *, 4> Indices;
9524       for (unsigned I = 1, E = getNumOperands(); I < E; I++) {
9525         VPValue *Operand = getOperand(I);
9526         if (IsIndexLoopInvariant[I - 1])
9527           Indices.push_back(State.get(Operand, VPIteration(0, 0)));
9528         else
9529           Indices.push_back(State.get(Operand, Part));
9530       }
9531 
9532       // If the GEP instruction is vectorized and was in a basic block that
9533       // needed predication, we can't propagate the poison-generating 'inbounds'
9534       // flag. The control flow has been linearized and the GEP is no longer
      // guarded by the predicate, which could cause the 'inbounds' property to
      // no longer hold.
9537       bool IsInBounds =
9538           GEP->isInBounds() && State.MayGeneratePoisonRecipes.count(this) == 0;
9539 
      // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
      // but it should be a vector otherwise.
9542       auto *NewGEP = IsInBounds
9543                          ? State.Builder.CreateInBoundsGEP(
9544                                GEP->getSourceElementType(), Ptr, Indices)
9545                          : State.Builder.CreateGEP(GEP->getSourceElementType(),
9546                                                    Ptr, Indices);
9547       assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
9548              "NewGEP is not a pointer vector");
9549       State.set(this, NewGEP, Part);
9550       State.ILV->addMetadata(NewGEP, GEP);
9551     }
9552   }
9553 }
9554 
9555 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
9556   assert(!State.Instance && "Int or FP induction being replicated.");
9557 
9558   Value *Start = getStartValue()->getLiveInIRValue();
9559   const InductionDescriptor &ID = getInductionDescriptor();
9560   TruncInst *Trunc = getTruncInst();
9561   IRBuilderBase &Builder = State.Builder;
9562   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
9563   assert(State.VF.isVector() && "must have vector VF");
9564 
9565   // The value from the original loop to which we are mapping the new induction
9566   // variable.
9567   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
9568 
9569   auto &DL = EntryVal->getModule()->getDataLayout();
9570 
9571   // Generate code for the induction step. Note that induction steps are
  // required to be loop-invariant.
9573   auto CreateStepValue = [&](const SCEV *Step) -> Value * {
9574     if (SE.isSCEVable(IV->getType())) {
9575       SCEVExpander Exp(SE, DL, "induction");
9576       return Exp.expandCodeFor(Step, Step->getType(),
9577                                State.CFG.VectorPreHeader->getTerminator());
9578     }
9579     return cast<SCEVUnknown>(Step)->getValue();
9580   };
9581 
9582   // Fast-math-flags propagate from the original induction instruction.
9583   IRBuilder<>::FastMathFlagGuard FMFG(Builder);
9584   if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp()))
9585     Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags());
9586 
9587   // Now do the actual transformations, and start with creating the step value.
9588   Value *Step = CreateStepValue(ID.getStep());
9589 
9590   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
9591          "Expected either an induction phi-node or a truncate of it!");
9592 
  // Construct the initial value of the vector IV in the vector loop
  // preheader.
9594   auto CurrIP = Builder.saveIP();
9595   Builder.SetInsertPoint(State.CFG.VectorPreHeader->getTerminator());
9596   if (isa<TruncInst>(EntryVal)) {
9597     assert(Start->getType()->isIntegerTy() &&
9598            "Truncation requires an integer type");
9599     auto *TruncType = cast<IntegerType>(EntryVal->getType());
9600     Step = Builder.CreateTrunc(Step, TruncType);
9601     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
9602   }
9603 
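  // Create the initial step vector; schematically, SteppedStart is
  //   <Start, Start + Step, ..., Start + (VF-1) * Step>
  // (using the induction's binary opcode for floating-point inductions).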
9604   Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0);
9605   Value *SplatStart = Builder.CreateVectorSplat(State.VF, Start);
9606   Value *SteppedStart = getStepVector(
9607       SplatStart, Zero, Step, ID.getInductionOpcode(), State.VF, State.Builder);
9608 
9609   // We create vector phi nodes for both integer and floating-point induction
9610   // variables. Here, we determine the kind of arithmetic we will perform.
9611   Instruction::BinaryOps AddOp;
9612   Instruction::BinaryOps MulOp;
9613   if (Step->getType()->isIntegerTy()) {
9614     AddOp = Instruction::Add;
9615     MulOp = Instruction::Mul;
9616   } else {
9617     AddOp = ID.getInductionOpcode();
9618     MulOp = Instruction::FMul;
9619   }
9620 
9621   // Multiply the vectorization factor by the step using integer or
9622   // floating-point arithmetic as appropriate.
9623   Type *StepType = Step->getType();
9624   Value *RuntimeVF;
9625   if (Step->getType()->isFloatingPointTy())
9626     RuntimeVF = getRuntimeVFAsFloat(Builder, StepType, State.VF);
9627   else
9628     RuntimeVF = getRuntimeVF(Builder, StepType, State.VF);
9629   Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF);
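  // For example, with fixed VF = 4 (VScale = 1) and an integer step of 2,
  // RuntimeVF is 4 and Mul is 8: each unroll part advances the vector IV by
  // VF * Step elements.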
9630 
9631   // Create a vector splat to use in the induction update.
9632   //
9633   // FIXME: If the step is non-constant, we create the vector splat with
9634   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
9635   //        handle a constant vector splat.
9636   Value *SplatVF = isa<Constant>(Mul)
9637                        ? ConstantVector::getSplat(State.VF, cast<Constant>(Mul))
9638                        : Builder.CreateVectorSplat(State.VF, Mul);
9639   Builder.restoreIP(CurrIP);
9640 
9641   // We may need to add the step a number of times, depending on the unroll
9642   // factor. The last of those goes into the PHI.
9643   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
9644                                     &*State.CFG.PrevBB->getFirstInsertionPt());
9645   VecInd->setDebugLoc(EntryVal->getDebugLoc());
9646   Instruction *LastInduction = VecInd;
9647   for (unsigned Part = 0; Part < State.UF; ++Part) {
9648     State.set(this, LastInduction, Part);
9649 
9650     if (isa<TruncInst>(EntryVal))
9651       State.ILV->addMetadata(LastInduction, EntryVal);
9652 
9653     LastInduction = cast<Instruction>(
9654         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"));
9655     LastInduction->setDebugLoc(EntryVal->getDebugLoc());
9656   }
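  // For example, with UF = 2 the loop above produces (sketch):
  //   %vec.ind      = phi [ SteppedStart, pre ], [ %vec.ind.next, latch ]
  //   %step.add     = add %vec.ind, SplatVF     ; value for part 1
  //   %vec.ind.next = add %step.add, SplatVF    ; renamed and moved below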
9657 
9658   // Move the last step to the end of the latch block. This ensures consistent
9659   // placement of all induction updates.
9660   auto *LoopVectorLatch =
9661       State.LI->getLoopFor(State.CFG.PrevBB)->getLoopLatch();
9662   auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
9663   LastInduction->moveBefore(Br);
9664   LastInduction->setName("vec.ind.next");
9665 
9666   VecInd->addIncoming(SteppedStart, State.CFG.VectorPreHeader);
9667   VecInd->addIncoming(LastInduction, LoopVectorLatch);
9668 }
9669 
9670 void VPScalarIVStepsRecipe::execute(VPTransformState &State) {
9671   assert(!State.Instance && "VPScalarIVStepsRecipe being replicated.");
9672 
9673   // Fast-math-flags propagate from the original induction instruction.
9674   IRBuilder<>::FastMathFlagGuard FMFG(State.Builder);
9675   if (IndDesc.getInductionBinOp() &&
9676       isa<FPMathOperator>(IndDesc.getInductionBinOp()))
9677     State.Builder.setFastMathFlags(
9678         IndDesc.getInductionBinOp()->getFastMathFlags());
9679 
9680   Value *Step = State.get(getStepValue(), VPIteration(0, 0));
9681   auto CreateScalarIV = [&](Value *&Step) -> Value * {
9682     Value *ScalarIV = State.get(getCanonicalIV(), VPIteration(0, 0));
9683     auto *CanonicalIV = State.get(getParent()->getPlan()->getCanonicalIV(), 0);
9684     if (!isCanonical() || CanonicalIV->getType() != Ty) {
9685       ScalarIV =
9686           Ty->isIntegerTy()
9687               ? State.Builder.CreateSExtOrTrunc(ScalarIV, Ty)
9688               : State.Builder.CreateCast(Instruction::SIToFP, ScalarIV, Ty);
9689       ScalarIV = emitTransformedIndex(State.Builder, ScalarIV,
9690                                       getStartValue()->getLiveInIRValue(), Step,
9691                                       IndDesc);
9692       ScalarIV->setName("offset.idx");
9693     }
9694     if (TruncToTy) {
9695       assert(Step->getType()->isIntegerTy() &&
9696              "Truncation requires an integer step");
9697       ScalarIV = State.Builder.CreateTrunc(ScalarIV, TruncToTy);
9698       Step = State.Builder.CreateTrunc(Step, TruncToTy);
9699     }
9700     return ScalarIV;
9701   };
9702 
9703   Value *ScalarIV = CreateScalarIV(Step);
9704   if (State.VF.isVector()) {
9705     buildScalarSteps(ScalarIV, Step, IndDesc, this, State);
9706     return;
9707   }
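  // With a scalar VF, each unrolled part still needs its own copy of the IV:
  // part P receives ScalarIV + (P * VF) * Step, i.e. ScalarIV + P * Step for
  // VF = 1.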
9708 
9709   for (unsigned Part = 0; Part < State.UF; ++Part) {
9710     assert(!State.VF.isScalable() && "scalable vectors not yet supported.");
9711     Value *EntryPart;
9712     if (Step->getType()->isFloatingPointTy()) {
9713       Value *StartIdx =
9714           getRuntimeVFAsFloat(State.Builder, Step->getType(), State.VF * Part);
9715       // Floating-point operations inherit FMF via the builder's flags.
9716       Value *MulOp = State.Builder.CreateFMul(StartIdx, Step);
9717       EntryPart = State.Builder.CreateBinOp(IndDesc.getInductionOpcode(),
9718                                             ScalarIV, MulOp);
9719     } else {
9720       Value *StartIdx =
9721           getRuntimeVF(State.Builder, Step->getType(), State.VF * Part);
9722       EntryPart = State.Builder.CreateAdd(
9723           ScalarIV, State.Builder.CreateMul(StartIdx, Step), "induction");
9724     }
9725     State.set(this, EntryPart, Part);
9726   }
9727 }
9728 
9729 void VPWidenPHIRecipe::execute(VPTransformState &State) {
9730   State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), this,
9731                                  State);
9732 }
9733 
9734 void VPBlendRecipe::execute(VPTransformState &State) {
9735   State.ILV->setDebugLocFromInst(Phi, &State.Builder);
9736   // We know that all PHIs in non-header blocks are converted into
9737   // selects, so we don't have to worry about the insertion order and we
9738   // can just use the builder.
9739   // At this point we generate the predication tree. There may be
9740   // duplications since this is a simple recursive scan, but future
9741   // optimizations will clean it up.
9742 
9743   unsigned NumIncoming = getNumIncomingValues();
9744 
  // Generate a sequence of selects of the form:
  // SELECT(Mask3, In3,
  //        SELECT(Mask2, In2,
  //               SELECT(Mask1, In1,
  //                      In0)))
  // Note that Mask0 is never used: lanes for which no path reaches this phi,
  // and which are essentially undef, are taken from In0.
9752   InnerLoopVectorizer::VectorParts Entry(State.UF);
9753   for (unsigned In = 0; In < NumIncoming; ++In) {
9754     for (unsigned Part = 0; Part < State.UF; ++Part) {
9755       // We might have single edge PHIs (blocks) - use an identity
9756       // 'select' for the first PHI operand.
9757       Value *In0 = State.get(getIncomingValue(In), Part);
9758       if (In == 0)
9759         Entry[Part] = In0; // Initialize with the first incoming value.
9760       else {
9761         // Select between the current value and the previous incoming edge
9762         // based on the incoming mask.
9763         Value *Cond = State.get(getMask(In), Part);
9764         Entry[Part] =
9765             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
9766       }
9767     }
9768   }
9769   for (unsigned Part = 0; Part < State.UF; ++Part)
9770     State.set(this, Entry[Part], Part);
9771 }
9772 
9773 void VPInterleaveRecipe::execute(VPTransformState &State) {
9774   assert(!State.Instance && "Interleave group being replicated.");
9775   State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(),
9776                                       getStoredValues(), getMask());
9777 }
9778 
9779 void VPReductionRecipe::execute(VPTransformState &State) {
9780   assert(!State.Instance && "Reduction being replicated.");
9781   Value *PrevInChain = State.get(getChainOp(), 0);
9782   RecurKind Kind = RdxDesc->getRecurrenceKind();
9783   bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc);
9784   // Propagate the fast-math flags carried by the underlying instruction.
9785   IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
9786   State.Builder.setFastMathFlags(RdxDesc->getFastMathFlags());
9787   for (unsigned Part = 0; Part < State.UF; ++Part) {
9788     Value *NewVecOp = State.get(getVecOp(), Part);
9789     if (VPValue *Cond = getCondOp()) {
9790       Value *NewCond = State.get(Cond, Part);
9791       VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
9792       Value *Iden = RdxDesc->getRecurrenceIdentity(
9793           Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags());
9794       Value *IdenVec =
9795           State.Builder.CreateVectorSplat(VecTy->getElementCount(), Iden);
9796       Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
9797       NewVecOp = Select;
9798     }
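    // For example, for a conditional integer add reduction the identity is 0,
    // so masked-off lanes contribute nothing (sketch):
    //   %sel = select <4 x i1> %cond, <4 x i32> %vec, <4 x i32> zeroinitializer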
9799     Value *NewRed;
9800     Value *NextInChain;
9801     if (IsOrdered) {
9802       if (State.VF.isVector())
9803         NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp,
9804                                         PrevInChain);
9805       else
9806         NewRed = State.Builder.CreateBinOp(
9807             (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), PrevInChain,
9808             NewVecOp);
9809       PrevInChain = NewRed;
9810     } else {
9811       PrevInChain = State.get(getChainOp(), Part);
9812       NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
9813     }
9814     if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9815       NextInChain =
9816           createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
9817                          NewRed, PrevInChain);
9818     } else if (IsOrdered)
9819       NextInChain = NewRed;
9820     else
9821       NextInChain = State.Builder.CreateBinOp(
9822           (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), NewRed,
9823           PrevInChain);
9824     State.set(this, NextInChain, Part);
9825   }
9826 }
9827 
9828 void VPReplicateRecipe::execute(VPTransformState &State) {
9829   if (State.Instance) { // Generate a single instance.
9830     assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
9831     State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *State.Instance,
9832                                     IsPredicated, State);
9833     // Insert scalar instance packing it into a vector.
9834     if (AlsoPack && State.VF.isVector()) {
9835       // If we're constructing lane 0, initialize to start from poison.
9836       if (State.Instance->Lane.isFirstLane()) {
9837         assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
9838         Value *Poison = PoisonValue::get(
9839             VectorType::get(getUnderlyingValue()->getType(), State.VF));
9840         State.set(this, Poison, State.Instance->Part);
9841       }
9842       State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
9843     }
9844     return;
9845   }
9846 
  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane for
  // each of the UF parts.
9850   unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
9851   assert((!State.VF.isScalable() || IsUniform) &&
9852          "Can't scalarize a scalable vector");
9853   for (unsigned Part = 0; Part < State.UF; ++Part)
9854     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
9855       State.ILV->scalarizeInstruction(getUnderlyingInstr(), this,
9856                                       VPIteration(Part, Lane), IsPredicated,
9857                                       State);
9858 }
9859 
9860 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
9861   assert(State.Instance && "Branch on Mask works only on single instance.");
9862 
9863   unsigned Part = State.Instance->Part;
9864   unsigned Lane = State.Instance->Lane.getKnownLane();
9865 
9866   Value *ConditionBit = nullptr;
9867   VPValue *BlockInMask = getMask();
9868   if (BlockInMask) {
9869     ConditionBit = State.get(BlockInMask, Part);
9870     if (ConditionBit->getType()->isVectorTy())
9871       ConditionBit = State.Builder.CreateExtractElement(
9872           ConditionBit, State.Builder.getInt32(Lane));
9873   } else // Block in mask is all-one.
9874     ConditionBit = State.Builder.getTrue();
9875 
9876   // Replace the temporary unreachable terminator with a new conditional branch,
9877   // whose two destinations will be set later when they are created.
9878   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
9879   assert(isa<UnreachableInst>(CurrentTerminator) &&
9880          "Expected to replace unreachable terminator with conditional branch.");
9881   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
9882   CondBr->setSuccessor(0, nullptr);
9883   ReplaceInstWithInst(CurrentTerminator, CondBr);
9884 }
9885 
9886 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
9887   assert(State.Instance && "Predicated instruction PHI works per instance.");
9888   Instruction *ScalarPredInst =
9889       cast<Instruction>(State.get(getOperand(0), *State.Instance));
9890   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
9891   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
9892   assert(PredicatingBB && "Predicated block has no single predecessor.");
9893   assert(isa<VPReplicateRecipe>(getOperand(0)) &&
9894          "operand must be VPReplicateRecipe");
9895 
9896   // By current pack/unpack logic we need to generate only a single phi node: if
9897   // a vector value for the predicated instruction exists at this point it means
9898   // the instruction has vector users only, and a phi for the vector value is
9899   // needed. In this case the recipe of the predicated instruction is marked to
9900   // also do that packing, thereby "hoisting" the insert-element sequence.
9901   // Otherwise, a phi node for the scalar value is needed.
9902   unsigned Part = State.Instance->Part;
9903   if (State.hasVectorValue(getOperand(0), Part)) {
9904     Value *VectorValue = State.get(getOperand(0), Part);
9905     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
9906     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
9907     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
9908     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
9909     if (State.hasVectorValue(this, Part))
9910       State.reset(this, VPhi, Part);
9911     else
9912       State.set(this, VPhi, Part);
9913     // NOTE: Currently we need to update the value of the operand, so the next
9914     // predicated iteration inserts its generated value in the correct vector.
9915     State.reset(getOperand(0), VPhi, Part);
9916   } else {
9917     Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType();
9918     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
9919     Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
9920                      PredicatingBB);
9921     Phi->addIncoming(ScalarPredInst, PredicatedBB);
9922     if (State.hasScalarValue(this, *State.Instance))
9923       State.reset(this, Phi, *State.Instance);
9924     else
9925       State.set(this, Phi, *State.Instance);
9926     // NOTE: Currently we need to update the value of the operand, so the next
9927     // predicated iteration inserts its generated value in the correct vector.
9928     State.reset(getOperand(0), Phi, *State.Instance);
9929   }
9930 }
9931 
9932 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
9933   VPValue *StoredValue = isStore() ? getStoredValue() : nullptr;
9934 
9935   // Attempt to issue a wide load.
9936   LoadInst *LI = dyn_cast<LoadInst>(&Ingredient);
9937   StoreInst *SI = dyn_cast<StoreInst>(&Ingredient);
9938 
9939   assert((LI || SI) && "Invalid Load/Store instruction");
9940   assert((!SI || StoredValue) && "No stored value provided for widened store");
9941   assert((!LI || !StoredValue) && "Stored value provided for widened load");
9942 
9943   Type *ScalarDataTy = getLoadStoreType(&Ingredient);
9944 
9945   auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
9946   const Align Alignment = getLoadStoreAlignment(&Ingredient);
9947   bool CreateGatherScatter = !Consecutive;
9948 
9949   auto &Builder = State.Builder;
9950   InnerLoopVectorizer::VectorParts BlockInMaskParts(State.UF);
9951   bool isMaskRequired = getMask();
9952   if (isMaskRequired)
9953     for (unsigned Part = 0; Part < State.UF; ++Part)
9954       BlockInMaskParts[Part] = State.get(getMask(), Part);
9955 
9956   const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
9957     // Calculate the pointer for the specific unroll-part.
9958     GetElementPtrInst *PartPtr = nullptr;
9959 
9960     bool InBounds = false;
9961     if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
9962       InBounds = gep->isInBounds();
9963     if (Reverse) {
      // If the address is consecutive but reversed, then the
      // wide store needs to start at the last vector element.
      // RunTimeVF = VScale * VF.getKnownMinValue()
      // For fixed-width vectors VScale is 1, so RunTimeVF = VF.getKnownMinValue().
9968       Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), State.VF);
9969       // NumElt = -Part * RunTimeVF
9970       Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF);
9971       // LastLane = 1 - RunTimeVF
9972       Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF);
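      // For example, for fixed VF = 4 (VScale = 1) and Part = 1: NumElt = -4
      // and LastLane = -3, so the part pointer lands at Ptr - 7 and the wide
      // access covers elements Ptr[-7 .. -4], consumed in reverse order.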
9973       PartPtr =
9974           cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt));
9975       PartPtr->setIsInBounds(InBounds);
9976       PartPtr = cast<GetElementPtrInst>(
9977           Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane));
9978       PartPtr->setIsInBounds(InBounds);
9979       if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
9980         BlockInMaskParts[Part] =
9981             Builder.CreateVectorReverse(BlockInMaskParts[Part], "reverse");
9982     } else {
9983       Value *Increment =
9984           createStepForVF(Builder, Builder.getInt32Ty(), State.VF, Part);
9985       PartPtr = cast<GetElementPtrInst>(
9986           Builder.CreateGEP(ScalarDataTy, Ptr, Increment));
9987       PartPtr->setIsInBounds(InBounds);
9988     }
9989 
9990     unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
9991     return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
9992   };
9993 
9994   // Handle Stores:
9995   if (SI) {
9996     State.ILV->setDebugLocFromInst(SI);
9997 
9998     for (unsigned Part = 0; Part < State.UF; ++Part) {
9999       Instruction *NewSI = nullptr;
10000       Value *StoredVal = State.get(StoredValue, Part);
10001       if (CreateGatherScatter) {
10002         Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
10003         Value *VectorGep = State.get(getAddr(), Part);
10004         NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
10005                                             MaskPart);
10006       } else {
10007         if (Reverse) {
10008           // If we store to reverse consecutive memory locations, then we need
10009           // to reverse the order of elements in the stored value.
10010           StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse");
10011           // We don't want to update the value in the map as it might be used in
10012           // another expression. So don't call resetVectorValue(StoredVal).
10013         }
10014         auto *VecPtr =
10015             CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0)));
10016         if (isMaskRequired)
10017           NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
10018                                             BlockInMaskParts[Part]);
10019         else
10020           NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
10021       }
10022       State.ILV->addMetadata(NewSI, SI);
10023     }
10024     return;
10025   }
10026 
10027   // Handle loads.
10028   assert(LI && "Must have a load instruction");
10029   State.ILV->setDebugLocFromInst(LI);
10030   for (unsigned Part = 0; Part < State.UF; ++Part) {
10031     Value *NewLI;
10032     if (CreateGatherScatter) {
10033       Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
10034       Value *VectorGep = State.get(getAddr(), Part);
10035       NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart,
10036                                          nullptr, "wide.masked.gather");
10037       State.ILV->addMetadata(NewLI, LI);
10038     } else {
10039       auto *VecPtr =
10040           CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0)));
10041       if (isMaskRequired)
10042         NewLI = Builder.CreateMaskedLoad(
10043             DataTy, VecPtr, Alignment, BlockInMaskParts[Part],
10044             PoisonValue::get(DataTy), "wide.masked.load");
10045       else
10046         NewLI =
10047             Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
10048 
10049       // Add metadata to the load, but setVectorValue to the reverse shuffle.
10050       State.ILV->addMetadata(NewLI, LI);
10051       if (Reverse)
10052         NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
10053     }
10054 
10055     State.set(this, NewLI, Part);
10056   }
10057 }
10058 
// Determine how to lower the scalar epilogue, which depends on 1) optimizing
// for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
// predication, and 4) a TTI hook that analyzes whether the loop is suitable
// for predication.
10063 static ScalarEpilogueLowering getScalarEpilogueLowering(
10064     Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
10065     BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
10066     AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
10067     LoopVectorizationLegality &LVL) {
10068   // 1) OptSize takes precedence over all other options, i.e. if this is set,
10069   // don't look at hints or options, and don't request a scalar epilogue.
10070   // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
10071   // LoopAccessInfo (due to code dependency and not being able to reliably get
10072   // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
10073   // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
10074   // versioning when the vectorization is forced, unlike hasOptSize. So revert
10075   // back to the old way and vectorize with versioning when forced. See D81345.)
10076   if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
10077                                                       PGSOQueryType::IRPass) &&
10078                           Hints.getForce() != LoopVectorizeHints::FK_Enabled))
10079     return CM_ScalarEpilogueNotAllowedOptSize;
10080 
10081   // 2) If set, obey the directives
10082   if (PreferPredicateOverEpilogue.getNumOccurrences()) {
10083     switch (PreferPredicateOverEpilogue) {
10084     case PreferPredicateTy::ScalarEpilogue:
10085       return CM_ScalarEpilogueAllowed;
10086     case PreferPredicateTy::PredicateElseScalarEpilogue:
10087       return CM_ScalarEpilogueNotNeededUsePredicate;
10088     case PreferPredicateTy::PredicateOrDontVectorize:
10089       return CM_ScalarEpilogueNotAllowedUsePredicate;
10090     };
10091   }
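  // (These directives correspond to the -prefer-predicate-over-epilogue
  // command-line option.)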
10092 
10093   // 3) If set, obey the hints
10094   switch (Hints.getPredicate()) {
10095   case LoopVectorizeHints::FK_Enabled:
10096     return CM_ScalarEpilogueNotNeededUsePredicate;
10097   case LoopVectorizeHints::FK_Disabled:
10098     return CM_ScalarEpilogueAllowed;
10099   };
10100 
  // 4) If the TTI hook indicates this is profitable, request predication.
10102   if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
10103                                        LVL.getLAI()))
10104     return CM_ScalarEpilogueNotNeededUsePredicate;
10105 
10106   return CM_ScalarEpilogueAllowed;
10107 }
10108 
10109 Value *VPTransformState::get(VPValue *Def, unsigned Part) {
  // If Values have been set for this Def, return the one relevant for \p Part.
10111   if (hasVectorValue(Def, Part))
10112     return Data.PerPartOutput[Def][Part];
10113 
10114   if (!hasScalarValue(Def, {Part, 0})) {
10115     Value *IRV = Def->getLiveInIRValue();
10116     Value *B = ILV->getBroadcastInstrs(IRV);
10117     set(Def, B, Part);
10118     return B;
10119   }
10120 
10121   Value *ScalarValue = get(Def, {Part, 0});
10122   // If we aren't vectorizing, we can just copy the scalar map values over
10123   // to the vector map.
10124   if (VF.isScalar()) {
10125     set(Def, ScalarValue, Part);
10126     return ScalarValue;
10127   }
10128 
10129   auto *RepR = dyn_cast<VPReplicateRecipe>(Def);
10130   bool IsUniform = RepR && RepR->isUniform();
10131 
10132   unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
10133   // Check if there is a scalar value for the selected lane.
10134   if (!hasScalarValue(Def, {Part, LastLane})) {
10135     // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform.
10136     assert((isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) ||
10137             isa<VPScalarIVStepsRecipe>(Def->getDef())) &&
10138            "unexpected recipe found to be invariant");
10139     IsUniform = true;
10140     LastLane = 0;
10141   }
10142 
10143   auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
10144   // Set the insert point after the last scalarized instruction or after the
10145   // last PHI, if LastInst is a PHI. This ensures the insertelement sequence
10146   // will directly follow the scalar definitions.
10147   auto OldIP = Builder.saveIP();
10148   auto NewIP =
10149       isa<PHINode>(LastInst)
10150           ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI())
10151           : std::next(BasicBlock::iterator(LastInst));
10152   Builder.SetInsertPoint(&*NewIP);
10153 
10154   // However, if we are vectorizing, we need to construct the vector values.
10155   // If the value is known to be uniform after vectorization, we can just
10156   // broadcast the scalar value corresponding to lane zero for each unroll
10157   // iteration. Otherwise, we construct the vector values using
10158   // insertelement instructions. Since the resulting vectors are stored in
10159   // State, we will only generate the insertelements once.
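  // For example, packing a non-uniform definition with VF = 4 emits a chain
  // like (sketch):
  //   %p0 = insertelement <4 x i32> poison, i32 %s0, i32 0
  //   %p1 = insertelement <4 x i32> %p0,    i32 %s1, i32 1
  //   ... and so on up to lane VF - 1.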
10160   Value *VectorValue = nullptr;
10161   if (IsUniform) {
10162     VectorValue = ILV->getBroadcastInstrs(ScalarValue);
10163     set(Def, VectorValue, Part);
10164   } else {
    // Initialize packing with insertelements to start from poison.
    assert(!VF.isScalable() && "VF is assumed to be non scalable.");
    Value *Poison = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
    set(Def, Poison, Part);
10169     for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
10170       ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
10171     VectorValue = get(Def, Part);
10172   }
10173   Builder.restoreIP(OldIP);
10174   return VectorValue;
10175 }
10176 
// Process the loop in the VPlan-native vectorization path. This path builds
// VPlan upfront in the vectorization pipeline, which allows applying
// VPlan-to-VPlan transformations from the very beginning without modifying the
// input LLVM IR.
10181 static bool processLoopInVPlanNativePath(
10182     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
10183     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
10184     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
10185     OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
10186     ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints,
10187     LoopVectorizationRequirements &Requirements) {
10188 
10189   if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
10190     LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
10191     return false;
10192   }
10193   assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
10194   Function *F = L->getHeader()->getParent();
10195   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
10196 
10197   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
10198       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
10199 
10200   LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
10201                                 &Hints, IAI);
10202   // Use the planner for outer loop vectorization.
10203   // TODO: CM is not used at this point inside the planner. Turn CM into an
10204   // optional argument if we don't need it in the future.
10205   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints,
10206                                Requirements, ORE);
10207 
10208   // Get user vectorization factor.
10209   ElementCount UserVF = Hints.getWidth();
10210 
10211   CM.collectElementTypesForWidening();
10212 
10213   // Plan how to best vectorize, return the best VF and its cost.
10214   const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
10215 
10216   // If we are stress testing VPlan builds, do not attempt to generate vector
10217   // code. Masked vector code generation support will follow soon.
10218   // Also, do not attempt to vectorize if no vector code will be produced.
10219   if (VPlanBuildStressTest || EnableVPlanPredication ||
10220       VectorizationFactor::Disabled() == VF)
10221     return false;
10222 
10223   VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10224 
10225   {
10226     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10227                              F->getParent()->getDataLayout());
10228     InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
10229                            &CM, BFI, PSI, Checks);
10230     LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
10231                       << L->getHeader()->getParent()->getName() << "\"\n");
10232     LVP.executePlan(VF.Width, 1, BestPlan, LB, DT);
10233   }
10234 
10235   // Mark the loop as already vectorized to avoid vectorizing again.
10236   Hints.setAlreadyVectorized();
10237   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10238   return true;
10239 }
10240 
// Emit a remark if there are stores to floats that required a floating point
// extension. If the vectorized loop was generated with double precision there
// will be a performance penalty from the conversion overhead and the change in
// the vector width.
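// For example, "FloatVal += 1.0" with a double literal introduces an fpext of
// FloatVal and an fptrunc back to float before the store; the fpext found by
// the upward walk below is what triggers the remark.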
10245 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
10246   SmallVector<Instruction *, 4> Worklist;
10247   for (BasicBlock *BB : L->getBlocks()) {
10248     for (Instruction &Inst : *BB) {
10249       if (auto *S = dyn_cast<StoreInst>(&Inst)) {
10250         if (S->getValueOperand()->getType()->isFloatTy())
10251           Worklist.push_back(S);
10252       }
10253     }
10254   }
10255 
  // Traverse the floating point stores upwards, searching for floating point
  // conversions.
10258   SmallPtrSet<const Instruction *, 4> Visited;
10259   SmallPtrSet<const Instruction *, 4> EmittedRemark;
10260   while (!Worklist.empty()) {
10261     auto *I = Worklist.pop_back_val();
10262     if (!L->contains(I))
10263       continue;
10264     if (!Visited.insert(I).second)
10265       continue;
10266 
10267     // Emit a remark if the floating point store required a floating
10268     // point conversion.
10269     // TODO: More work could be done to identify the root cause such as a
10270     // constant or a function return type and point the user to it.
10271     if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
10272       ORE->emit([&]() {
10273         return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
10274                                           I->getDebugLoc(), L->getHeader())
10275                << "floating point conversion changes vector width. "
10276                << "Mixed floating point precision requires an up/down "
10277                << "cast that will negatively impact performance.";
10278       });
10279 
10280     for (Use &Op : I->operands())
10281       if (auto *OpI = dyn_cast<Instruction>(Op))
10282         Worklist.push_back(OpI);
10283   }
10284 }
10285 
10286 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
10287     : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
10288                                !EnableLoopInterleaving),
10289       VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
10290                               !EnableLoopVectorization) {}
10291 
10292 bool LoopVectorizePass::processLoop(Loop *L) {
10293   assert((EnableVPlanNativePath || L->isInnermost()) &&
10294          "VPlan-native path is not enabled. Only process inner loops.");
10295 
10296 #ifndef NDEBUG
10297   const std::string DebugLocStr = getDebugLocString(L);
10298 #endif /* NDEBUG */
10299 
10300   LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in '"
10301                     << L->getHeader()->getParent()->getName() << "' from "
10302                     << DebugLocStr << "\n");
10303 
10304   LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI);
10305 
10306   LLVM_DEBUG(
10307       dbgs() << "LV: Loop hints:"
10308              << " force="
10309              << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
10310                      ? "disabled"
10311                      : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
10312                             ? "enabled"
10313                             : "?"))
10314              << " width=" << Hints.getWidth()
10315              << " interleave=" << Hints.getInterleave() << "\n");
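  // These hints typically come from loop metadata, e.g. set via
  //   #pragma clang loop vectorize(enable) vectorize_width(4) interleave_count(2)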
10316 
10317   // Function containing loop
10318   Function *F = L->getHeader()->getParent();
10319 
  // Looking at the diagnostic output is the only way to determine if a loop
  // was vectorized (other than looking at the IR or machine code), so it
  // is important to generate an optimization remark for each loop. Most of
  // these messages are generated as OptimizationRemarkAnalysis. Remarks
  // generated as OptimizationRemark and OptimizationRemarkMissed are
  // less verbose, reporting vectorized loops and unvectorized loops that may
  // benefit from vectorization, respectively.
10327 
10328   if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
10329     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
10330     return false;
10331   }
10332 
10333   PredicatedScalarEvolution PSE(*SE, *L);
10334 
10335   // Check if it is legal to vectorize the loop.
10336   LoopVectorizationRequirements Requirements;
10337   LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
10338                                 &Requirements, &Hints, DB, AC, BFI, PSI);
10339   if (!LVL.canVectorize(EnableVPlanNativePath)) {
10340     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
10341     Hints.emitRemarkWithHints();
10342     return false;
10343   }
10344 
10345   // Check the function attributes and profiles to find out if this function
10346   // should be optimized for size.
10347   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
10348       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);
10349 
10350   // Entrance to the VPlan-native vectorization path. Outer loops are processed
10351   // here. They may require CFG and instruction level transformations before
10352   // even evaluating whether vectorization is profitable. Since we cannot modify
10353   // the incoming IR, we need to build VPlan upfront in the vectorization
10354   // pipeline.
10355   if (!L->isInnermost())
10356     return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
10357                                         ORE, BFI, PSI, Hints, Requirements);
10358 
10359   assert(L->isInnermost() && "Inner loop expected.");
10360 
10361   // Check the loop for a trip count threshold: vectorize loops with a tiny trip
10362   // count by optimizing for size, to minimize overheads.
10363   auto ExpectedTC = getSmallBestKnownTC(*SE, L);
10364   if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
10365     LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
10366                       << "This loop is worth vectorizing only if no scalar "
10367                       << "iteration overheads are incurred.");
10368     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
10369       LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
10370     else {
10371       LLVM_DEBUG(dbgs() << "\n");
10372       SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
10373     }
10374   }
10375 
10376   // Check the function attributes to see if implicit floats are allowed.
10377   // FIXME: This check doesn't seem possibly correct -- what if the loop is
10378   // an integer loop and the vector instructions selected are purely integer
10379   // vector instructions?
10380   if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
10381     reportVectorizationFailure(
10382         "Can't vectorize when the NoImplicitFloat attribute is used",
10383         "loop not vectorized due to NoImplicitFloat attribute",
10384         "NoImplicitFloat", ORE, L);
10385     Hints.emitRemarkWithHints();
10386     return false;
10387   }
10388 
10389   // Check if the target supports potentially unsafe FP vectorization.
10390   // FIXME: Add a check for the type of safety issue (denormal, signaling)
10391   // for the target we're vectorizing for, to make sure none of the
10392   // additional fp-math flags can help.
10393   if (Hints.isPotentiallyUnsafe() &&
10394       TTI->isFPVectorizationPotentiallyUnsafe()) {
10395     reportVectorizationFailure(
10396         "Potentially unsafe FP op prevents vectorization",
10397         "loop not vectorized due to unsafe FP support.",
10398         "UnsafeFP", ORE, L);
10399     Hints.emitRemarkWithHints();
10400     return false;
10401   }
10402 
10403   bool AllowOrderedReductions;
10404   // If the flag is set, use that instead and override the TTI behaviour.
10405   if (ForceOrderedReductions.getNumOccurrences() > 0)
10406     AllowOrderedReductions = ForceOrderedReductions;
10407   else
10408     AllowOrderedReductions = TTI->enableOrderedReductions();
10409   if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) {
10410     ORE->emit([&]() {
10411       auto *ExactFPMathInst = Requirements.getExactFPInst();
10412       return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
10413                                                  ExactFPMathInst->getDebugLoc(),
10414                                                  ExactFPMathInst->getParent())
10415              << "loop not vectorized: cannot prove it is safe to reorder "
10416                 "floating-point operations";
10417     });
10418     LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
10419                          "reorder floating-point operations\n");
10420     Hints.emitRemarkWithHints();
10421     return false;
10422   }
10423 
10424   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
10425   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
10426 
10427   // If an override option has been passed in for interleaved accesses, use it.
10428   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
10429     UseInterleaved = EnableInterleavedMemAccesses;
10430 
10431   // Analyze interleaved memory accesses.
10432   if (UseInterleaved) {
10433     IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
10434   }
10435 
10436   // Use the cost model.
10437   LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
10438                                 F, &Hints, IAI);
10439   CM.collectValuesToIgnore();
10440   CM.collectElementTypesForWidening();
10441 
10442   // Use the planner for vectorization.
10443   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints,
10444                                Requirements, ORE);
10445 
10446   // Get user vectorization factor and interleave count.
10447   ElementCount UserVF = Hints.getWidth();
10448   unsigned UserIC = Hints.getInterleave();
10449 
10450   // Plan how to best vectorize, return the best VF and its cost.
10451   Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
10452 
10453   VectorizationFactor VF = VectorizationFactor::Disabled();
10454   unsigned IC = 1;
10455 
10456   if (MaybeVF) {
10457     VF = *MaybeVF;
10458     // Select the interleave count.
10459     IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue());
10460   }
10461 
10462   // Identify the diagnostic messages that should be produced.
10463   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
10464   bool VectorizeLoop = true, InterleaveLoop = true;
10465   if (VF.Width.isScalar()) {
10466     LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
10467     VecDiagMsg = std::make_pair(
10468         "VectorizationNotBeneficial",
10469         "the cost-model indicates that vectorization is not beneficial");
10470     VectorizeLoop = false;
10471   }
10472 
10473   if (!MaybeVF && UserIC > 1) {
10474     // Tell the user interleaving was avoided up-front, despite being explicitly
10475     // requested.
10476     LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
10477                          "interleaving should be avoided up front\n");
10478     IntDiagMsg = std::make_pair(
10479         "InterleavingAvoided",
10480         "Ignoring UserIC, because interleaving was avoided up front");
10481     InterleaveLoop = false;
10482   } else if (IC == 1 && UserIC <= 1) {
10483     // Tell the user interleaving is not beneficial.
10484     LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
10485     IntDiagMsg = std::make_pair(
10486         "InterleavingNotBeneficial",
10487         "the cost-model indicates that interleaving is not beneficial");
10488     InterleaveLoop = false;
10489     if (UserIC == 1) {
10490       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
10491       IntDiagMsg.second +=
10492           " and is explicitly disabled or interleave count is set to 1";
10493     }
10494   } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
10496     LLVM_DEBUG(
10497         dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
10498     IntDiagMsg = std::make_pair(
10499         "InterleavingBeneficialButDisabled",
10500         "the cost-model indicates that interleaving is beneficial "
10501         "but is explicitly disabled or interleave count is set to 1");
10502     InterleaveLoop = false;
10503   }
10504 
10505   // Override IC if user provided an interleave count.
10506   IC = UserIC > 0 ? UserIC : IC;
10507 
10508   // Emit diagnostic messages, if any.
10509   const char *VAPassName = Hints.vectorizeAnalysisPassName();
10510   if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
10512     ORE->emit([&]() {
10513       return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
10514                                       L->getStartLoc(), L->getHeader())
10515              << VecDiagMsg.second;
10516     });
10517     ORE->emit([&]() {
10518       return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
10519                                       L->getStartLoc(), L->getHeader())
10520              << IntDiagMsg.second;
10521     });
10522     return false;
10523   } else if (!VectorizeLoop && InterleaveLoop) {
10524     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10525     ORE->emit([&]() {
10526       return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
10527                                         L->getStartLoc(), L->getHeader())
10528              << VecDiagMsg.second;
10529     });
10530   } else if (VectorizeLoop && !InterleaveLoop) {
10531     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10532                       << ") in " << DebugLocStr << '\n');
10533     ORE->emit([&]() {
10534       return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
10535                                         L->getStartLoc(), L->getHeader())
10536              << IntDiagMsg.second;
10537     });
10538   } else if (VectorizeLoop && InterleaveLoop) {
10539     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10540                       << ") in " << DebugLocStr << '\n');
10541     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10542   }
10543 
10544   bool DisableRuntimeUnroll = false;
10545   MDNode *OrigLoopID = L->getLoopID();
10546   {
    // Optimistically generate runtime checks. Drop them if they turn out to
    // not be profitable. Limit the scope of Checks, so the cleanup happens
    // immediately after vector code generation is done.
10550     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10551                              F->getParent()->getDataLayout());
10552     if (!VF.Width.isScalar() || IC > 1)
10553       Checks.Create(L, *LVL.getLAI(), PSE.getPredicate());
10554 
10555     using namespace ore;
10556     if (!VectorizeLoop) {
10557       assert(IC > 1 && "interleave count should not be 1 or 0");
      // If we decided that it is not profitable to vectorize the loop, then
      // interleave it.
10560       InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
10561                                  &CM, BFI, PSI, Checks);
10562 
10563       VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10564       LVP.executePlan(VF.Width, IC, BestPlan, Unroller, DT);
10565 
10566       ORE->emit([&]() {
10567         return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
10568                                   L->getHeader())
10569                << "interleaved loop (interleaved count: "
10570                << NV("InterleaveCount", IC) << ")";
10571       });
10572     } else {
10573       // If we decided that it is *legal* to vectorize the loop, then do it.
10574 
10575       // Consider vectorizing the epilogue too if it's profitable.
10576       VectorizationFactor EpilogueVF =
10577           CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
10578       if (EpilogueVF.Width.isVector()) {
10579 
10580         // The first pass vectorizes the main loop and creates a scalar epilogue
10581         // to be vectorized by executing the plan (potentially with a different
10582         // factor) again shortly afterwards.
10583         EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1);
10584         EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE,
10585                                            EPI, &LVL, &CM, BFI, PSI, Checks);
10586 
10587         VPlan &BestMainPlan = LVP.getBestPlanFor(EPI.MainLoopVF);
10588         LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF, BestMainPlan, MainILV,
10589                         DT);
10590         ++LoopsVectorized;
10591 
10592         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10593         formLCSSARecursively(*L, *DT, LI, SE);
10594 
10595         // Second pass vectorizes the epilogue and adjusts the control flow
10596         // edges from the first pass.
10597         EPI.MainLoopVF = EPI.EpilogueVF;
10598         EPI.MainLoopUF = EPI.EpilogueUF;
10599         EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
10600                                                  ORE, EPI, &LVL, &CM, BFI, PSI,
10601                                                  Checks);
10602 
10603         VPlan &BestEpiPlan = LVP.getBestPlanFor(EPI.EpilogueVF);
10604 
        // Ensure that the start values for any VPReductionPHIRecipes are
        // updated before vectorizing the epilogue loop.
10607         VPBasicBlock *Header = BestEpiPlan.getEntry()->getEntryBasicBlock();
10608         for (VPRecipeBase &R : Header->phis()) {
10609           if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) {
10610             if (auto *Resume = MainILV.getReductionResumeValue(
10611                     ReductionPhi->getRecurrenceDescriptor())) {
10612               VPValue *StartVal = new VPValue(Resume);
10613               BestEpiPlan.addExternalDef(StartVal);
10614               ReductionPhi->setOperand(0, StartVal);
10615             }
10616           }
10617         }
10618 
10619         LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV,
10620                         DT);
10621         ++LoopsEpilogueVectorized;
10622 
10623         if (!MainILV.areSafetyChecksAdded())
10624           DisableRuntimeUnroll = true;
10625       } else {
10626         InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
10627                                &LVL, &CM, BFI, PSI, Checks);
10628 
10629         VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10630         LVP.executePlan(VF.Width, IC, BestPlan, LB, DT);
10631         ++LoopsVectorized;
10632 
10633         // Add metadata to disable runtime unrolling a scalar loop when there
10634         // are no runtime checks about strides and memory. A scalar loop that is
10635         // rarely used is not worth unrolling.
10636         if (!LB.areSafetyChecksAdded())
10637           DisableRuntimeUnroll = true;
10638       }
10639       // Report the vectorization decision.
10640       ORE->emit([&]() {
10641         return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
10642                                   L->getHeader())
10643                << "vectorized loop (vectorization width: "
10644                << NV("VectorizationFactor", VF.Width)
10645                << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
10646       });
10647     }
10648 
10649     if (ORE->allowExtraAnalysis(LV_NAME))
10650       checkMixedPrecision(L, ORE);
10651   }
10652 
10653   Optional<MDNode *> RemainderLoopID =
10654       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
10655                                       LLVMLoopVectorizeFollowupEpilogue});
10656   if (RemainderLoopID.hasValue()) {
10657     L->setLoopID(RemainderLoopID.getValue());
10658   } else {
10659     if (DisableRuntimeUnroll)
10660       AddRuntimeUnrollDisableMetaData(L);
10661 
10662     // Mark the loop as already vectorized to avoid vectorizing again.
10663     Hints.setAlreadyVectorized();
10664   }
10665 
10666   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10667   return true;
10668 }
10669 
10670 LoopVectorizeResult LoopVectorizePass::runImpl(
10671     Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
10672     DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
10673     DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
10674     std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
10675     OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
10676   SE = &SE_;
10677   LI = &LI_;
10678   TTI = &TTI_;
10679   DT = &DT_;
10680   BFI = &BFI_;
10681   TLI = TLI_;
10682   AA = &AA_;
10683   AC = &AC_;
10684   GetLAA = &GetLAA_;
10685   DB = &DB_;
10686   ORE = &ORE_;
10687   PSI = PSI_;
10688 
10689   // Don't attempt if
10690   // 1. the target claims to have no vector registers, and
10691   // 2. interleaving won't help ILP.
10692   //
10693   // The second condition is necessary because, even if the target has no
10694   // vector registers, loop vectorization may still enable scalar
10695   // interleaving.
10696   if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
10697       TTI->getMaxInterleaveFactor(1) < 2)
10698     return LoopVectorizeResult(false, false);
10699 
10700   bool Changed = false, CFGChanged = false;
10701 
  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
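  // (Simplified form requires a preheader, a single backedge, and dedicated
  // exit blocks.)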
10707   for (auto &L : *LI)
10708     Changed |= CFGChanged |=
10709         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10710 
10711   // Build up a worklist of inner-loops to vectorize. This is necessary as
10712   // the act of vectorizing or partially unrolling a loop creates new loops
10713   // and can invalidate iterators across the loops.
10714   SmallVector<Loop *, 8> Worklist;
10715 
10716   for (Loop *L : *LI)
10717     collectSupportedLoops(*L, LI, ORE, Worklist);
10718 
10719   LoopsAnalyzed += Worklist.size();
10720 
10721   // Now walk the identified inner loops.
10722   while (!Worklist.empty()) {
10723     Loop *L = Worklist.pop_back_val();
10724 
10725     // For the inner loops we actually process, form LCSSA to simplify the
10726     // transform.
10727     Changed |= formLCSSARecursively(*L, *DT, LI, SE);
10728 
10729     Changed |= CFGChanged |= processLoop(L);
10730   }
10731 
10732   // Process each loop nest in the function.
10733   return LoopVectorizeResult(Changed, CFGChanged);
10734 }
10735 
10736 PreservedAnalyses LoopVectorizePass::run(Function &F,
10737                                          FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,      SE,
                                      TLI, TTI, nullptr, nullptr, nullptr};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve loopinfo/dominator analyses with outer loop
  // vectorization. Until this is addressed, mark these analyses as preserved
  // only for non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }

  if (Result.MadeCFGChange) {
    // Making CFG changes likely means a loop got vectorized. Indicate that
    // extra simplification passes should be run.
    // TODO: MadeCFGChanges is not a perfect proxy. Extra passes should only
    // be run if runtime checks have been added.
    AM.getResult<ShouldRunExtraVectorPasses>(F);
    PA.preserve<ShouldRunExtraVectorPasses>();
  } else {
    PA.preserveSet<CFGAnalyses>();
  }
  return PA;
10785 }
10786 
10787 void LoopVectorizePass::printPipeline(
10788     raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
10789   static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
10790       OS, MapClassName2PassName);
10791 
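  // Prints, e.g., "<no-interleave-forced-only;no-vectorize-forced-only;>"
  // after the pass name when both options are false.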
10792   OS << "<";
10793   OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
10794   OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
10795   OS << ">";
10796 }
10797