//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
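//
// For example, with a vector width of 4, the transformation turns
// (an illustrative C-like sketch, not the IR the pass actually emits):
//
//   for (i = 0; i < n; i++)
//     A[i] = B[i] + C[i];
//
// into a loop stepping by 4 that computes A[i..i+3] = B[i..i+3] + C[i..i+3]
// with vector instructions.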
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

#ifndef NDEBUG
const char VerboseDebug[] = DEBUG_TYPE "-verbose";
#endif

/// @{
/// Metadata attribute names
const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
const char LLVMLoopVectorizeFollowupVectorized[] =
    "llvm.loop.vectorize.followup_vectorized";
const char LLVMLoopVectorizeFollowupEpilogue[] =
    "llvm.loop.vectorize.followup_epilogue";
/// @}

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");

static cl::opt<bool> EnableEpilogueVectorization(
    "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
    cl::desc("Enable vectorization of epilogue loops."));

static cl::opt<unsigned> EpilogueVectorizationForceVF(
    "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
    cl::desc("When epilogue vectorization is enabled, and a value greater than "
             "1 is specified, forces the given VF for all applicable epilogue "
             "loops."));

static cl::opt<unsigned> EpilogueVectorizationMinVF(
    "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
    cl::desc("Only loops with vectorization factor equal to or larger than "
             "the specified value are considered for epilogue vectorization."));

/// Loops with a known constant trip count below this number are vectorized only
/// if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

// Option prefer-predicate-over-epilogue indicates that an epilogue is
// undesired, that predication is preferred, and this lists all options. I.e.,
// the vectorizer will try to fold the tail-loop (epilogue) into the vector
// body and predicate the instructions accordingly. If tail-folding fails,
// there are different fallback strategies depending on these values:
namespace PreferPredicateTy {
  enum Option {
    ScalarEpilogue = 0,
    PredicateElseScalarEpilogue,
    PredicateOrDontVectorize
  };
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "Prefer tail-folding, create scalar epilogue if tail "
                          "folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "Prefer tail-folding, don't attempt vectorization if "
                          "tail-folding fails.")));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a "
             "loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<bool> ForceTargetSupportsScalableVectors(
    "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
    cl::desc(
        "Pretend that scalable vectors are supported, even if the target does "
        "not support them. This flag should only be used for testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// Interleave small loops with scalar reductions.
static cl::opt<bool> InterleaveSmallLoopScalarReduction(
    "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
    cl::desc("Enable interleaving for loops with small iteration counts that "
             "contain scalar reductions to expose ILP."));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<bool>
    PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
                           cl::Hidden,
                           cl::desc("Prefer in-loop vector reductions, "
                                    "overriding the target's preference."));

static cl::opt<bool> ForceOrderedReductions(
    "force-ordered-reductions", cl::init(false), cl::Hidden,
    cl::desc("Enable the vectorization of loops with in-order (strict) "
             "FP reductions"));

static cl::opt<bool> PreferPredicatedReductionSelect(
    "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
    cl::desc(
        "Prefer predicating a reduction operation over an after loop select."));

cl::opt<bool> EnableVPlanNativePath(
    "enable-vplan-native-path", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

// FIXME: Remove this switch once we have divergence analysis. Currently we
// assume divergent non-backedge branches when this switch is true.
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

cl::opt<bool> PrintVPlansInDotFormat(
    "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
    cl::desc("Use dot format instead of plain text when dumping VPlans"));

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type.
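/// For example, on common targets x86_fp80 has a type size of 80 bits but an
/// allocation size of 96 or 128 bits, so arrays of it contain padding and the
/// type is irregular (illustrative; the exact sizes are target-dependent).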
static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
  // Determine if an array of N elements of type Ty is "bitcast compatible"
  // with a <N x Ty> vector.
  // This is only true if there is no padding between the array elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
///       we always assume predicated blocks have a 50% chance of executing.
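/// For example, the current return value of 2 models a predicated block that
/// executes once every two iterations of the loop header.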
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

// Forward declare GeneratedRTChecks.
class GeneratedRTChecks;

namespace llvm {

AnalysisKey ShouldRunExtraVectorPasses::Key;

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found for a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
        PSI(PSI), RTChecks(RTChecks) {
    // Query this against the original loop and save it here because the
    // profile of the original loop header may change as the transformation
    // happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop and the start value for the canonical induction, if it is != 0. The
  /// latter is the case when vectorizing the epilogue loop. In the case of
  /// epilogue vectorization, this function is overridden to handle the more
  /// complex control flow around the loops.
  virtual std::pair<BasicBlock *, Value *> createVectorizedLoopSkeleton();

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
  void fixVectorizedLoop(VPTransformState &State);

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
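  /// For example (illustrative), with UF = 2 one original value is held as
  /// two vector values, one per unrolled part.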
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single first-order recurrence or pointer induction PHINode in
  /// a block. This method handles the induction variable canonicalization. It
  /// supports both VF = 1 for unrolled loops and arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR,
                           VPTransformState &State);

  /// A helper function to scalarize a single Instruction in the innermost
  /// loop. Generates a sequence of scalar instances for each lane between \p
  /// MinLane and \p MaxLane, times each part between \p MinPart and \p
  /// MaxPart, inclusive. Uses the VPValue operands from \p RepRecipe instead
  /// of \p Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPReplicateRecipe *RepRecipe,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
                                 VPTransformState &State);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                ArrayRef<VPValue *> VPDefs,
                                VPTransformState &State, VPValue *Addr,
                                ArrayRef<VPValue *> StoredValues,
                                VPValue *BlockInMask = nullptr);

  /// Set the debug location in the builder \p CustomBuilder using the debug
  /// location in \p V. If \p CustomBuilder is None then it uses the class
  /// member's Builder.
  void setDebugLocFromInst(const Value *V,
                           Optional<IRBuilderBase *> CustomBuilder = None);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(VPTransformState &State);

  /// Returns true if the reordering of FP operations is not allowed, but we
  /// are able to vectorize with strict in-order reductions for the given
  /// RdxDesc.
  bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata).  Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  // Returns the resume value (bc.merge.rdx) for a reduction as
  // generated by fixReduction.
  PHINode *getReductionResumeValue(const RecurrenceDescriptor &RdxDesc);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
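  /// For example (illustrative), with UF = 2 and VF = 4 one original value is
  /// represented by eight scalars, organized as two groups of four lanes.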
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock, BasicBlock *VectorHeader);

  /// Introduce a conditional branch (on true, condition to be set later) at the
  /// end of the header=latch connecting it to itself (across the backedge) and
  /// to the exit block of \p L.
  void createHeaderBranch(Loop *L);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs(VPTransformState &State);

  /// Create the exit value of first order recurrences in the middle block and
  /// update their users.
  void fixFirstOrderRecurrence(VPFirstOrderRecurrencePHIRecipe *PhiR,
                               VPTransformState &State);

  /// Create code for the loop exit value of the reduction.
  void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
                               VPTransformState &State);

  /// Fixup the LCSSA phi nodes in the unique exit block.  This simply
  /// means we need to add the appropriate incoming value from the middle
  /// block as exiting edges from the scalar epilogue loop (if present) are
  /// already in place, and we exit the vector loop exclusively to the middle
  /// block.
  void fixLCSSAPHIs(VPTransformState &State);

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths(VPTransformState &State);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct. Returns the block containing the checks or
  /// nullptr if no checks have been added.
  BasicBlock *emitSCEVChecks(BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  /// Returns the block containing the checks or nullptr if no checks have been
  /// added.
  BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  /// vector loop preheader, middle block and scalar preheader. Also
  /// allocate a loop object for the new vector loop and return it.
  Loop *createVectorLoopSkeleton(StringRef Prefix);

  /// Create new phi nodes for the induction variables to resume iteration
  /// count in the scalar epilogue, from where the vectorized loop left off.
  /// In cases where the loop skeleton is more complicated (e.g. epilogue
  /// vectorization) and the resume values can come from an additional bypass
  /// block, the \p AdditionalBypass pair provides information about the bypass
  /// block and the end value on the edge from bypass to this loop.
  void createInductionResumeValues(
      Loop *L,
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Take in the vector loop \p L as argument, and return
  /// the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks.  Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Collect poison-generating recipes that may generate a poison value that is
  /// used after vectorization, even when their operands are not poison. Those
  /// recipes meet the following conditions:
  ///  * Contribute to the address computation of a recipe generating a widen
  ///    memory load/store (VPWidenMemoryInstructionRecipe or
  ///    VPInterleaveRecipe).
  ///  * Such a widen memory load/store has at least one underlying Instruction
  ///    that is in a basic block that needs predication and after vectorization
  ///    the generated instruction won't be predicated.
  void collectPoisonGeneratingRecipes(VPTransformState &State);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart() {}
  virtual void printDebugTracesAtEnd() {}

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning.  It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks.  The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The unique ExitBlock of the scalar loop if one exists.  Note that
  /// there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile guided size
  // optimizations.
  bool OptForSizeBasedOnProfile;

  /// Structure to hold information about generated runtime checks, responsible
  /// for cleaning the checks, if vectorization turns out unprofitable.
  GeneratedRTChecks &RTChecks;

  // Holds the resume values for reductions in the loops, used to set the
  // correct start value of reduction PHIs when vectorizing the epilogue.
  SmallMapVector<const RecurrenceDescriptor *, PHINode *, 4>
      ReductionResumeValues;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI, Check) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
};

/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  BasicBlock *SCEVSafetyCheck = nullptr;
  BasicBlock *MemSafetyCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;

  EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF,
                                ElementCount EVF, unsigned EUF)
      : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};

/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, first to set up the
/// skeleton and vectorize the main loop, and second to complete the skeleton
/// from the first step and vectorize the epilogue.  This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
                            Checks),
        EPI(EPI) {}

  // Override this function to handle the more complex control flow around the
  // three loops.
  std::pair<BasicBlock *, Value *>
  createVectorizedLoopSkeleton() final override {
    return createEpilogueVectorizedLoopSkeleton();
  }

  /// The interface for creating a vectorized skeleton using one of two
  /// different strategies, each corresponding to one execution of the vplan
  /// as described above.
  virtual std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() = 0;

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and their
/// epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Check)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Check) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e., the first pass of vplan execution).
  std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
                                             bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *epilogue* loops in the process of vectorizing loops and
/// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Checks) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e., the second pass of vplan execution).
  std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(BasicBlock *Bypass,
                                                      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (Use &Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(
    const Value *V, Optional<IRBuilderBase *> CustomBuilder) {
  IRBuilderBase *B = (CustomBuilder == None) ? &Builder : *CustomBuilder;
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(V)) {
    const DILocation *DIL = Inst->getDebugLoc();

    // When an FSDiscriminator is enabled, we don't need to add the multiply
    // factors to the discriminators.
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
      // FIXME: For scalable vectors, assume vscale=1.
      auto NewDIL =
          DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
      if (NewDIL)
        B->SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs()
                   << "Failed to create new discriminator: "
                   << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B->SetCurrentDebugLocation(DIL);
  } else
    B->SetCurrentDebugLocation(DebugLoc());
}

/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
/// is passed, the message relates to that particular instruction.
#ifndef NDEBUG
static void debugVectorizationMessage(const StringRef Prefix,
                                      const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: " << Prefix << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint).  \p
/// RemarkName is the identifier for the remark.  If \p I is passed it is an
/// instruction that prevents vectorization.  Otherwise \p TheLoop is used for
/// the location of the remark.  \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
    StringRef RemarkName, Loop *TheLoop, Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
}

namespace llvm {

/// Return a value for Step multiplied by VF.
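/// For example (illustrative), with Step = 2 and a scalable VF of
/// <vscale x 4> this returns "8 * vscale" computed at runtime; for a fixed
/// VF of 4 it folds to the constant 8.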
Value *createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF,
                       int64_t Step) {
  assert(Ty->isIntegerTy() && "Expected an integer step");
  Constant *StepVal = ConstantInt::get(Ty, Step * VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
}

/// Return the runtime value for VF.
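/// For example (illustrative), this is the constant 4 for VF = <4 x i32> and
/// "4 * vscale" for VF = <vscale x 4 x i32>.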
Value *getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF) {
  Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(EC) : EC;
}

static Value *getRuntimeVFAsFloat(IRBuilderBase &B, Type *FTy,
                                  ElementCount VF) {
  assert(FTy->isFloatingPointTy() && "Expected floating point type!");
  Type *IntTy = IntegerType::get(FTy->getContext(), FTy->getScalarSizeInBits());
  Value *RuntimeVF = getRuntimeVF(B, IntTy, VF);
  return B.CreateUIToFP(RuntimeVF, FTy);
}

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << "loop not vectorized: " << OREMsg);
}

void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
                             OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                             Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << Msg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::collectPoisonGeneratingRecipes(
    VPTransformState &State) {

  // Collect recipes in the backward slice of `Root` that may generate a poison
  // value that is used after vectorization.
  SmallPtrSet<VPRecipeBase *, 16> Visited;
  auto collectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
    SmallVector<VPRecipeBase *, 16> Worklist;
    Worklist.push_back(Root);

    // Traverse the backward slice of Root through its use-def chain.
    while (!Worklist.empty()) {
      VPRecipeBase *CurRec = Worklist.back();
      Worklist.pop_back();

      if (!Visited.insert(CurRec).second)
        continue;

      // Prune search if we find another recipe generating a widen memory
      // instruction. Widen memory instructions involved in address computation
      // will lead to gather/scatter instructions, which don't need to be
      // handled.
      if (isa<VPWidenMemoryInstructionRecipe>(CurRec) ||
          isa<VPInterleaveRecipe>(CurRec) ||
          isa<VPScalarIVStepsRecipe>(CurRec) ||
          isa<VPCanonicalIVPHIRecipe>(CurRec))
        continue;

      // This recipe contributes to the address computation of a widen
      // load/store. Collect recipe if its underlying instruction has
      // poison-generating flags.
      Instruction *Instr = CurRec->getUnderlyingInstr();
      if (Instr && Instr->hasPoisonGeneratingFlags())
        State.MayGeneratePoisonRecipes.insert(CurRec);

      // Add new definitions to the worklist.
      for (VPValue *operand : CurRec->operands())
        if (VPDef *OpDef = operand->getDef())
          Worklist.push_back(cast<VPRecipeBase>(OpDef));
    }
  });

  // Traverse all the recipes in the VPlan and collect the poison-generating
  // recipes in the backward slice starting at the address of a
  // VPWidenMemoryInstructionRecipe or VPInterleaveRecipe.
  auto Iter = depth_first(
      VPBlockRecursiveTraversalWrapper<VPBlockBase *>(State.Plan->getEntry()));
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
    for (VPRecipeBase &Recipe : *VPBB) {
      if (auto *WidenRec = dyn_cast<VPWidenMemoryInstructionRecipe>(&Recipe)) {
        Instruction *UnderlyingInstr = WidenRec->getUnderlyingInstr();
        VPDef *AddrDef = WidenRec->getAddr()->getDef();
        if (AddrDef && WidenRec->isConsecutive() && UnderlyingInstr &&
            Legal->blockNeedsPredication(UnderlyingInstr->getParent()))
          collectPoisonGeneratingInstrsInBackwardSlice(
              cast<VPRecipeBase>(AddrDef));
      } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
        VPDef *AddrDef = InterleaveRec->getAddr()->getDef();
        if (AddrDef) {
          // Check if any member of the interleave group needs predication.
          const InterleaveGroup<Instruction> *InterGroup =
              InterleaveRec->getInterleaveGroup();
          bool NeedPredication = false;
          for (int I = 0, NumMembers = InterGroup->getNumMembers();
               I < NumMembers; ++I) {
            Instruction *Member = InterGroup->getMember(I);
            if (Member)
              NeedPredication |=
                  Legal->blockNeedsPredication(Member->getParent());
          }

          if (NeedPredication)
            collectPoisonGeneratingInstrsInBackwardSlice(
                cast<VPRecipeBase>(AddrDef));
        }
      }
    }
  }
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

PHINode *InnerLoopVectorizer::getReductionResumeValue(
    const RecurrenceDescriptor &RdxDesc) {
  auto It = ReductionResumeValues.find(&RdxDesc);
  assert(It != ReductionResumeValues.end() &&
         "Expected to find a resume value for the reduction.");
  return It->second;
}

namespace llvm {

// Loop vectorization cost-model hints how the scalar epilogue loop should be
// lowered.
enum ScalarEpilogueLowering {

  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorization with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_ScalarEpilogueNotAllowedLowTripLoop,
1208 
1209   // Loop hint predicate indicating an epilogue is undesired.
1210   CM_ScalarEpilogueNotNeededUsePredicate,
1211 
  // Directive indicating we must either tail fold or not vectorize.
1213   CM_ScalarEpilogueNotAllowedUsePredicate
1214 };
1215 
1216 /// ElementCountComparator creates a total ordering for ElementCount
1217 /// for the purposes of using it in a set structure.
1218 struct ElementCountComparator {
1219   bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
1220     return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
1221            std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
1222   }
1223 };
1224 using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;
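// Illustrative examples of this ordering (assumed values, for exposition
// only): fixed element counts order before scalable ones, e.g.
//   getFixed(4) < getFixed(8) < getScalable(2) < getScalable(4)
// because the comparison key is (isScalable, known minimum value).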
1225 
1226 /// LoopVectorizationCostModel - estimates the expected speedups due to
1227 /// vectorization.
/// In many cases vectorization is not profitable. This can happen for a
/// number of reasons. In this class we mainly attempt to predict the
1230 /// expected speedup/slowdowns due to the supported instruction set. We use the
1231 /// TargetTransformInfo to query the different backends for the cost of
1232 /// different operations.
1233 class LoopVectorizationCostModel {
1234 public:
1235   LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
1236                              PredicatedScalarEvolution &PSE, LoopInfo *LI,
1237                              LoopVectorizationLegality *Legal,
1238                              const TargetTransformInfo &TTI,
1239                              const TargetLibraryInfo *TLI, DemandedBits *DB,
1240                              AssumptionCache *AC,
1241                              OptimizationRemarkEmitter *ORE, const Function *F,
1242                              const LoopVectorizeHints *Hints,
1243                              InterleavedAccessInfo &IAI)
1244       : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
1245         TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
1246         Hints(Hints), InterleaveInfo(IAI) {}
1247 
1248   /// \return An upper bound for the vectorization factors (both fixed and
1249   /// scalable). If the factors are 0, vectorization and interleaving should be
1250   /// avoided up front.
1251   FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);
1252 
1253   /// \return True if runtime checks are required for vectorization, and false
1254   /// otherwise.
1255   bool runtimeChecksRequired();
1256 
1257   /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every VF in \p CandidateVFs. If UserVF is not zero,
1259   /// then this vectorization factor will be selected if vectorization is
1260   /// possible.
1261   VectorizationFactor
1262   selectVectorizationFactor(const ElementCountSet &CandidateVFs);
1263 
1264   VectorizationFactor
1265   selectEpilogueVectorizationFactor(const ElementCount MaxVF,
1266                                     const LoopVectorizationPlanner &LVP);
1267 
1268   /// Setup cost-based decisions for user vectorization factor.
1269   /// \return true if the UserVF is a feasible VF to be chosen.
1270   bool selectUserVectorizationFactor(ElementCount UserVF) {
1271     collectUniformsAndScalars(UserVF);
1272     collectInstsToScalarize(UserVF);
1273     return expectedCost(UserVF).first.isValid();
1274   }
1275 
1276   /// \return The size (in bits) of the smallest and widest types in the code
1277   /// that needs to be vectorized. We ignore values that remain scalar such as
1278   /// 64 bit loop indices.
1279   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1280 
1281   /// \return The desired interleave count.
1282   /// If interleave count has been specified by metadata it will be returned.
1283   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1284   /// are the selected vectorization factor and the cost of the selected VF.
1285   unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);
1286 
  /// A memory access instruction may be vectorized in more than one way.
  /// The form of the instruction after vectorization depends on its cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decision map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
  /// avoid redundant calculations.
1294   void setCostBasedWideningDecision(ElementCount VF);
1295 
1296   /// A struct that represents some properties of the register usage
1297   /// of a loop.
1298   struct RegisterUsage {
1299     /// Holds the number of loop invariant values that are used in the loop.
1300     /// The key is ClassID of target-provided register class.
1301     SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
1302     /// Holds the maximum number of concurrent live intervals in the loop.
1303     /// The key is ClassID of target-provided register class.
1304     SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
1305   };
1306 
1307   /// \return Returns information about the register usages of the loop for the
1308   /// given vectorization factors.
1309   SmallVector<RegisterUsage, 8>
1310   calculateRegisterUsage(ArrayRef<ElementCount> VFs);
1311 
1312   /// Collect values we want to ignore in the cost model.
1313   void collectValuesToIgnore();
1314 
1315   /// Collect all element types in the loop for which widening is needed.
1316   void collectElementTypesForWidening();
1317 
1318   /// Split reductions into those that happen in the loop, and those that happen
  /// outside. In-loop reductions are collected into InLoopReductionChains.
1320   void collectInLoopReductions();
1321 
1322   /// Returns true if we should use strict in-order reductions for the given
1323   /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
1324   /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
1325   /// of FP operations.
1326   bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) {
1327     return !Hints->allowReordering() && RdxDesc.isOrdered();
1328   }
1329 
1330   /// \returns The smallest bitwidth each instruction can be represented with.
1331   /// The vector equivalents of these instructions should be truncated to this
1332   /// type.
1333   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1334     return MinBWs;
1335   }
1336 
1337   /// \returns True if it is more profitable to scalarize instruction \p I for
1338   /// vectorization factor \p VF.
1339   bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
1340     assert(VF.isVector() &&
1341            "Profitable to scalarize relevant only for VF > 1.");
1342 
1343     // Cost model is not run in the VPlan-native path - return conservative
1344     // result until this changes.
1345     if (EnableVPlanNativePath)
1346       return false;
1347 
1348     auto Scalars = InstsToScalarize.find(VF);
1349     assert(Scalars != InstsToScalarize.end() &&
1350            "VF not yet analyzed for scalarization profitability");
1351     return Scalars->second.find(I) != Scalars->second.end();
1352   }
1353 
1354   /// Returns true if \p I is known to be uniform after vectorization.
1355   bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
1356     if (VF.isScalar())
1357       return true;
1358 
1359     // Cost model is not run in the VPlan-native path - return conservative
1360     // result until this changes.
1361     if (EnableVPlanNativePath)
1362       return false;
1363 
1364     auto UniformsPerVF = Uniforms.find(VF);
1365     assert(UniformsPerVF != Uniforms.end() &&
1366            "VF not yet analyzed for uniformity");
1367     return UniformsPerVF->second.count(I);
1368   }
1369 
1370   /// Returns true if \p I is known to be scalar after vectorization.
1371   bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
1372     if (VF.isScalar())
1373       return true;
1374 
1375     // Cost model is not run in the VPlan-native path - return conservative
1376     // result until this changes.
1377     if (EnableVPlanNativePath)
1378       return false;
1379 
1380     auto ScalarsPerVF = Scalars.find(VF);
1381     assert(ScalarsPerVF != Scalars.end() &&
1382            "Scalar values are not calculated for VF");
1383     return ScalarsPerVF->second.count(I);
1384   }
1385 
1386   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1387   /// for vectorization factor \p VF.
1388   bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
1389     return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
1390            !isProfitableToScalarize(I, VF) &&
1391            !isScalarAfterVectorization(I, VF);
1392   }
1393 
1394   /// Decision that was taken during cost calculation for memory instruction.
1395   enum InstWidening {
1396     CM_Unknown,
1397     CM_Widen,         // For consecutive accesses with stride +1.
1398     CM_Widen_Reverse, // For consecutive accesses with stride -1.
1399     CM_Interleave,
1400     CM_GatherScatter,
1401     CM_Scalarize
1402   };
1403 
1404   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1405   /// instruction \p I and vector width \p VF.
1406   void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
1407                            InstructionCost Cost) {
1408     assert(VF.isVector() && "Expected VF >=2");
1409     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1410   }
1411 
1412   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1413   /// interleaving group \p Grp and vector width \p VF.
1414   void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
1415                            ElementCount VF, InstWidening W,
1416                            InstructionCost Cost) {
1417     assert(VF.isVector() && "Expected VF >=2");
    // Broadcast this decision to all instructions inside the group.
    // But the cost will be assigned to one instruction only.
1420     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1421       if (auto *I = Grp->getMember(i)) {
1422         if (Grp->getInsertPos() == I)
1423           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1424         else
1425           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1426       }
1427     }
1428   }
1429 
1430   /// Return the cost model decision for the given instruction \p I and vector
1431   /// width \p VF. Return CM_Unknown if this instruction did not pass
1432   /// through the cost modeling.
1433   InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
1434     assert(VF.isVector() && "Expected VF to be a vector VF");
1435     // Cost model is not run in the VPlan-native path - return conservative
1436     // result until this changes.
1437     if (EnableVPlanNativePath)
1438       return CM_GatherScatter;
1439 
1440     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1441     auto Itr = WideningDecisions.find(InstOnVF);
1442     if (Itr == WideningDecisions.end())
1443       return CM_Unknown;
1444     return Itr->second.first;
1445   }
1446 
1447   /// Return the vectorization cost for the given instruction \p I and vector
1448   /// width \p VF.
1449   InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
1450     assert(VF.isVector() && "Expected VF >=2");
1451     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1452     assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1453            "The cost is not calculated");
1454     return WideningDecisions[InstOnVF].second;
1455   }
1456 
1457   /// Return True if instruction \p I is an optimizable truncate whose operand
1458   /// is an induction variable. Such a truncate will be removed by adding a new
1459   /// induction variable with the destination type.
1460   bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
1461     // If the instruction is not a truncate, return false.
1462     auto *Trunc = dyn_cast<TruncInst>(I);
1463     if (!Trunc)
1464       return false;
1465 
1466     // Get the source and destination types of the truncate.
1467     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1468     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1469 
1470     // If the truncate is free for the given types, return false. Replacing a
1471     // free truncate with an induction variable would add an induction variable
1472     // update instruction to each iteration of the loop. We exclude from this
1473     // check the primary induction variable since it will need an update
1474     // instruction regardless.
1475     Value *Op = Trunc->getOperand(0);
1476     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1477       return false;
1478 
1479     // If the truncated value is not an induction variable, return false.
1480     return Legal->isInductionPhi(Op);
1481   }
1482 
1483   /// Collects the instructions to scalarize for each predicated instruction in
1484   /// the loop.
1485   void collectInstsToScalarize(ElementCount VF);
1486 
1487   /// Collect Uniform and Scalar values for the given \p VF.
1488   /// The sets depend on CM decision for Load/Store instructions
1489   /// that may be vectorized as interleave, gather-scatter or scalarized.
1490   void collectUniformsAndScalars(ElementCount VF) {
1491     // Do the analysis once.
1492     if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
1493       return;
1494     setCostBasedWideningDecision(VF);
1495     collectLoopUniforms(VF);
1496     collectLoopScalars(VF);
1497   }
1498 
1499   /// Returns true if the target machine supports masked store operation
1500   /// for the given \p DataType and kind of access to \p Ptr.
1501   bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const {
1502     return Legal->isConsecutivePtr(DataType, Ptr) &&
1503            TTI.isLegalMaskedStore(DataType, Alignment);
1504   }
1505 
1506   /// Returns true if the target machine supports masked load operation
1507   /// for the given \p DataType and kind of access to \p Ptr.
1508   bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const {
1509     return Legal->isConsecutivePtr(DataType, Ptr) &&
1510            TTI.isLegalMaskedLoad(DataType, Alignment);
1511   }
1512 
1513   /// Returns true if the target machine can represent \p V as a masked gather
1514   /// or scatter operation.
1515   bool isLegalGatherOrScatter(Value *V,
1516                               ElementCount VF = ElementCount::getFixed(1)) {
1517     bool LI = isa<LoadInst>(V);
1518     bool SI = isa<StoreInst>(V);
1519     if (!LI && !SI)
1520       return false;
1521     auto *Ty = getLoadStoreType(V);
1522     Align Align = getLoadStoreAlignment(V);
1523     if (VF.isVector())
1524       Ty = VectorType::get(Ty, VF);
1525     return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
1526            (SI && TTI.isLegalMaskedScatter(Ty, Align));
1527   }
1528 
1529   /// Returns true if the target machine supports all of the reduction
1530   /// variables found for the given VF.
1531   bool canVectorizeReductions(ElementCount VF) const {
1532     return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
1533       const RecurrenceDescriptor &RdxDesc = Reduction.second;
1534       return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
1535     }));
1536   }
1537 
1538   /// Returns true if \p I is an instruction that will be scalarized with
1539   /// predication when vectorizing \p I with vectorization factor \p VF. Such
1540   /// instructions include conditional stores and instructions that may divide
1541   /// by zero.
1542   bool isScalarWithPredication(Instruction *I, ElementCount VF) const;
1543 
  /// Returns true if \p I is an instruction that will be predicated either
  /// through scalar predication or masked load/store or masked gather/scatter.
  /// \p VF is the vectorization factor that will be used to vectorize \p I.
  /// Superset of instructions that return true for isScalarWithPredication.
1548   bool isPredicatedInst(Instruction *I, ElementCount VF,
1549                         bool IsKnownUniform = false) {
1550     // When we know the load is uniform and the original scalar loop was not
1551     // predicated we don't need to mark it as a predicated instruction. Any
1552     // vectorised blocks created when tail-folding are something artificial we
1553     // have introduced and we know there is always at least one active lane.
    // That's why we call Legal->blockNeedsPredication here, as it does not
    // query tail-folding.
1556     if (IsKnownUniform && isa<LoadInst>(I) &&
1557         !Legal->blockNeedsPredication(I->getParent()))
1558       return false;
1559     if (!blockNeedsPredicationForAnyReason(I->getParent()))
1560       return false;
1561     // Loads and stores that need some form of masked operation are predicated
1562     // instructions.
1563     if (isa<LoadInst>(I) || isa<StoreInst>(I))
1564       return Legal->isMaskRequired(I);
1565     return isScalarWithPredication(I, VF);
1566   }
1567 
1568   /// Returns true if \p I is a memory instruction with consecutive memory
1569   /// access that can be widened.
1570   bool
1571   memoryInstructionCanBeWidened(Instruction *I,
1572                                 ElementCount VF = ElementCount::getFixed(1));
1573 
1574   /// Returns true if \p I is a memory instruction in an interleaved-group
1575   /// of memory accesses that can be vectorized with wide vector loads/stores
1576   /// and shuffles.
1577   bool
1578   interleavedAccessCanBeWidened(Instruction *I,
1579                                 ElementCount VF = ElementCount::getFixed(1));
1580 
1581   /// Check if \p Instr belongs to any interleaved access group.
1582   bool isAccessInterleaved(Instruction *Instr) {
1583     return InterleaveInfo.isInterleaved(Instr);
1584   }
1585 
1586   /// Get the interleaved access group that \p Instr belongs to.
1587   const InterleaveGroup<Instruction> *
1588   getInterleavedAccessGroup(Instruction *Instr) {
1589     return InterleaveInfo.getInterleaveGroup(Instr);
1590   }
1591 
1592   /// Returns true if we're required to use a scalar epilogue for at least
1593   /// the final iteration of the original loop.
1594   bool requiresScalarEpilogue(ElementCount VF) const {
1595     if (!isScalarEpilogueAllowed())
1596       return false;
    // If we might exit from anywhere but the latch, we must run the exiting
    // iteration in scalar form.
1599     if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
1600       return true;
1601     return VF.isVector() && InterleaveInfo.requiresScalarEpilogue();
1602   }
1603 
1604   /// Returns true if a scalar epilogue is not allowed due to optsize or a
1605   /// loop hint annotation.
1606   bool isScalarEpilogueAllowed() const {
1607     return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1608   }
1609 
  /// Returns true if all loop blocks should be masked to fold the loop tail.
1611   bool foldTailByMasking() const { return FoldTailByMasking; }
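  // Tail-folding sketch (illustrative): for VF = 4 and trip count n, the
  // vector loop runs ceil(n / 4) iterations and every memory operation is
  // masked with a lane predicate of the form (i + lane) < n, so the final
  // partial iteration executes inside the vector loop instead of in a scalar
  // epilogue.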
1612 
  /// Returns true if the instructions in this block require predication
1614   /// for any reason, e.g. because tail folding now requires a predicate
1615   /// or because the block in the original loop was predicated.
1616   bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const {
1617     return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1618   }
1619 
1620   /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
1621   /// nodes to the chain of instructions representing the reductions. Uses a
1622   /// MapVector to ensure deterministic iteration order.
1623   using ReductionChainMap =
1624       SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;
1625 
1626   /// Return the chain of instructions representing an inloop reduction.
1627   const ReductionChainMap &getInLoopReductionChains() const {
1628     return InLoopReductionChains;
1629   }
1630 
1631   /// Returns true if the Phi is part of an inloop reduction.
1632   bool isInLoopReduction(PHINode *Phi) const {
1633     return InLoopReductionChains.count(Phi);
1634   }
1635 
1636   /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1637   /// with factor VF.  Return the cost of the instruction, including
1638   /// scalarization overhead if it's needed.
1639   InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;
1640 
1641   /// Estimate cost of a call instruction CI if it were vectorized with factor
1642   /// VF. Return the cost of the instruction, including scalarization overhead
  /// if it's needed. The flag NeedToScalarize shows if the call needs to be
  /// scalarized, i.e. either a vector version isn't available or it is too
  /// expensive.
1646   InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
1647                                     bool &NeedToScalarize) const;
1648 
1649   /// Returns true if the per-lane cost of VectorizationFactor A is lower than
1650   /// that of B.
1651   bool isMoreProfitable(const VectorizationFactor &A,
1652                         const VectorizationFactor &B) const;
1653 
1654   /// Invalidates decisions already taken by the cost model.
1655   void invalidateCostModelingDecisions() {
1656     WideningDecisions.clear();
1657     Uniforms.clear();
1658     Scalars.clear();
1659   }
1660 
1661 private:
1662   unsigned NumPredStores = 0;
1663 
  /// Convenience function that returns the value of vscale_range if
  /// vscale_range.min == vscale_range.max, and otherwise returns the value
  /// returned by the corresponding TTI method.
1667   Optional<unsigned> getVScaleForTuning() const;
1668 
1669   /// \return An upper bound for the vectorization factors for both
1670   /// fixed and scalable vectorization, where the minimum-known number of
1671   /// elements is a power-of-2 larger than zero. If scalable vectorization is
1672   /// disabled or unsupported, then the scalable part will be equal to
1673   /// ElementCount::getScalable(0).
1674   FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
1675                                            ElementCount UserVF,
1676                                            bool FoldTailByMasking);
1677 
  /// \return the maximized element count based on the target's vector
1679   /// registers and the loop trip-count, but limited to a maximum safe VF.
1680   /// This is a helper function of computeFeasibleMaxVF.
1681   /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
1682   /// issue that occurred on one of the buildbots which cannot be reproduced
  /// without having access to the proprietary compiler (see comments on
1684   /// D98509). The issue is currently under investigation and this workaround
1685   /// will be removed as soon as possible.
1686   ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
1687                                        unsigned SmallestType,
1688                                        unsigned WidestType,
1689                                        const ElementCount &MaxSafeVF,
1690                                        bool FoldTailByMasking);
1691 
1692   /// \return the maximum legal scalable VF, based on the safe max number
1693   /// of elements.
1694   ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1695 
1696   /// The vectorization cost is a combination of the cost itself and a boolean
1697   /// indicating whether any of the contributing operations will actually
1698   /// operate on vector values after type legalization in the backend. If this
1699   /// latter value is false, then all operations will be scalarized (i.e. no
1700   /// vectorization has actually taken place).
1701   using VectorizationCostTy = std::pair<InstructionCost, bool>;
1702 
1703   /// Returns the expected execution cost. The unit of the cost does
1704   /// not matter because we use the 'cost' units to compare different
1705   /// vector widths. The cost that is returned is *not* normalized by
1706   /// the factor width. If \p Invalid is not nullptr, this function
1707   /// will add a pair(Instruction*, ElementCount) to \p Invalid for
1708   /// each instruction that has an Invalid cost for the given VF.
1709   using InstructionVFPair = std::pair<Instruction *, ElementCount>;
1710   VectorizationCostTy
1711   expectedCost(ElementCount VF,
1712                SmallVectorImpl<InstructionVFPair> *Invalid = nullptr);
1713 
1714   /// Returns the execution time cost of an instruction for a given vector
1715   /// width. Vector width of one means scalar.
1716   VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1717 
1718   /// The cost-computation logic from getInstructionCost which provides
1719   /// the vector type as an output parameter.
1720   InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
1721                                      Type *&VectorTy);
1722 
1723   /// Return the cost of instructions in an inloop reduction pattern, if I is
1724   /// part of that pattern.
1725   Optional<InstructionCost>
1726   getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy,
1727                           TTI::TargetCostKind CostKind);
1728 
1729   /// Calculate vectorization cost of memory instruction \p I.
1730   InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1731 
1732   /// The cost computation for scalarized memory instruction.
1733   InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1734 
1735   /// The cost computation for interleaving group of memory instructions.
1736   InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1737 
1738   /// The cost computation for Gather/Scatter instruction.
1739   InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1740 
1741   /// The cost computation for widening instruction \p I with consecutive
1742   /// memory access.
1743   InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1744 
1745   /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1746   /// Load: scalar load + broadcast.
1747   /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1748   /// element)
1749   InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1750 
1751   /// Estimate the overhead of scalarizing an instruction. This is a
1752   /// convenience wrapper for the type-based getScalarizationOverhead API.
1753   InstructionCost getScalarizationOverhead(Instruction *I,
1754                                            ElementCount VF) const;
1755 
  /// Returns whether the instruction is a load or store and will be emitted
1757   /// as a vector operation.
1758   bool isConsecutiveLoadOrStore(Instruction *I);
1759 
1760   /// Returns true if an artificially high cost for emulated masked memrefs
1761   /// should be used.
1762   bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF);
1763 
1764   /// Map of scalar integer values to the smallest bitwidth they can be legally
1765   /// represented as. The vector equivalents of these values should be truncated
1766   /// to this type.
1767   MapVector<Instruction *, uint64_t> MinBWs;
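  // For example (illustrative): if an i8 value is zero-extended to i32,
  // combined with another such value, and only the low 8 bits of the result
  // are demanded, MinBWs may record 8 for the combining instruction so its
  // vector form can use <VF x i8> instead of <VF x i32>.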
1768 
1769   /// A type representing the costs for instructions if they were to be
1770   /// scalarized rather than vectorized. The entries are Instruction-Cost
1771   /// pairs.
1772   using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1773 
  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
1776   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1777 
1778   /// Records whether it is allowed to have the original scalar loop execute at
1779   /// least once. This may be needed as a fallback loop in case runtime
1780   /// aliasing/dependence checks fail, or to handle the tail/remainder
1781   /// iterations when the trip count is unknown or doesn't divide by the VF,
1782   /// or as a peel-loop to handle gaps in interleave-groups.
1783   /// Under optsize and when the trip count is very small we don't allow any
1784   /// iterations to execute in the scalar loop.
1785   ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1786 
1787   /// All blocks of loop are to be masked to fold tail of scalar iterations.
1788   bool FoldTailByMasking = false;
1789 
1790   /// A map holding scalar costs for different vectorization factors. The
1791   /// presence of a cost for an instruction in the mapping indicates that the
1792   /// instruction will be scalarized when vectorizing with the associated
1793   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1794   DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1795 
1796   /// Holds the instructions known to be uniform after vectorization.
1797   /// The data is collected per VF.
1798   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1799 
1800   /// Holds the instructions known to be scalar after vectorization.
1801   /// The data is collected per VF.
1802   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1803 
1804   /// Holds the instructions (address computations) that are forced to be
1805   /// scalarized.
1806   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1807 
1808   /// PHINodes of the reductions that should be expanded in-loop along with
1809   /// their associated chains of reduction operations, in program order from top
  /// (PHI) to bottom.
1811   ReductionChainMap InLoopReductionChains;
1812 
  /// A map of inloop reduction operations and their immediate chain operand.
1814   /// FIXME: This can be removed once reductions can be costed correctly in
1815   /// vplan. This was added to allow quick lookup to the inloop operations,
1816   /// without having to loop through InLoopReductionChains.
1817   DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1818 
1819   /// Returns the expected difference in cost from scalarizing the expression
1820   /// feeding a predicated instruction \p PredInst. The instructions to
1821   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1822   /// non-negative return value implies the expression will be scalarized.
1823   /// Currently, only single-use chains are considered for scalarization.
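  /// For example (illustrative): for a predicated udiv whose operands form a
  /// single-use chain of otherwise-vectorizable computations, scalarizing the
  /// whole chain into the predicated block can be cheaper than widening the
  /// chain and then extracting lanes just to feed the scalarized division.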
1824   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1825                               ElementCount VF);
1826 
1827   /// Collect the instructions that are uniform after vectorization. An
1828   /// instruction is uniform if we represent it with a single scalar value in
1829   /// the vectorized loop corresponding to each vector iteration. Examples of
1830   /// uniform instructions include pointer operands of consecutive or
1831   /// interleaved memory accesses. Note that although uniformity implies an
1832   /// instruction will be scalar, the reverse is not true. In general, a
1833   /// scalarized instruction will be represented by VF scalar values in the
1834   /// vectorized loop, each corresponding to an iteration of the original
1835   /// scalar loop.
1836   void collectLoopUniforms(ElementCount VF);
1837 
1838   /// Collect the instructions that are scalar after vectorization. An
1839   /// instruction is scalar if it is known to be uniform or will be scalarized
1840   /// during vectorization. collectLoopScalars should only add non-uniform nodes
1841   /// to the list if they are used by a load/store instruction that is marked as
1842   /// CM_Scalarize. Non-uniform scalarized instructions will be represented by
1843   /// VF values in the vectorized loop, each corresponding to an iteration of
1844   /// the original scalar loop.
1845   void collectLoopScalars(ElementCount VF);
1846 
1847   /// Keeps cost model vectorization decision and cost for instructions.
1848   /// Right now it is used for memory instructions only.
1849   using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1850                                 std::pair<InstWidening, InstructionCost>>;
1851 
1852   DecisionList WideningDecisions;
1853 
1854   /// Returns true if \p V is expected to be vectorized and it needs to be
1855   /// extracted.
1856   bool needsExtract(Value *V, ElementCount VF) const {
1857     Instruction *I = dyn_cast<Instruction>(V);
1858     if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1859         TheLoop->isLoopInvariant(I))
1860       return false;
1861 
1862     // Assume we can vectorize V (and hence we need extraction) if the
1863     // scalars are not computed yet. This can happen, because it is called
1864     // via getScalarizationOverhead from setCostBasedWideningDecision, before
1865     // the scalars are collected. That should be a safe assumption in most
1866     // cases, because we check if the operands have vectorizable types
1867     // beforehand in LoopVectorizationLegality.
1868     return Scalars.find(VF) == Scalars.end() ||
1869            !isScalarAfterVectorization(I, VF);
1870   };
1871 
1872   /// Returns a range containing only operands needing to be extracted.
1873   SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1874                                                    ElementCount VF) const {
1875     return SmallVector<Value *, 4>(make_filter_range(
1876         Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
1877   }
1878 
1879   /// Determines if we have the infrastructure to vectorize loop \p L and its
1880   /// epilogue, assuming the main loop is vectorized by \p VF.
1881   bool isCandidateForEpilogueVectorization(const Loop &L,
1882                                            const ElementCount VF) const;
1883 
1884   /// Returns true if epilogue vectorization is considered profitable, and
1885   /// false otherwise.
1886   /// \p VF is the vectorization factor chosen for the original loop.
1887   bool isEpilogueVectorizationProfitable(const ElementCount VF) const;
1888 
1889 public:
1890   /// The loop that we evaluate.
1891   Loop *TheLoop;
1892 
1893   /// Predicated scalar evolution analysis.
1894   PredicatedScalarEvolution &PSE;
1895 
1896   /// Loop Info analysis.
1897   LoopInfo *LI;
1898 
1899   /// Vectorization legality.
1900   LoopVectorizationLegality *Legal;
1901 
1902   /// Vector target information.
1903   const TargetTransformInfo &TTI;
1904 
1905   /// Target Library Info.
1906   const TargetLibraryInfo *TLI;
1907 
1908   /// Demanded bits analysis.
1909   DemandedBits *DB;
1910 
1911   /// Assumption cache.
1912   AssumptionCache *AC;
1913 
1914   /// Interface to emit optimization remarks.
1915   OptimizationRemarkEmitter *ORE;
1916 
1917   const Function *TheFunction;
1918 
1919   /// Loop Vectorize Hint.
1920   const LoopVectorizeHints *Hints;
1921 
1922   /// The interleave access information contains groups of interleaved accesses
1923   /// with the same stride and close to each other.
1924   InterleavedAccessInfo &InterleaveInfo;
1925 
1926   /// Values to ignore in the cost model.
1927   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1928 
1929   /// Values to ignore in the cost model when VF > 1.
1930   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1931 
1932   /// All element types found in the loop.
1933   SmallPtrSet<Type *, 16> ElementTypesInLoop;
1934 
1935   /// Profitable vector factors.
1936   SmallVector<VectorizationFactor, 8> ProfitableVFs;
1937 };
1938 } // end namespace llvm
1939 
1940 /// Helper struct to manage generating runtime checks for vectorization.
1941 ///
1942 /// The runtime checks are created up-front in temporary blocks to allow better
1943 /// estimating the cost and un-linked from the existing IR. After deciding to
1944 /// vectorize, the checks are moved back. If deciding not to vectorize, the
1945 /// temporary blocks are completely removed.
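/// As an illustrative example, for a loop reading B[i] and writing A[i] where
/// A and B may alias, the memory runtime check compares the address ranges
/// [A, A + n) and [B, B + n) and branches to the scalar loop if they overlap.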
1946 class GeneratedRTChecks {
1947   /// Basic block which contains the generated SCEV checks, if any.
1948   BasicBlock *SCEVCheckBlock = nullptr;
1949 
1950   /// The value representing the result of the generated SCEV checks. If it is
1951   /// nullptr, either no SCEV checks have been generated or they have been used.
1952   Value *SCEVCheckCond = nullptr;
1953 
1954   /// Basic block which contains the generated memory runtime checks, if any.
1955   BasicBlock *MemCheckBlock = nullptr;
1956 
1957   /// The value representing the result of the generated memory runtime checks.
1958   /// If it is nullptr, either no memory runtime checks have been generated or
1959   /// they have been used.
1960   Value *MemRuntimeCheckCond = nullptr;
1961 
1962   DominatorTree *DT;
1963   LoopInfo *LI;
1964 
1965   SCEVExpander SCEVExp;
1966   SCEVExpander MemCheckExp;
1967 
1968 public:
1969   GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI,
1970                     const DataLayout &DL)
1971       : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"),
1972         MemCheckExp(SE, DL, "scev.check") {}
1973 
1974   /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
1975   /// accurately estimate the cost of the runtime checks. The blocks are
  /// un-linked from the IR and added back during vector code generation. If
1977   /// there is no vector code generation, the check blocks are removed
1978   /// completely.
1979   void Create(Loop *L, const LoopAccessInfo &LAI,
1980               const SCEVPredicate &Pred) {
1981 
1982     BasicBlock *LoopHeader = L->getHeader();
1983     BasicBlock *Preheader = L->getLoopPreheader();
1984 
1985     // Use SplitBlock to create blocks for SCEV & memory runtime checks to
1986     // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
1987     // may be used by SCEVExpander. The blocks will be un-linked from their
1988     // predecessors and removed from LI & DT at the end of the function.
1989     if (!Pred.isAlwaysTrue()) {
1990       SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
1991                                   nullptr, "vector.scevcheck");
1992 
1993       SCEVCheckCond = SCEVExp.expandCodeForPredicate(
1994           &Pred, SCEVCheckBlock->getTerminator());
1995     }
1996 
1997     const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
1998     if (RtPtrChecking.Need) {
1999       auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
2000       MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
2001                                  "vector.memcheck");
2002 
2003       MemRuntimeCheckCond =
2004           addRuntimeChecks(MemCheckBlock->getTerminator(), L,
2005                            RtPtrChecking.getChecks(), MemCheckExp);
2006       assert(MemRuntimeCheckCond &&
2007              "no RT checks generated although RtPtrChecking "
2008              "claimed checks are required");
2009     }
2010 
2011     if (!MemCheckBlock && !SCEVCheckBlock)
2012       return;
2013 
2014     // Unhook the temporary block with the checks, update various places
2015     // accordingly.
2016     if (SCEVCheckBlock)
2017       SCEVCheckBlock->replaceAllUsesWith(Preheader);
2018     if (MemCheckBlock)
2019       MemCheckBlock->replaceAllUsesWith(Preheader);
2020 
2021     if (SCEVCheckBlock) {
2022       SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
2023       new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
2024       Preheader->getTerminator()->eraseFromParent();
2025     }
2026     if (MemCheckBlock) {
2027       MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
2028       new UnreachableInst(Preheader->getContext(), MemCheckBlock);
2029       Preheader->getTerminator()->eraseFromParent();
2030     }
2031 
2032     DT->changeImmediateDominator(LoopHeader, Preheader);
2033     if (MemCheckBlock) {
2034       DT->eraseNode(MemCheckBlock);
2035       LI->removeBlock(MemCheckBlock);
2036     }
2037     if (SCEVCheckBlock) {
2038       DT->eraseNode(SCEVCheckBlock);
2039       LI->removeBlock(SCEVCheckBlock);
2040     }
2041   }
2042 
2043   /// Remove the created SCEV & memory runtime check blocks & instructions, if
2044   /// unused.
2045   ~GeneratedRTChecks() {
2046     SCEVExpanderCleaner SCEVCleaner(SCEVExp);
2047     SCEVExpanderCleaner MemCheckCleaner(MemCheckExp);
2048     if (!SCEVCheckCond)
2049       SCEVCleaner.markResultUsed();
2050 
2051     if (!MemRuntimeCheckCond)
2052       MemCheckCleaner.markResultUsed();
2053 
2054     if (MemRuntimeCheckCond) {
2055       auto &SE = *MemCheckExp.getSE();
2056       // Memory runtime check generation creates compares that use expanded
2057       // values. Remove them before running the SCEVExpanderCleaners.
2058       for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
2059         if (MemCheckExp.isInsertedInstruction(&I))
2060           continue;
2061         SE.forgetValue(&I);
2062         I.eraseFromParent();
2063       }
2064     }
2065     MemCheckCleaner.cleanup();
2066     SCEVCleaner.cleanup();
2067 
2068     if (SCEVCheckCond)
2069       SCEVCheckBlock->eraseFromParent();
2070     if (MemRuntimeCheckCond)
2071       MemCheckBlock->eraseFromParent();
2072   }
2073 
2074   /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and
2075   /// adjusts the branches to branch to the vector preheader or \p Bypass,
2076   /// depending on the generated condition.
2077   BasicBlock *emitSCEVChecks(BasicBlock *Bypass,
2078                              BasicBlock *LoopVectorPreHeader,
2079                              BasicBlock *LoopExitBlock) {
2080     if (!SCEVCheckCond)
2081       return nullptr;
2082     if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond))
2083       if (C->isZero())
2084         return nullptr;
2085 
2086     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2087 
2088     BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock);
2089     // Create new preheader for vector loop.
2090     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2091       PL->addBasicBlockToLoop(SCEVCheckBlock, *LI);
2092 
2093     SCEVCheckBlock->getTerminator()->eraseFromParent();
2094     SCEVCheckBlock->moveBefore(LoopVectorPreHeader);
2095     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2096                                                 SCEVCheckBlock);
2097 
2098     DT->addNewBlock(SCEVCheckBlock, Pred);
2099     DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock);
2100 
2101     ReplaceInstWithInst(
2102         SCEVCheckBlock->getTerminator(),
2103         BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond));
2104     // Mark the check as used, to prevent it from being removed during cleanup.
2105     SCEVCheckCond = nullptr;
2106     return SCEVCheckBlock;
2107   }
2108 
2109   /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts
2110   /// the branches to branch to the vector preheader or \p Bypass, depending on
2111   /// the generated condition.
2112   BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass,
2113                                    BasicBlock *LoopVectorPreHeader) {
    // Check if we generated code that checks at runtime whether arrays
    // overlap.
2115     if (!MemRuntimeCheckCond)
2116       return nullptr;
2117 
2118     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2119     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2120                                                 MemCheckBlock);
2121 
2122     DT->addNewBlock(MemCheckBlock, Pred);
2123     DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock);
2124     MemCheckBlock->moveBefore(LoopVectorPreHeader);
2125 
2126     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2127       PL->addBasicBlockToLoop(MemCheckBlock, *LI);
2128 
2129     ReplaceInstWithInst(
2130         MemCheckBlock->getTerminator(),
2131         BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond));
2132     MemCheckBlock->getTerminator()->setDebugLoc(
2133         Pred->getTerminator()->getDebugLoc());
2134 
2135     // Mark the check as used, to prevent it from being removed during cleanup.
2136     MemRuntimeCheckCond = nullptr;
2137     return MemCheckBlock;
2138   }
2139 };
2140 
2141 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
2142 // vectorization. The loop needs to be annotated with #pragma omp simd
// simdlen(#) or #pragma clang loop vectorize(enable) vectorize_width(#). If the
2144 // vector length information is not provided, vectorization is not considered
2145 // explicit. Interleave hints are not allowed either. These limitations will be
2146 // relaxed in the future.
// Please note that we are currently forced to abuse the pragma 'clang
2148 // vectorize' semantics. This pragma provides *auto-vectorization hints*
2149 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
2150 // provides *explicit vectorization hints* (LV can bypass legal checks and
2151 // assume that vectorization is legal). However, both hints are implemented
2152 // using the same metadata (llvm.loop.vectorize, processed by
2153 // LoopVectorizeHints). This will be fixed in the future when the native IR
2154 // representation for pragma 'omp simd' is introduced.
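// Illustrative source-level annotation accepted by this function (assumed
// example, not taken from a specific test):
//   #pragma omp simd simdlen(4)
//   for (int i = 0; i < n; ++i)     // outer loop, explicit VF of 4
//     for (int j = 0; j < m; ++j)
//       A[i][j] = 0;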
2155 static bool isExplicitVecOuterLoop(Loop *OuterLp,
2156                                    OptimizationRemarkEmitter *ORE) {
2157   assert(!OuterLp->isInnermost() && "This is not an outer loop");
2158   LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
2159 
2160   // Only outer loops with an explicit vectorization hint are supported.
2161   // Unannotated outer loops are ignored.
2162   if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
2163     return false;
2164 
2165   Function *Fn = OuterLp->getHeader()->getParent();
2166   if (!Hints.allowVectorization(Fn, OuterLp,
2167                                 true /*VectorizeOnlyWhenForced*/)) {
2168     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
2169     return false;
2170   }
2171 
2172   if (Hints.getInterleave() > 1) {
2173     // TODO: Interleave support is future work.
2174     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
2175                          "outer loops.\n");
2176     Hints.emitRemarkWithHints();
2177     return false;
2178   }
2179 
2180   return true;
2181 }
2182 
2183 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
2184                                   OptimizationRemarkEmitter *ORE,
2185                                   SmallVectorImpl<Loop *> &V) {
2186   // Collect inner loops and outer loops without irreducible control flow. For
2187   // now, only collect outer loops that have explicit vectorization hints. If we
2188   // are stress testing the VPlan H-CFG construction, we collect the outermost
2189   // loop of every loop nest.
2190   if (L.isInnermost() || VPlanBuildStressTest ||
2191       (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
2192     LoopBlocksRPO RPOT(&L);
2193     RPOT.perform(LI);
2194     if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
2195       V.push_back(&L);
2196       // TODO: Collect inner loops inside marked outer loops in case
2197       // vectorization fails for the outer loop. Do not invoke
2198       // 'containsIrreducibleCFG' again for inner loops when the outer loop is
2199       // already known to be reducible. We can use an inherited attribute for
2200       // that.
2201       return;
2202     }
2203   }
2204   for (Loop *InnerL : L)
2205     collectSupportedLoops(*InnerL, LI, ORE, V);
2206 }
2207 
2208 namespace {
2209 
2210 /// The LoopVectorize Pass.
2211 struct LoopVectorize : public FunctionPass {
2212   /// Pass identification, replacement for typeid
2213   static char ID;
2214 
2215   LoopVectorizePass Impl;
2216 
2217   explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
2218                          bool VectorizeOnlyWhenForced = false)
2219       : FunctionPass(ID),
2220         Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
2221     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
2222   }
2223 
2224   bool runOnFunction(Function &F) override {
2225     if (skipFunction(F))
2226       return false;
2227 
2228     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2229     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2230     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
2231     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2232     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
2233     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
2234     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
2235     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2236     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2237     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
2238     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
2239     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2240     auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
2241 
2242     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
2243         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
2244 
2245     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
2246                         GetLAA, *ORE, PSI).MadeAnyChange;
2247   }
2248 
2249   void getAnalysisUsage(AnalysisUsage &AU) const override {
2250     AU.addRequired<AssumptionCacheTracker>();
2251     AU.addRequired<BlockFrequencyInfoWrapperPass>();
2252     AU.addRequired<DominatorTreeWrapperPass>();
2253     AU.addRequired<LoopInfoWrapperPass>();
2254     AU.addRequired<ScalarEvolutionWrapperPass>();
2255     AU.addRequired<TargetTransformInfoWrapperPass>();
2256     AU.addRequired<AAResultsWrapperPass>();
2257     AU.addRequired<LoopAccessLegacyAnalysis>();
2258     AU.addRequired<DemandedBitsWrapperPass>();
2259     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2260     AU.addRequired<InjectTLIMappingsLegacy>();
2261 
2262     // We currently do not preserve loopinfo/dominator analyses with outer loop
2263     // vectorization. Until this is addressed, mark these analyses as preserved
2264     // only for non-VPlan-native path.
2265     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
2266     if (!EnableVPlanNativePath) {
2267       AU.addPreserved<LoopInfoWrapperPass>();
2268       AU.addPreserved<DominatorTreeWrapperPass>();
2269     }
2270 
2271     AU.addPreserved<BasicAAWrapperPass>();
2272     AU.addPreserved<GlobalsAAWrapperPass>();
2273     AU.addRequired<ProfileSummaryInfoWrapperPass>();
2274   }
2275 };
2276 
2277 } // end anonymous namespace
2278 
2279 //===----------------------------------------------------------------------===//
2280 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
2281 // LoopVectorizationCostModel and LoopVectorizationPlanner.
2282 //===----------------------------------------------------------------------===//
2283 
2284 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
2285   // We need to place the broadcast of invariant variables outside the loop,
  // but only if it's proven safe to do so. Otherwise, the broadcast will be
  // inside the vector loop body.
2288   Instruction *Instr = dyn_cast<Instruction>(V);
2289   bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
2290                      (!Instr ||
2291                       DT->dominates(Instr->getParent(), LoopVectorPreHeader));
2292   // Place the code for broadcasting invariant variables in the new preheader.
2293   IRBuilder<>::InsertPointGuard Guard(Builder);
2294   if (SafeToHoist)
2295     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2296 
2297   // Broadcast the scalar into all locations in the vector.
2298   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
2299 
2300   return Shuf;
2301 }
2302 
2303 /// This function adds
2304 /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
/// to each vector element of Val. The sequence starts at StartIdx.
/// \p BinOp is relevant for FP induction variables.
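/// For example (illustrative): with VF = 4, StartIdx = 0 and Step = 2 for an
/// integer Val, the result is Val + <0, 2, 4, 6>, i.e. Val plus
/// <0, 1, 2, 3> * Step.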
2307 static Value *getStepVector(Value *Val, Value *StartIdx, Value *Step,
2308                             Instruction::BinaryOps BinOp, ElementCount VF,
2309                             IRBuilderBase &Builder) {
2310   assert(VF.isVector() && "only vector VFs are supported");
2311 
2312   // Create and check the types.
2313   auto *ValVTy = cast<VectorType>(Val->getType());
2314   ElementCount VLen = ValVTy->getElementCount();
2315 
2316   Type *STy = Val->getType()->getScalarType();
2317   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
2318          "Induction Step must be an integer or FP");
2319   assert(Step->getType() == STy && "Step has wrong type");
2320 
2321   SmallVector<Constant *, 8> Indices;
2322 
2323   // Create a vector of consecutive numbers from zero to VF.
2324   VectorType *InitVecValVTy = ValVTy;
2325   if (STy->isFloatingPointTy()) {
2326     Type *InitVecValSTy =
2327         IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
2328     InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
2329   }
2330   Value *InitVec = Builder.CreateStepVector(InitVecValVTy);
2331 
2332   // Splat the StartIdx
2333   Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx);
2334 
2335   if (STy->isIntegerTy()) {
2336     InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);
2337     Step = Builder.CreateVectorSplat(VLen, Step);
2338     assert(Step->getType() == Val->getType() && "Invalid step vec");
2339     // FIXME: The newly created binary instructions should contain nsw/nuw
2340     // flags, which can be found from the original scalar operations.
2341     Step = Builder.CreateMul(InitVec, Step);
2342     return Builder.CreateAdd(Val, Step, "induction");
2343   }
2344 
2345   // Floating point induction.
2346   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
2347          "Binary Opcode should be specified for FP induction");
2348   InitVec = Builder.CreateUIToFP(InitVec, ValVTy);
2349   InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat);
2350 
2351   Step = Builder.CreateVectorSplat(VLen, Step);
2352   Value *MulOp = Builder.CreateFMul(InitVec, Step);
2353   return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
2354 }
2355 
2356 /// Compute scalar induction steps. \p ScalarIV is the scalar induction
2357 /// variable on which to base the steps, \p Step is the size of the step.
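/// For example (illustrative): with UF = 2, VF = 4 and an integer induction,
/// the scalar step for unroll part Part and lane Lane is
///   ScalarIV + (Part * 4 + Lane) * Step
/// so Part 1, Lane 2 receives ScalarIV + 6 * Step.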
2358 static void buildScalarSteps(Value *ScalarIV, Value *Step,
2359                              const InductionDescriptor &ID, VPValue *Def,
2360                              VPTransformState &State) {
2361   IRBuilderBase &Builder = State.Builder;
2362   // We shouldn't have to build scalar steps if we aren't vectorizing.
2363   assert(State.VF.isVector() && "VF should be greater than one");
  // Get the value type and ensure it and the step have the same type.
2365   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
2366   assert(ScalarIVTy == Step->getType() &&
2367          "Val and Step should have the same type");
2368 
2369   // We build scalar steps for both integer and floating-point induction
2370   // variables. Here, we determine the kind of arithmetic we will perform.
2371   Instruction::BinaryOps AddOp;
2372   Instruction::BinaryOps MulOp;
2373   if (ScalarIVTy->isIntegerTy()) {
2374     AddOp = Instruction::Add;
2375     MulOp = Instruction::Mul;
2376   } else {
2377     AddOp = ID.getInductionOpcode();
2378     MulOp = Instruction::FMul;
2379   }
2380 
2381   // Determine the number of scalars we need to generate for each unroll
2382   // iteration.
2383   bool FirstLaneOnly = vputils::onlyFirstLaneUsed(Def);
2384   unsigned Lanes = FirstLaneOnly ? 1 : State.VF.getKnownMinValue();
2385   // Compute the scalar steps and save the results in State.
2386   Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
2387                                      ScalarIVTy->getScalarSizeInBits());
2388   Type *VecIVTy = nullptr;
2389   Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr;
2390   if (!FirstLaneOnly && State.VF.isScalable()) {
2391     VecIVTy = VectorType::get(ScalarIVTy, State.VF);
2392     UnitStepVec =
2393         Builder.CreateStepVector(VectorType::get(IntStepTy, State.VF));
2394     SplatStep = Builder.CreateVectorSplat(State.VF, Step);
2395     SplatIV = Builder.CreateVectorSplat(State.VF, ScalarIV);
2396   }
2397 
2398   for (unsigned Part = 0; Part < State.UF; ++Part) {
2399     Value *StartIdx0 = createStepForVF(Builder, IntStepTy, State.VF, Part);
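    // StartIdx0 is the index of the first lane of this unroll part, i.e.
    // Part * VF (a runtime quantity for scalable VFs); e.g. for VF = 4 and
    // Part = 1 it is the constant 4.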
2400 
2401     if (!FirstLaneOnly && State.VF.isScalable()) {
2402       auto *SplatStartIdx = Builder.CreateVectorSplat(State.VF, StartIdx0);
2403       auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec);
2404       if (ScalarIVTy->isFloatingPointTy())
2405         InitVec = Builder.CreateSIToFP(InitVec, VecIVTy);
2406       auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep);
2407       auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul);
2408       State.set(Def, Add, Part);
      // It's also useful to record the per-lane values for the known minimum
      // number of elements, so we do that below. This improves the code
      // quality when trying to extract the first element, for example.
2412     }
2413 
2414     if (ScalarIVTy->isFloatingPointTy())
2415       StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy);
2416 
2417     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2418       Value *StartIdx = Builder.CreateBinOp(
2419           AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane));
2420       // The step returned by `createStepForVF` is a runtime-evaluated value
2421       // when VF is scalable. Otherwise, it should be folded into a Constant.
2422       assert((State.VF.isScalable() || isa<Constant>(StartIdx)) &&
2423              "Expected StartIdx to be folded to a constant when VF is not "
2424              "scalable");
2425       auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
2426       auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul);
2427       State.set(Def, Add, VPIteration(Part, Lane));
2428     }
2429   }
2430 }
2431 
// Generate code for the induction step. Note that induction steps are
// required to be loop-invariant.
2434 static Value *CreateStepValue(const SCEV *Step, ScalarEvolution &SE,
2435                               Instruction *InsertBefore,
2436                               Loop *OrigLoop = nullptr) {
2437   const DataLayout &DL = SE.getDataLayout();
2438   assert((!OrigLoop || SE.isLoopInvariant(Step, OrigLoop)) &&
2439          "Induction step should be loop invariant");
2440   if (auto *E = dyn_cast<SCEVUnknown>(Step))
2441     return E->getValue();
2442 
2443   SCEVExpander Exp(SE, DL, "induction");
2444   return Exp.expandCodeFor(Step, Step->getType(), InsertBefore);
2445 }
2446 
2447 /// Compute the transformed value of Index at offset StartValue using step
2448 /// StepValue.
2449 /// For integer induction, returns StartValue + Index * StepValue.
2450 /// For pointer induction, returns StartValue[Index * StepValue].
2451 /// FIXME: The newly created binary instructions should contain nsw/nuw
2452 /// flags, which can be found from the original scalar operations.
2453 static Value *emitTransformedIndex(IRBuilderBase &B, Value *Index,
2454                                    Value *StartValue, Value *Step,
2455                                    const InductionDescriptor &ID) {
2456   assert(Index->getType()->getScalarType() == Step->getType() &&
2457          "Index scalar type does not match StepValue type");
2458 
  // Note: the IR at this point is broken. We cannot use SE to create any new
  // SCEV and then expand it, hoping that SCEV's simplification will give us
  // more optimal code. Unfortunately, attempting to do so on invalid IR may
  // lead to various SCEV crashes. So all we can do is use the builder and
  // rely on InstCombine for future simplifications. Here we handle some
  // trivial cases only.
2465   auto CreateAdd = [&B](Value *X, Value *Y) {
2466     assert(X->getType() == Y->getType() && "Types don't match!");
2467     if (auto *CX = dyn_cast<ConstantInt>(X))
2468       if (CX->isZero())
2469         return Y;
2470     if (auto *CY = dyn_cast<ConstantInt>(Y))
2471       if (CY->isZero())
2472         return X;
2473     return B.CreateAdd(X, Y);
2474   };
2475 
2476   // We allow X to be a vector type, in which case Y will potentially be
2477   // splatted into a vector with the same element count.
2478   auto CreateMul = [&B](Value *X, Value *Y) {
2479     assert(X->getType()->getScalarType() == Y->getType() &&
2480            "Types don't match!");
2481     if (auto *CX = dyn_cast<ConstantInt>(X))
2482       if (CX->isOne())
2483         return Y;
2484     if (auto *CY = dyn_cast<ConstantInt>(Y))
2485       if (CY->isOne())
2486         return X;
2487     VectorType *XVTy = dyn_cast<VectorType>(X->getType());
2488     if (XVTy && !isa<VectorType>(Y->getType()))
2489       Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
2490     return B.CreateMul(X, Y);
2491   };
2492 
2493   switch (ID.getKind()) {
2494   case InductionDescriptor::IK_IntInduction: {
2495     assert(!isa<VectorType>(Index->getType()) &&
2496            "Vector indices not supported for integer inductions yet");
2497     assert(Index->getType() == StartValue->getType() &&
2498            "Index type does not match StartValue type");
2499     if (isa<ConstantInt>(Step) && cast<ConstantInt>(Step)->isMinusOne())
2500       return B.CreateSub(StartValue, Index);
2501     auto *Offset = CreateMul(Index, Step);
2502     return CreateAdd(StartValue, Offset);
2503   }
2504   case InductionDescriptor::IK_PtrInduction: {
2505     assert(isa<Constant>(Step) &&
2506            "Expected constant step for pointer induction");
2507     return B.CreateGEP(ID.getElementType(), StartValue, CreateMul(Index, Step));
2508   }
2509   case InductionDescriptor::IK_FpInduction: {
2510     assert(!isa<VectorType>(Index->getType()) &&
2511            "Vector indices not supported for FP inductions yet");
2512     assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
2513     auto InductionBinOp = ID.getInductionBinOp();
2514     assert(InductionBinOp &&
2515            (InductionBinOp->getOpcode() == Instruction::FAdd ||
2516             InductionBinOp->getOpcode() == Instruction::FSub) &&
2517            "Original bin op should be defined for FP induction");
2518 
2519     Value *MulExp = B.CreateFMul(Step, Index);
2520     return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
2521                          "induction");
2522   }
2523   case InductionDescriptor::IK_NoInduction:
2524     return nullptr;
2525   }
2526   llvm_unreachable("invalid enum");
2527 }
2528 
2529 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def,
2530                                                     const VPIteration &Instance,
2531                                                     VPTransformState &State) {
2532   Value *ScalarInst = State.get(Def, Instance);
2533   Value *VectorValue = State.get(Def, Instance.Part);
2534   VectorValue = Builder.CreateInsertElement(
2535       VectorValue, ScalarInst,
2536       Instance.Lane.getAsRuntimeExpr(State.Builder, VF));
2537   State.set(Def, VectorValue, Instance.Part);
2538 }
2539 
2540 // Return whether we allow using masked interleave-groups (for dealing with
2541 // strided loads/stores that reside in predicated blocks, or for dealing
2542 // with gaps).
2543 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2544   // If an override option has been passed in for interleaved accesses, use it.
2545   if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2546     return EnableMaskedInterleavedMemAccesses;
2547 
2548   return TTI.enableMaskedInterleavedAccessVectorization();
2549 }
2550 
2551 // Try to vectorize the interleave group that \p Instr belongs to.
2552 //
2553 // E.g. Translate following interleaved load group (factor = 3):
2554 //   for (i = 0; i < N; i+=3) {
2555 //     R = Pic[i];             // Member of index 0
2556 //     G = Pic[i+1];           // Member of index 1
2557 //     B = Pic[i+2];           // Member of index 2
2558 //     ... // do something to R, G, B
2559 //   }
2560 // To:
2561 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2562 //   %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9>   ; R elements
2563 //   %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10>  ; G elements
2564 //   %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11>  ; B elements
2565 //
2566 // Or translate following interleaved store group (factor = 3):
2567 //   for (i = 0; i < N; i+=3) {
2568 //     ... do something to R, G, B
2569 //     Pic[i]   = R;           // Member of index 0
2570 //     Pic[i+1] = G;           // Member of index 1
2571 //     Pic[i+2] = B;           // Member of index 2
2572 //   }
2573 // To:
2574 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2575 //   %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
2576 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2577 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2578 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2579 void InnerLoopVectorizer::vectorizeInterleaveGroup(
2580     const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
2581     VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
2582     VPValue *BlockInMask) {
2583   Instruction *Instr = Group->getInsertPos();
2584   const DataLayout &DL = Instr->getModule()->getDataLayout();
2585 
  // Prepare the vector type for the interleaved load/store.
2587   Type *ScalarTy = getLoadStoreType(Instr);
2588   unsigned InterleaveFactor = Group->getFactor();
2589   assert(!VF.isScalable() && "scalable vectors not yet supported.");
2590   auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);
2591 
2592   // Prepare for the new pointers.
2593   SmallVector<Value *, 2> AddrParts;
2594   unsigned Index = Group->getIndex(Instr);
2595 
2596   // TODO: extend the masked interleaved-group support to reversed access.
2597   assert((!BlockInMask || !Group->isReverse()) &&
2598          "Reversed masked interleave-group not supported.");
2599 
2600   // If the group is reverse, adjust the index to refer to the last vector lane
2601   // instead of the first. We adjust the index from the first vector lane,
2602   // rather than directly getting the pointer for lane VF - 1, because the
2603   // pointer operand of the interleaved access is supposed to be uniform. For
2604   // uniform instructions, we're only required to generate a value for the
2605   // first vector lane in each unroll iteration.
2606   if (Group->isReverse())
2607     Index += (VF.getKnownMinValue() - 1) * Group->getFactor();
2608 
2609   for (unsigned Part = 0; Part < UF; Part++) {
2610     Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
2611     setDebugLocFromInst(AddrPart);
2612 
    // Note that the current instruction may belong to any member index, so we
    // need to adjust the address to the member of index 0.
    //
    // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
    //       b = A[i];       // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
    //
    // E.g.  A[i+1] = a;     // Member of index 1
    //       A[i]   = b;     // Member of index 0
    //       A[i+2] = c;     // Member of index 2 (Current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].
2624 
2625     bool InBounds = false;
2626     if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
2627       InBounds = gep->isInBounds();
2628     AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2629     cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2630 
2631     // Cast to the vector pointer type.
2632     unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
2633     Type *PtrTy = VecTy->getPointerTo(AddressSpace);
2634     AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
2635   }
2636 
2637   setDebugLocFromInst(Instr);
2638   Value *PoisonVec = PoisonValue::get(VecTy);
2639 
2640   Value *MaskForGaps = nullptr;
2641   if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
2642     MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2643     assert(MaskForGaps && "Mask for Gaps is required but it is null");
2644   }
2645 
2646   // Vectorize the interleaved load group.
2647   if (isa<LoadInst>(Instr)) {
2648     // For each unroll part, create a wide load for the group.
2649     SmallVector<Value *, 2> NewLoads;
2650     for (unsigned Part = 0; Part < UF; Part++) {
2651       Instruction *NewLoad;
2652       if (BlockInMask || MaskForGaps) {
2653         assert(useMaskedInterleavedAccesses(*TTI) &&
2654                "masked interleaved groups are not allowed.");
2655         Value *GroupMask = MaskForGaps;
2656         if (BlockInMask) {
2657           Value *BlockInMaskPart = State.get(BlockInMask, Part);
2658           Value *ShuffledMask = Builder.CreateShuffleVector(
2659               BlockInMaskPart,
2660               createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2661               "interleaved.mask");
2662           GroupMask = MaskForGaps
2663                           ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2664                                                 MaskForGaps)
2665                           : ShuffledMask;
2666         }
2667         NewLoad =
2668             Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(),
2669                                      GroupMask, PoisonVec, "wide.masked.vec");
2670       }
2671       else
2672         NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
2673                                             Group->getAlign(), "wide.vec");
2674       Group->addMetadata(NewLoad);
2675       NewLoads.push_back(NewLoad);
2676     }
2677 
2678     // For each member in the group, shuffle out the appropriate data from the
2679     // wide loads.
2680     unsigned J = 0;
2681     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2682       Instruction *Member = Group->getMember(I);
2683 
2684       // Skip the gaps in the group.
2685       if (!Member)
2686         continue;
2687 
2688       auto StrideMask =
2689           createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
2690       for (unsigned Part = 0; Part < UF; Part++) {
2691         Value *StridedVec = Builder.CreateShuffleVector(
2692             NewLoads[Part], StrideMask, "strided.vec");
2693 
        // If this member has a different type, cast the result to that type.
2695         if (Member->getType() != ScalarTy) {
2696           assert(!VF.isScalable() && "VF is assumed to be non scalable.");
2697           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2698           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2699         }
2700 
2701         if (Group->isReverse())
2702           StridedVec = Builder.CreateVectorReverse(StridedVec, "reverse");
2703 
2704         State.set(VPDefs[J], StridedVec, Part);
2705       }
2706       ++J;
2707     }
2708     return;
2709   }
2710 
  // The sub vector type for the current instruction.
2712   auto *SubVT = VectorType::get(ScalarTy, VF);
2713 
2714   // Vectorize the interleaved store group.
2715   MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2716   assert((!MaskForGaps || useMaskedInterleavedAccesses(*TTI)) &&
2717          "masked interleaved groups are not allowed.");
2718   assert((!MaskForGaps || !VF.isScalable()) &&
2719          "masking gaps for scalable vectors is not yet supported.");
2720   for (unsigned Part = 0; Part < UF; Part++) {
2721     // Collect the stored vector from each member.
2722     SmallVector<Value *, 4> StoredVecs;
2723     for (unsigned i = 0; i < InterleaveFactor; i++) {
      assert((Group->getMember(i) || MaskForGaps) &&
             "Failed to get a member from an interleaved store group");
2726       Instruction *Member = Group->getMember(i);
2727 
2728       // Skip the gaps in the group.
2729       if (!Member) {
        StoredVecs.push_back(PoisonValue::get(SubVT));
2732         continue;
2733       }
2734 
2735       Value *StoredVec = State.get(StoredValues[i], Part);
2736 
2737       if (Group->isReverse())
2738         StoredVec = Builder.CreateVectorReverse(StoredVec, "reverse");
2739 
      // If this member has a different type, cast it to the unified type.
      if (StoredVec->getType() != SubVT)
2743         StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2744 
2745       StoredVecs.push_back(StoredVec);
2746     }
2747 
2748     // Concatenate all vectors into a wide vector.
2749     Value *WideVec = concatenateVectors(Builder, StoredVecs);
2750 
2751     // Interleave the elements in the wide vector.
2752     Value *IVec = Builder.CreateShuffleVector(
2753         WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor),
2754         "interleaved.vec");
2755 
2756     Instruction *NewStoreInstr;
2757     if (BlockInMask || MaskForGaps) {
2758       Value *GroupMask = MaskForGaps;
2759       if (BlockInMask) {
2760         Value *BlockInMaskPart = State.get(BlockInMask, Part);
2761         Value *ShuffledMask = Builder.CreateShuffleVector(
2762             BlockInMaskPart,
2763             createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2764             "interleaved.mask");
2765         GroupMask = MaskForGaps ? Builder.CreateBinOp(Instruction::And,
2766                                                       ShuffledMask, MaskForGaps)
2767                                 : ShuffledMask;
2768       }
2769       NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part],
2770                                                 Group->getAlign(), GroupMask);
2771     } else
2772       NewStoreInstr =
2773           Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());
2774 
2775     Group->addMetadata(NewStoreInstr);
2776   }
2777 }
2778 
2779 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
2780                                                VPReplicateRecipe *RepRecipe,
2781                                                const VPIteration &Instance,
2782                                                bool IfPredicateInstr,
2783                                                VPTransformState &State) {
2784   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
2785 
2786   // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for
2787   // the first lane and part.
2788   if (isa<NoAliasScopeDeclInst>(Instr))
2789     if (!Instance.isFirstIteration())
2790       return;
2791 
2792   setDebugLocFromInst(Instr);
2793 
  // Does this instruction return a value?
2795   bool IsVoidRetTy = Instr->getType()->isVoidTy();
2796 
2797   Instruction *Cloned = Instr->clone();
2798   if (!IsVoidRetTy)
2799     Cloned->setName(Instr->getName() + ".cloned");
2800 
  // If the scalarized instruction contributes to the address computation of a
  // widened masked load/store which was in a basic block that needed
  // predication and is not predicated after vectorization, we can't propagate
  // poison-generating flags (nuw/nsw, exact, inbounds, etc.). The scalarized
  // instruction could feed a poison value to the base address of the widened
  // load/store.
2807   if (State.MayGeneratePoisonRecipes.contains(RepRecipe))
2808     Cloned->dropPoisonGeneratingFlags();
2809 
2810   State.Builder.SetInsertPoint(Builder.GetInsertBlock(),
2811                                Builder.GetInsertPoint());
2812   // Replace the operands of the cloned instructions with their scalar
2813   // equivalents in the new loop.
2814   for (auto &I : enumerate(RepRecipe->operands())) {
2815     auto InputInstance = Instance;
2816     VPValue *Operand = I.value();
2817     VPReplicateRecipe *OperandR = dyn_cast<VPReplicateRecipe>(Operand);
2818     if (OperandR && OperandR->isUniform())
2819       InputInstance.Lane = VPLane::getFirstLane();
2820     Cloned->setOperand(I.index(), State.get(Operand, InputInstance));
2821   }
2822   addNewMetadata(Cloned, Instr);
2823 
2824   // Place the cloned scalar in the new loop.
2825   Builder.Insert(Cloned);
2826 
2827   State.set(RepRecipe, Cloned, Instance);
2828 
  // If we just cloned a new assumption, add it to the assumption cache.
2830   if (auto *II = dyn_cast<AssumeInst>(Cloned))
2831     AC->registerAssumption(II);
2832 
2833   // End if-block.
2834   if (IfPredicateInstr)
2835     PredicatedInstructions.push_back(Cloned);
2836 }
2837 
2838 void InnerLoopVectorizer::createHeaderBranch(Loop *L) {
2839   BasicBlock *Header = L->getHeader();
2840   assert(!L->getLoopLatch() && "loop should not have a latch at this point");
2841 
2842   IRBuilder<> B(Header->getTerminator());
2843   Instruction *OldInst =
2844       getDebugLocFromInstOrOperands(Legal->getPrimaryInduction());
2845   setDebugLocFromInst(OldInst, &B);
2846 
2847   // Connect the header to the exit and header blocks and replace the old
2848   // terminator.
2849   B.CreateCondBr(B.getTrue(), L->getUniqueExitBlock(), Header);
2850 
2851   // Now we have two terminators. Remove the old one from the block.
2852   Header->getTerminator()->eraseFromParent();
2853 }
2854 
2855 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
2856   if (TripCount)
2857     return TripCount;
2858 
2859   assert(L && "Create Trip Count for null loop.");
2860   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2861   // Find the loop boundaries.
2862   ScalarEvolution *SE = PSE.getSE();
2863   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
2864   assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
2865          "Invalid loop count");
2866 
2867   Type *IdxTy = Legal->getWidestInductionType();
2868   assert(IdxTy && "No type for induction");
2869 
  // The exit count might have type i64 while the phi is i32. This can happen
  // if we have an induction variable that is sign extended before the
  // compare. The only way we can get a backedge-taken count here is if the
  // induction variable was signed, and as such it will not overflow. In such
  // a case truncation is legal.
2875   if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
2876       IdxTy->getPrimitiveSizeInBits())
2877     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
2878   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
2879 
2880   // Get the total trip count from the count by adding 1.
2881   const SCEV *ExitCount = SE->getAddExpr(
2882       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
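  // E.g. a backedge-taken count of 9 yields an overall trip count N of 10.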
2883 
2884   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
2885 
2886   // Expand the trip count and place the new instructions in the preheader.
2887   // Notice that the pre-header does not change, only the loop body.
2888   SCEVExpander Exp(*SE, DL, "induction");
2889 
2890   // Count holds the overall loop count (N).
2891   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
2892                                 L->getLoopPreheader()->getTerminator());
2893 
2894   if (TripCount->getType()->isPointerTy())
2895     TripCount =
2896         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
2897                                     L->getLoopPreheader()->getTerminator());
2898 
2899   return TripCount;
2900 }
2901 
2902 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
2903   if (VectorTripCount)
2904     return VectorTripCount;
2905 
2906   Value *TC = getOrCreateTripCount(L);
2907   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2908 
2909   Type *Ty = TC->getType();
2910   // This is where we can make the step a runtime constant.
2911   Value *Step = createStepForVF(Builder, Ty, VF, UF);
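  // E.g. for a scalable VF of <vscale x 4> and UF = 2 the step is the runtime
  // value 8 * vscale; for a fixed VF = 4 it folds to the constant 8.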
2912 
2913   // If the tail is to be folded by masking, round the number of iterations N
2914   // up to a multiple of Step instead of rounding down. This is done by first
2915   // adding Step-1 and then rounding down. Note that it's ok if this addition
2916   // overflows: the vector induction variable will eventually wrap to zero given
2917   // that it starts at zero and its Step is a power of two; the loop will then
2918   // exit, with the last early-exit vector comparison also producing all-true.
2919   if (Cost->foldTailByMasking()) {
2920     assert(isPowerOf2_32(VF.getKnownMinValue() * UF) &&
2921            "VF*UF must be a power of 2 when folding tail by masking");
2922     Value *NumLanes = getRuntimeVF(Builder, Ty, VF * UF);
2923     TC = Builder.CreateAdd(
2924         TC, Builder.CreateSub(NumLanes, ConstantInt::get(Ty, 1)), "n.rnd.up");
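    // E.g. for N = 10 and Step = 8, N is rounded up to 17 here; the vector
    // trip count computed below then becomes 16, i.e. two masked vector
    // iterations cover all 10 original iterations.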
2925   }
2926 
2927   // Now we need to generate the expression for the part of the loop that the
2928   // vectorized body will execute. This is equal to N - (N % Step) if scalar
2929   // iterations are not required for correctness, or N - Step, otherwise. Step
2930   // is equal to the vectorization factor (number of SIMD elements) times the
2931   // unroll factor (number of SIMD instructions).
2932   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
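  // E.g. for N = 10 and Step = 8 without tail folding, R = 2 and the vector
  // trip count becomes 8, leaving two iterations for the scalar remainder.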
2933 
2934   // There are cases where we *must* run at least one iteration in the remainder
2935   // loop.  See the cost model for when this can happen.  If the step evenly
2936   // divides the trip count, we set the remainder to be equal to the step. If
2937   // the step does not evenly divide the trip count, no adjustment is necessary
2938   // since there will already be scalar iterations. Note that the minimum
2939   // iterations check ensures that N >= Step.
2940   if (Cost->requiresScalarEpilogue(VF)) {
2941     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
2942     R = Builder.CreateSelect(IsZero, Step, R);
2943   }
2944 
2945   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
2946 
2947   return VectorTripCount;
2948 }
2949 
2950 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
2951                                                    const DataLayout &DL) {
  // Verify that V is a vector type with the same number of elements as DstVTy.
  auto *DstFVTy = cast<FixedVectorType>(DstVTy);
  unsigned VF = DstFVTy->getNumElements();
  auto *SrcVecTy = cast<FixedVectorType>(V->getType());
  assert((VF == SrcVecTy->getNumElements()) &&
         "Vector dimensions do not match");
2957   Type *SrcElemTy = SrcVecTy->getElementType();
2958   Type *DstElemTy = DstFVTy->getElementType();
2959   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
2960          "Vector elements must have same size");
2961 
2962   // Do a direct cast if element types are castable.
2963   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
2964     return Builder.CreateBitOrPointerCast(V, DstFVTy);
2965   }
  // V cannot be directly cast to the desired vector type.
  // This may happen when V is a floating point vector but DstVTy is a vector
  // of pointers, or vice-versa. Handle this using a two-step bitcast with an
  // intermediate integer type, i.e. Ptr <-> Int <-> Float.
2970   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
2971          "Only one type should be a pointer type");
2972   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
2973          "Only one type should be a floating point type");
2974   Type *IntTy =
2975       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
2976   auto *VecIntTy = FixedVectorType::get(IntTy, VF);
2977   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
2978   return Builder.CreateBitOrPointerCast(CastVal, DstFVTy);
2979 }
2980 
2981 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
2982                                                          BasicBlock *Bypass) {
2983   Value *Count = getOrCreateTripCount(L);
  // Reuse the existing vector loop preheader for TC checks.
  // Note that a new preheader block is generated for the vector loop.
2986   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
2987   IRBuilder<> Builder(TCCheckBlock->getTerminator());
2988 
2989   // Generate code to check if the loop's trip count is less than VF * UF, or
2990   // equal to it in case a scalar epilogue is required; this implies that the
2991   // vector trip count is zero. This check also covers the case where adding one
2992   // to the backedge-taken count overflowed leading to an incorrect trip count
2993   // of zero. In this case we will also jump to the scalar loop.
2994   auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE
2995                                             : ICmpInst::ICMP_ULT;
2996 
2997   // If tail is to be folded, vector loop takes care of all iterations.
2998   Value *CheckMinIters = Builder.getFalse();
2999   if (!Cost->foldTailByMasking()) {
3000     Value *Step = createStepForVF(Builder, Count->getType(), VF, UF);
3001     CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
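    // E.g. with VF * UF = 8 and a trip count of 5, CheckMinIters is true and
    // the branch created below jumps to the scalar loop (Bypass).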
3002   }
3003   // Create new preheader for vector loop.
3004   LoopVectorPreHeader =
3005       SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
3006                  "vector.ph");
3007 
3008   assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
3009                                DT->getNode(Bypass)->getIDom()) &&
3010          "TC check is expected to dominate Bypass");
3011 
3012   // Update dominator for Bypass & LoopExit (if needed).
3013   DT->changeImmediateDominator(Bypass, TCCheckBlock);
3014   if (!Cost->requiresScalarEpilogue(VF))
3015     // If there is an epilogue which must run, there's no edge from the
    // middle block to exit blocks and thus no need to update the immediate
3017     // dominator of the exit blocks.
3018     DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
3019 
3020   ReplaceInstWithInst(
3021       TCCheckBlock->getTerminator(),
3022       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
3023   LoopBypassBlocks.push_back(TCCheckBlock);
3024 }
3025 
BasicBlock *InnerLoopVectorizer::emitSCEVChecks(BasicBlock *Bypass) {
  BasicBlock *const SCEVCheckBlock =
3029       RTChecks.emitSCEVChecks(Bypass, LoopVectorPreHeader, LoopExitBlock);
3030   if (!SCEVCheckBlock)
3031     return nullptr;
3032 
3033   assert(!(SCEVCheckBlock->getParent()->hasOptSize() ||
3034            (OptForSizeBasedOnProfile &&
3035             Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) &&
3036          "Cannot SCEV check stride or overflow when optimizing for size");
3037 
3038 
3039   // Update dominator only if this is first RT check.
3040   if (LoopBypassBlocks.empty()) {
3041     DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
3042     if (!Cost->requiresScalarEpilogue(VF))
3043       // If there is an epilogue which must run, there's no edge from the
      // middle block to exit blocks and thus no need to update the immediate
3045       // dominator of the exit blocks.
3046       DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
3047   }
3048 
3049   LoopBypassBlocks.push_back(SCEVCheckBlock);
3050   AddedSafetyChecks = true;
3051   return SCEVCheckBlock;
3052 }
3053 
3054 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L,
3055                                                       BasicBlock *Bypass) {
3056   // VPlan-native path does not do any analysis for runtime checks currently.
3057   if (EnableVPlanNativePath)
3058     return nullptr;
3059 
3060   BasicBlock *const MemCheckBlock =
3061       RTChecks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader);
3062 
  // Check if we generated code that checks at runtime whether arrays overlap.
  // We put the checks into a separate block to make the more common case of
  // few elements faster.
3066   if (!MemCheckBlock)
3067     return nullptr;
3068 
3069   if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
3070     assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
3071            "Cannot emit memory checks when optimizing for size, unless forced "
3072            "to vectorize.");
3073     ORE->emit([&]() {
3074       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
3075                                         L->getStartLoc(), L->getHeader())
3076              << "Code-size may be reduced by not forcing "
3077                 "vectorization, or by source-code modifications "
3078                 "eliminating the need for runtime checks "
3079                 "(e.g., adding 'restrict').";
3080     });
3081   }
3082 
3083   LoopBypassBlocks.push_back(MemCheckBlock);
3084 
3085   AddedSafetyChecks = true;
3086 
3087   // We currently don't use LoopVersioning for the actual loop cloning but we
3088   // still use it to add the noalias metadata.
3089   LVer = std::make_unique<LoopVersioning>(
3090       *Legal->getLAI(),
3091       Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
3092       DT, PSE.getSE());
3093   LVer->prepareNoAliasMetadata();
3094   return MemCheckBlock;
3095 }
3096 
3097 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
3098   LoopScalarBody = OrigLoop->getHeader();
3099   LoopVectorPreHeader = OrigLoop->getLoopPreheader();
3100   assert(LoopVectorPreHeader && "Invalid loop structure");
3101   LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr
3102   assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) &&
3103          "multiple exit loop without required epilogue?");
3104 
3105   LoopMiddleBlock =
3106       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3107                  LI, nullptr, Twine(Prefix) + "middle.block");
3108   LoopScalarPreHeader =
3109       SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
3110                  nullptr, Twine(Prefix) + "scalar.ph");
3111 
3112   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3113 
3114   // Set up the middle block terminator.  Two cases:
3115   // 1) If we know that we must execute the scalar epilogue, emit an
3116   //    unconditional branch.
3117   // 2) Otherwise, we must have a single unique exit block (due to how we
  //    implement the multiple exit case).  In this case, set up a conditional
3119   //    branch from the middle block to the loop scalar preheader, and the
3120   //    exit block.  completeLoopSkeleton will update the condition to use an
3121   //    iteration check, if required to decide whether to execute the remainder.
3122   BranchInst *BrInst = Cost->requiresScalarEpilogue(VF) ?
3123     BranchInst::Create(LoopScalarPreHeader) :
3124     BranchInst::Create(LoopExitBlock, LoopScalarPreHeader,
3125                        Builder.getTrue());
3126   BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3127   ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
3128 
  // We intentionally don't let SplitBlock update LoopInfo since
  // LoopVectorBody should belong to a different loop than LoopVectorPreHeader.
  // LoopVectorBody is explicitly added to the correct place a few lines later.
3132   BasicBlock *LoopVectorBody =
3133       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3134                  nullptr, nullptr, Twine(Prefix) + "vector.body");
3135 
3136   // Update dominator for loop exit.
3137   if (!Cost->requiresScalarEpilogue(VF))
3138     // If there is an epilogue which must run, there's no edge from the
    // middle block to exit blocks and thus no need to update the immediate
3140     // dominator of the exit blocks.
3141     DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
3142 
3143   // Create and register the new vector loop.
3144   Loop *Lp = LI->AllocateLoop();
3145   Loop *ParentLoop = OrigLoop->getParentLoop();
3146 
3147   // Insert the new loop into the loop nest and register the new basic blocks
3148   // before calling any utilities such as SCEV that require valid LoopInfo.
3149   if (ParentLoop) {
3150     ParentLoop->addChildLoop(Lp);
3151   } else {
3152     LI->addTopLevelLoop(Lp);
3153   }
3154   Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
3155   return Lp;
3156 }
3157 
3158 void InnerLoopVectorizer::createInductionResumeValues(
3159     Loop *L, std::pair<BasicBlock *, Value *> AdditionalBypass) {
3160   assert(((AdditionalBypass.first && AdditionalBypass.second) ||
3161           (!AdditionalBypass.first && !AdditionalBypass.second)) &&
3162          "Inconsistent information about additional bypass.");
3163 
3164   Value *VectorTripCount = getOrCreateVectorTripCount(L);
3165   assert(VectorTripCount && L && "Expected valid arguments");
3166   // We are going to resume the execution of the scalar loop.
3167   // Go over all of the induction variables that we found and fix the
3168   // PHIs that are left in the scalar version of the loop.
3169   // The starting values of PHI nodes depend on the counter of the last
3170   // iteration in the vectorized loop.
3171   // If we come from a bypass edge then we need to start from the original
3172   // start value.
3173   Instruction *OldInduction = Legal->getPrimaryInduction();
3174   for (auto &InductionEntry : Legal->getInductionVars()) {
3175     PHINode *OrigPhi = InductionEntry.first;
3176     InductionDescriptor II = InductionEntry.second;
3177 
    // Create phi nodes to merge from the backedge-taken check block.
3179     PHINode *BCResumeVal =
3180         PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
3181                         LoopScalarPreHeader->getTerminator());
3182     // Copy original phi DL over to the new one.
3183     BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
3184     Value *&EndValue = IVEndValues[OrigPhi];
3185     Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
3186     if (OrigPhi == OldInduction) {
3187       // We know what the end value is.
3188       EndValue = VectorTripCount;
3189     } else {
3190       IRBuilder<> B(L->getLoopPreheader()->getTerminator());
3191 
3192       // Fast-math-flags propagate from the original induction instruction.
3193       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3194         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3195 
3196       Type *StepType = II.getStep()->getType();
3197       Instruction::CastOps CastOp =
3198           CastInst::getCastOpcode(VectorTripCount, true, StepType, true);
3199       Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd");
3200       Value *Step =
3201           CreateStepValue(II.getStep(), *PSE.getSE(), &*B.GetInsertPoint());
3202       EndValue = emitTransformedIndex(B, CRD, II.getStartValue(), Step, II);
3203       EndValue->setName("ind.end");
3204 
3205       // Compute the end value for the additional bypass (if applicable).
3206       if (AdditionalBypass.first) {
3207         B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt()));
3208         CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true,
3209                                          StepType, true);
3210         Value *Step =
3211             CreateStepValue(II.getStep(), *PSE.getSE(), &*B.GetInsertPoint());
3212         CRD =
3213             B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd");
3214         EndValueFromAdditionalBypass =
3215             emitTransformedIndex(B, CRD, II.getStartValue(), Step, II);
3216         EndValueFromAdditionalBypass->setName("ind.end");
3217       }
3218     }
3219     // The new PHI merges the original incoming value, in case of a bypass,
3220     // or the value at the end of the vectorized loop.
3221     BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);
3222 
3223     // Fix the scalar body counter (PHI node).
3224     // The old induction's phi node in the scalar body needs the truncated
3225     // value.
3226     for (BasicBlock *BB : LoopBypassBlocks)
3227       BCResumeVal->addIncoming(II.getStartValue(), BB);
3228 
3229     if (AdditionalBypass.first)
3230       BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first,
3231                                             EndValueFromAdditionalBypass);
3232 
3233     OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
3234   }
3235 }
3236 
3237 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L,
3238                                                       MDNode *OrigLoopID) {
3239   assert(L && "Expected valid loop.");
3240 
3241   // The trip counts should be cached by now.
3242   Value *Count = getOrCreateTripCount(L);
3243   Value *VectorTripCount = getOrCreateVectorTripCount(L);
3244 
3245   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3246 
3247   // Add a check in the middle block to see if we have completed
3248   // all of the iterations in the first vector loop.  Three cases:
3249   // 1) If we require a scalar epilogue, there is no conditional branch as
3250   //    we unconditionally branch to the scalar preheader.  Do nothing.
3251   // 2) If (N - N%VF) == N, then we *don't* need to run the remainder.
3252   //    Thus if tail is to be folded, we know we don't need to run the
3253   //    remainder and we can use the previous value for the condition (true).
3254   // 3) Otherwise, construct a runtime check.
3255   if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) {
3256     Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
3257                                         Count, VectorTripCount, "cmp.n",
3258                                         LoopMiddleBlock->getTerminator());
3259 
3260     // Here we use the same DebugLoc as the scalar loop latch terminator instead
3261     // of the corresponding compare because they may have ended up with
3262     // different line numbers and we want to avoid awkward line stepping while
    // debugging, e.g. if the compare has a line number inside the loop.
3264     CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3265     cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN);
3266   }
3267 
3268   // Get ready to start creating new instructions into the vectorized body.
3269   assert(LoopVectorPreHeader == L->getLoopPreheader() &&
3270          "Inconsistent vector loop preheader");
3271 
3272 #ifdef EXPENSIVE_CHECKS
3273   assert(DT->verify(DominatorTree::VerificationLevel::Fast));
3274   LI->verify(*DT);
3275 #endif
3276 
3277   return LoopVectorPreHeader;
3278 }
3279 
3280 std::pair<BasicBlock *, Value *>
3281 InnerLoopVectorizer::createVectorizedLoopSkeleton() {
3282   /*
3283    In this function we generate a new loop. The new loop will contain
3284    the vectorized instructions while the old loop will continue to run the
3285    scalar remainder.
3286 
3287        [ ] <-- loop iteration number check.
3288     /   |
3289    /    v
3290   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
3291   |  /  |
3292   | /   v
3293   ||   [ ]     <-- vector pre header.
3294   |/    |
3295   |     v
3296   |    [  ] \
3297   |    [  ]_|   <-- vector loop.
3298   |     |
3299   |     v
3300   \   -[ ]   <--- middle-block.
3301    \/   |
3302    /\   v
3303    | ->[ ]     <--- new preheader.
3304    |    |
3305  (opt)  v      <-- edge from middle to exit iff epilogue is not required.
3306    |   [ ] \
3307    |   [ ]_|   <-- old scalar loop to handle remainder (scalar epilogue).
3308     \   |
3309      \  v
3310       >[ ]     <-- exit block(s).
3311    ...
3312    */
3313 
3314   // Get the metadata of the original loop before it gets modified.
3315   MDNode *OrigLoopID = OrigLoop->getLoopID();
3316 
3317   // Workaround!  Compute the trip count of the original loop and cache it
3318   // before we start modifying the CFG.  This code has a systemic problem
3319   // wherein it tries to run analysis over partially constructed IR; this is
3320   // wrong, and not simply for SCEV.  The trip count of the original loop
3321   // simply happens to be prone to hitting this in practice.  In theory, we
3322   // can hit the same issue for any SCEV, or ValueTracking query done during
3323   // mutation.  See PR49900.
3324   getOrCreateTripCount(OrigLoop);
3325 
3326   // Create an empty vector loop, and prepare basic blocks for the runtime
3327   // checks.
3328   Loop *Lp = createVectorLoopSkeleton("");
3329 
  // Now, compare the new count to zero. If it is zero, skip the vector loop
  // and jump to the scalar loop. This check also covers the case where the
3332   // backedge-taken count is uint##_max: adding one to it will overflow leading
3333   // to an incorrect trip count of zero. In this (rare) case we will also jump
3334   // to the scalar loop.
3335   emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader);
3336 
3337   // Generate the code to check any assumptions that we've made for SCEV
3338   // expressions.
3339   emitSCEVChecks(LoopScalarPreHeader);
3340 
  // Generate the code that checks at runtime whether arrays overlap. We put
  // the checks into a separate block to make the more common case of few
  // elements faster.
3344   emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
3345 
3346   createHeaderBranch(Lp);
3347 
3348   // Emit phis for the new starting index of the scalar loop.
3349   createInductionResumeValues(Lp);
3350 
3351   return {completeLoopSkeleton(Lp, OrigLoopID), nullptr};
3352 }
3353 
3354 // Fix up external users of the induction variable. At this point, we are
3355 // in LCSSA form, with all external PHIs that use the IV having one input value,
3356 // coming from the remainder loop. We need those PHIs to also have a correct
3357 // value for the IV when arriving directly from the middle block.
3358 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3359                                        const InductionDescriptor &II,
3360                                        Value *CountRoundDown, Value *EndValue,
3361                                        BasicBlock *MiddleBlock,
3362                                        BasicBlock *VectorHeader) {
3363   // There are two kinds of external IV usages - those that use the value
3364   // computed in the last iteration (the PHI) and those that use the penultimate
3365   // value (the value that feeds into the phi from the loop latch).
3366   // We allow both, but they, obviously, have different values.
3367 
3368   assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");
3369 
3370   DenseMap<Value *, Value *> MissingVals;
3371 
3372   // An external user of the last iteration's value should see the value that
3373   // the remainder loop uses to initialize its own IV.
3374   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3375   for (User *U : PostInc->users()) {
3376     Instruction *UI = cast<Instruction>(U);
3377     if (!OrigLoop->contains(UI)) {
3378       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3379       MissingVals[UI] = EndValue;
3380     }
3381   }
3382 
  // An external user of the penultimate value needs to see EndValue - Step.
  // The simplest way to get this is to recompute it from the constituent
  // SCEVs, that is Start + (Step * (CRD - 1)).
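  // E.g. for Start = 0, Step = 2 and CountRoundDown = 8, the penultimate
  // value is 0 + 2 * (8 - 1) = 14.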
3386   for (User *U : OrigPhi->users()) {
3387     auto *UI = cast<Instruction>(U);
3388     if (!OrigLoop->contains(UI)) {
3389       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3390 
3391       IRBuilder<> B(MiddleBlock->getTerminator());
3392 
3393       // Fast-math-flags propagate from the original induction instruction.
3394       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3395         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3396 
3397       Value *CountMinusOne = B.CreateSub(
3398           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3399       Value *CMO =
3400           !II.getStep()->getType()->isIntegerTy()
3401               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3402                              II.getStep()->getType())
3403               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3404       CMO->setName("cast.cmo");
3405 
3406       Value *Step = CreateStepValue(II.getStep(), *PSE.getSE(),
3407                                     VectorHeader->getTerminator());
3408       Value *Escape =
3409           emitTransformedIndex(B, CMO, II.getStartValue(), Step, II);
3410       Escape->setName("ind.escape");
3411       MissingVals[UI] = Escape;
3412     }
3413   }
3414 
3415   for (auto &I : MissingVals) {
3416     PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
3418     // that is %IV2 = phi [...], [ %IV1, %latch ]
3419     // In this case, if IV1 has an external use, we need to avoid adding both
3420     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3421     // don't already have an incoming value for the middle block.
3422     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3423       PHI->addIncoming(I.second, MiddleBlock);
3424   }
3425 }
3426 
3427 namespace {
3428 
3429 struct CSEDenseMapInfo {
3430   static bool canHandle(const Instruction *I) {
3431     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3432            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3433   }
3434 
3435   static inline Instruction *getEmptyKey() {
3436     return DenseMapInfo<Instruction *>::getEmptyKey();
3437   }
3438 
3439   static inline Instruction *getTombstoneKey() {
3440     return DenseMapInfo<Instruction *>::getTombstoneKey();
3441   }
3442 
3443   static unsigned getHashValue(const Instruction *I) {
3444     assert(canHandle(I) && "Unknown instruction!");
3445     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3446                                                            I->value_op_end()));
3447   }
3448 
3449   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3450     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3451         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3452       return LHS == RHS;
3453     return LHS->isIdenticalTo(RHS);
3454   }
3455 };
3456 
3457 } // end anonymous namespace
3458 
/// Perform CSE of induction variable instructions.
static void cse(BasicBlock *BB) {
  // Perform simple CSE.
3462   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3463   for (Instruction &In : llvm::make_early_inc_range(*BB)) {
3464     if (!CSEDenseMapInfo::canHandle(&In))
3465       continue;
3466 
3467     // Check if we can replace this instruction with any of the
3468     // visited instructions.
3469     if (Instruction *V = CSEMap.lookup(&In)) {
3470       In.replaceAllUsesWith(V);
3471       In.eraseFromParent();
3472       continue;
3473     }
3474 
3475     CSEMap[&In] = &In;
3476   }
3477 }
3478 
3479 InstructionCost
3480 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF,
3481                                               bool &NeedToScalarize) const {
3482   Function *F = CI->getCalledFunction();
3483   Type *ScalarRetTy = CI->getType();
3484   SmallVector<Type *, 4> Tys, ScalarTys;
3485   for (auto &ArgOp : CI->args())
3486     ScalarTys.push_back(ArgOp->getType());
3487 
3488   // Estimate cost of scalarized vector call. The source operands are assumed
3489   // to be vectors, so we need to extract individual elements from there,
3490   // execute VF scalar calls, and then gather the result into the vector return
3491   // value.
3492   InstructionCost ScalarCallCost =
3493       TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput);
3494   if (VF.isScalar())
3495     return ScalarCallCost;
3496 
3497   // Compute corresponding vector type for return value and arguments.
3498   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3499   for (Type *ScalarTy : ScalarTys)
3500     Tys.push_back(ToVectorTy(ScalarTy, VF));
3501 
3502   // Compute costs of unpacking argument values for the scalar calls and
3503   // packing the return values to a vector.
3504   InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
3505 
3506   InstructionCost Cost =
3507       ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
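  // E.g. with an illustrative ScalarCallCost of 10, VF = 4 and a
  // ScalarizationCost of 6, the scalarized estimate is 10 * 4 + 6 = 46.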
3508 
3509   // If we can't emit a vector call for this function, then the currently found
3510   // cost is the cost we need to return.
3511   NeedToScalarize = true;
3512   VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
3513   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3514 
3515   if (!TLI || CI->isNoBuiltin() || !VecFunc)
3516     return Cost;
3517 
3518   // If the corresponding vector cost is cheaper, return its cost.
3519   InstructionCost VectorCallCost =
3520       TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput);
3521   if (VectorCallCost < Cost) {
3522     NeedToScalarize = false;
3523     Cost = VectorCallCost;
3524   }
3525   return Cost;
3526 }
3527 
3528 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) {
3529   if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy()))
3530     return Elt;
3531   return VectorType::get(Elt, VF);
3532 }
3533 
3534 InstructionCost
3535 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
3536                                                    ElementCount VF) const {
3537   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3538   assert(ID && "Expected intrinsic call!");
3539   Type *RetTy = MaybeVectorizeType(CI->getType(), VF);
3540   FastMathFlags FMF;
3541   if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
3542     FMF = FPMO->getFastMathFlags();
3543 
3544   SmallVector<const Value *> Arguments(CI->args());
3545   FunctionType *FTy = CI->getCalledFunction()->getFunctionType();
3546   SmallVector<Type *> ParamTys;
3547   std::transform(FTy->param_begin(), FTy->param_end(),
3548                  std::back_inserter(ParamTys),
3549                  [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); });
3550 
3551   IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
3552                                     dyn_cast<IntrinsicInst>(CI));
3553   return TTI.getIntrinsicInstrCost(CostAttrs,
3554                                    TargetTransformInfo::TCK_RecipThroughput);
3555 }
3556 
3557 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3558   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3559   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3560   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3561 }
3562 
3563 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3564   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3565   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3566   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3567 }
3568 
3569 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) {
3570   // For every instruction `I` in MinBWs, truncate the operands, create a
3571   // truncated version of `I` and reextend its result. InstCombine runs
3572   // later and will remove any ext/trunc pairs.
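  // E.g. a <4 x i32> add whose result only needs 8 bits is rewritten as a
  // trunc of the operands to <4 x i8>, an <4 x i8> add and a zext of the
  // result back to <4 x i32>.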
3573   SmallPtrSet<Value *, 4> Erased;
3574   for (const auto &KV : Cost->getMinimalBitwidths()) {
3575     // If the value wasn't vectorized, we must maintain the original scalar
3576     // type. The absence of the value from State indicates that it
3577     // wasn't vectorized.
3578     // FIXME: Should not rely on getVPValue at this point.
3579     VPValue *Def = State.Plan->getVPValue(KV.first, true);
3580     if (!State.hasAnyVectorValue(Def))
3581       continue;
3582     for (unsigned Part = 0; Part < UF; ++Part) {
3583       Value *I = State.get(Def, Part);
3584       if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
3585         continue;
3586       Type *OriginalTy = I->getType();
3587       Type *ScalarTruncatedTy =
3588           IntegerType::get(OriginalTy->getContext(), KV.second);
3589       auto *TruncatedTy = VectorType::get(
3590           ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount());
3591       if (TruncatedTy == OriginalTy)
3592         continue;
3593 
3594       IRBuilder<> B(cast<Instruction>(I));
3595       auto ShrinkOperand = [&](Value *V) -> Value * {
3596         if (auto *ZI = dyn_cast<ZExtInst>(V))
3597           if (ZI->getSrcTy() == TruncatedTy)
3598             return ZI->getOperand(0);
3599         return B.CreateZExtOrTrunc(V, TruncatedTy);
3600       };
3601 
3602       // The actual instruction modification depends on the instruction type,
3603       // unfortunately.
3604       Value *NewI = nullptr;
3605       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3606         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3607                              ShrinkOperand(BO->getOperand(1)));
3608 
3609         // Any wrapping introduced by shrinking this operation shouldn't be
3610         // considered undefined behavior. So, we can't unconditionally copy
3611         // arithmetic wrapping flags to NewI.
3612         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3613       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3614         NewI =
3615             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3616                          ShrinkOperand(CI->getOperand(1)));
3617       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3618         NewI = B.CreateSelect(SI->getCondition(),
3619                               ShrinkOperand(SI->getTrueValue()),
3620                               ShrinkOperand(SI->getFalseValue()));
3621       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3622         switch (CI->getOpcode()) {
3623         default:
3624           llvm_unreachable("Unhandled cast!");
3625         case Instruction::Trunc:
3626           NewI = ShrinkOperand(CI->getOperand(0));
3627           break;
3628         case Instruction::SExt:
3629           NewI = B.CreateSExtOrTrunc(
3630               CI->getOperand(0),
3631               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3632           break;
3633         case Instruction::ZExt:
3634           NewI = B.CreateZExtOrTrunc(
3635               CI->getOperand(0),
3636               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3637           break;
3638         }
3639       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
3640         auto Elements0 =
3641             cast<VectorType>(SI->getOperand(0)->getType())->getElementCount();
3642         auto *O0 = B.CreateZExtOrTrunc(
3643             SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
3644         auto Elements1 =
3645             cast<VectorType>(SI->getOperand(1)->getType())->getElementCount();
3646         auto *O1 = B.CreateZExtOrTrunc(
3647             SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
3648 
3649         NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
3650       } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
3651         // Don't do anything with the operands, just extend the result.
3652         continue;
3653       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3654         auto Elements =
3655             cast<VectorType>(IE->getOperand(0)->getType())->getElementCount();
3656         auto *O0 = B.CreateZExtOrTrunc(
3657             IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3658         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3659         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3660       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3661         auto Elements =
3662             cast<VectorType>(EE->getOperand(0)->getType())->getElementCount();
3663         auto *O0 = B.CreateZExtOrTrunc(
3664             EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3665         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3666       } else {
3667         // If we don't know what to do, be conservative and don't do anything.
3668         continue;
3669       }
3670 
3671       // Lastly, extend the result.
3672       NewI->takeName(cast<Instruction>(I));
3673       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3674       I->replaceAllUsesWith(Res);
3675       cast<Instruction>(I)->eraseFromParent();
3676       Erased.insert(I);
3677       State.reset(Def, Res, Part);
3678     }
3679   }
3680 
  // We'll have created a bunch of ZExts that are now unused. Clean them up.
3682   for (const auto &KV : Cost->getMinimalBitwidths()) {
3683     // If the value wasn't vectorized, we must maintain the original scalar
3684     // type. The absence of the value from State indicates that it
3685     // wasn't vectorized.
3686     // FIXME: Should not rely on getVPValue at this point.
3687     VPValue *Def = State.Plan->getVPValue(KV.first, true);
3688     if (!State.hasAnyVectorValue(Def))
3689       continue;
3690     for (unsigned Part = 0; Part < UF; ++Part) {
3691       Value *I = State.get(Def, Part);
3692       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3693       if (Inst && Inst->use_empty()) {
3694         Value *NewI = Inst->getOperand(0);
3695         Inst->eraseFromParent();
3696         State.reset(Def, NewI, Part);
3697       }
3698     }
3699   }
3700 }
3701 
3702 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
3703   // Insert truncates and extends for any truncated instructions as hints to
3704   // InstCombine.
3705   if (VF.isVector())
3706     truncateToMinimalBitwidths(State);
3707 
3708   // Fix widened non-induction PHIs by setting up the PHI operands.
3709   if (OrigPHIsToFix.size()) {
3710     assert(EnableVPlanNativePath &&
3711            "Unexpected non-induction PHIs for fixup in non VPlan-native path");
3712     fixNonInductionPHIs(State);
3713   }
3714 
3715   // At this point every instruction in the original loop is widened to a
3716   // vector form. Now we need to fix the recurrences in the loop. These PHI
3717   // nodes are currently empty because we did not want to introduce cycles.
3718   // This is the second stage of vectorizing recurrences.
3719   fixCrossIterationPHIs(State);
3720 
3721   // Forget the original basic block.
3722   PSE.getSE()->forgetLoop(OrigLoop);
3723 
3724   Loop *VectorLoop = LI->getLoopFor(State.CFG.PrevBB);
3725   // If we inserted an edge from the middle block to the unique exit block,
3726   // update uses outside the loop (phis) to account for the newly inserted
3727   // edge.
3728   if (!Cost->requiresScalarEpilogue(VF)) {
3729     // Fix-up external users of the induction variables.
3730     for (auto &Entry : Legal->getInductionVars())
3731       fixupIVUsers(
3732           Entry.first, Entry.second, getOrCreateVectorTripCount(VectorLoop),
3733           IVEndValues[Entry.first], LoopMiddleBlock, VectorLoop->getHeader());
3734 
3735     fixLCSSAPHIs(State);
3736   }
3737 
3738   for (Instruction *PI : PredicatedInstructions)
3739     sinkScalarOperands(&*PI);
3740 
3741   // Remove redundant induction instructions.
3742   cse(VectorLoop->getHeader());
3743 
  // Set/update profile weights for the vector and remainder loops as original
  // loop iterations are now distributed among them. Note that the original
  // loop, represented by LoopScalarBody, becomes the remainder loop after
  // vectorization.
  //
  // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
  // end up with a slightly less precise result, but that should be OK since
  // the profile is not inherently precise anyway. Note also that any possible
  // bypass of the vector code caused by legality checks is ignored,
  // optimistically assigning all the weight to the vector loop.
  //
  // For scalable vectorization we can't know at compile time how many
  // iterations of the loop are handled in one vector iteration, so instead
  // assume a pessimistic vscale of '1'.
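  // For example (illustrative): if the original profile implies 1000
  // iterations and VF = 4, UF = 2, the vector loop is assigned a weight of
  // roughly 1000 / 8 = 125 iterations, with the remainder loop receiving
  // what is left over.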
3757   setProfileInfoAfterUnrolling(LI->getLoopFor(LoopScalarBody), VectorLoop,
3758                                LI->getLoopFor(LoopScalarBody),
3759                                VF.getKnownMinValue() * UF);
3760 }
3761 
3762 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
3763   // In order to support recurrences we need to be able to vectorize Phi nodes.
3764   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
3765   // stage #2: We now need to fix the recurrences by adding incoming edges to
3766   // the currently empty PHI nodes. At this point every instruction in the
3767   // original loop is widened to a vector form so we can use them to construct
3768   // the incoming edges.
3769   VPBasicBlock *Header = State.Plan->getEntry()->getEntryBasicBlock();
3770   for (VPRecipeBase &R : Header->phis()) {
3771     if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R))
3772       fixReduction(ReductionPhi, State);
3773     else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))
3774       fixFirstOrderRecurrence(FOR, State);
3775   }
3776 }
3777 
3778 void InnerLoopVectorizer::fixFirstOrderRecurrence(
3779     VPFirstOrderRecurrencePHIRecipe *PhiR, VPTransformState &State) {
3780   // This is the second phase of vectorizing first-order recurrences. An
3781   // overview of the transformation is described below. Suppose we have the
3782   // following loop.
3783   //
3784   //   for (int i = 0; i < n; ++i)
3785   //     b[i] = a[i] - a[i - 1];
3786   //
3787   // There is a first-order recurrence on "a". For this loop, the shorthand
3788   // scalar IR looks like:
3789   //
3790   //   scalar.ph:
3791   //     s_init = a[-1]
3792   //     br scalar.body
3793   //
3794   //   scalar.body:
3795   //     i = phi [0, scalar.ph], [i+1, scalar.body]
3796   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
3797   //     s2 = a[i]
3798   //     b[i] = s2 - s1
3799   //     br cond, scalar.body, ...
3800   //
  // In this example, s1 is a recurrence because its value depends on the
3802   // previous iteration. In the first phase of vectorization, we created a
3803   // vector phi v1 for s1. We now complete the vectorization and produce the
3804   // shorthand vector IR shown below (for VF = 4, UF = 1).
3805   //
3806   //   vector.ph:
3807   //     v_init = vector(..., ..., ..., a[-1])
3808   //     br vector.body
3809   //
3810   //   vector.body
3811   //     i = phi [0, vector.ph], [i+4, vector.body]
3812   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
3813   //     v2 = a[i, i+1, i+2, i+3];
3814   //     v3 = vector(v1(3), v2(0, 1, 2))
3815   //     b[i, i+1, i+2, i+3] = v2 - v3
3816   //     br cond, vector.body, middle.block
3817   //
3818   //   middle.block:
3819   //     x = v2(3)
3820   //     br scalar.ph
3821   //
3822   //   scalar.ph:
3823   //     s_init = phi [x, middle.block], [a[-1], otherwise]
3824   //     br scalar.body
3825   //
  // After the vector loop completes execution, we extract the next value of
  // the recurrence (x) to use as the initial value in the scalar loop.
3828 
3829   // Extract the last vector element in the middle block. This will be the
3830   // initial value for the recurrence when jumping to the scalar loop.
3831   VPValue *PreviousDef = PhiR->getBackedgeValue();
3832   Value *Incoming = State.get(PreviousDef, UF - 1);
3833   auto *ExtractForScalar = Incoming;
3834   auto *IdxTy = Builder.getInt32Ty();
3835   if (VF.isVector()) {
3836     auto *One = ConstantInt::get(IdxTy, 1);
3837     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
3838     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
3839     auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
3840     ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx,
3841                                                     "vector.recur.extract");
3842   }
  // Extract the second-to-last element in the middle block if the
  // Phi is used outside the loop. We need to extract the phi itself
  // and not the last element (the phi update in the current iteration). This
  // will be the value when jumping to the exit block from the LoopMiddleBlock,
  // when the scalar loop is not run at all.
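  // For example (a sketch, VF = 4, UF = 1): if the final vector value of the
  // recurrence is <s1, s2, s3, s4>, lane 3 (s4) initializes the scalar loop,
  // while lane 2 (s3) is the value of the phi itself, which exit users see
  // when the scalar loop does not run.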
3848   Value *ExtractForPhiUsedOutsideLoop = nullptr;
3849   if (VF.isVector()) {
3850     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
3851     auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2));
3852     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
3853         Incoming, Idx, "vector.recur.extract.for.phi");
3854   } else if (UF > 1)
    // When the loop is unrolled without vectorizing, initialize
    // ExtractForPhiUsedOutsideLoop with the part just prior to the final
    // unrolled value of `Incoming`. This is analogous to the vectorized case
    // above: extracting the second-to-last element when VF > 1.
3859     ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);
3860 
3861   // Fix the initial value of the original recurrence in the scalar loop.
3862   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
3863   PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue());
3864   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
3865   auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue();
3866   for (auto *BB : predecessors(LoopScalarPreHeader)) {
3867     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
3868     Start->addIncoming(Incoming, BB);
3869   }
3870 
3871   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
3872   Phi->setName("scalar.recur");
3873 
3874   // Finally, fix users of the recurrence outside the loop. The users will need
3875   // either the last value of the scalar recurrence or the last value of the
3876   // vector recurrence we extracted in the middle block. Since the loop is in
3877   // LCSSA form, we just need to find all the phi nodes for the original scalar
3878   // recurrence in the exit block, and then add an edge for the middle block.
  // Note that LCSSA does not imply single entry when the original scalar loop
  // had multiple exiting edges (as we always run the last iteration in the
  // scalar epilogue); in that case, there is no edge from the middle block to
  // the exit block, and thus no phis that need to be updated.
3883   if (!Cost->requiresScalarEpilogue(VF))
3884     for (PHINode &LCSSAPhi : LoopExitBlock->phis())
3885       if (llvm::is_contained(LCSSAPhi.incoming_values(), Phi))
3886         LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
3887 }
3888 
3889 void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR,
3890                                        VPTransformState &State) {
3891   PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
  // Get its reduction variable descriptor.
3893   assert(Legal->isReductionVariable(OrigPhi) &&
3894          "Unable to find the reduction variable");
3895   const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
3896 
3897   RecurKind RK = RdxDesc.getRecurrenceKind();
3898   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
3899   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
3900   setDebugLocFromInst(ReductionStartValue);
3901 
3902   VPValue *LoopExitInstDef = PhiR->getBackedgeValue();
3903   // This is the vector-clone of the value that leaves the loop.
3904   Type *VecTy = State.get(LoopExitInstDef, 0)->getType();
3905 
3906   // Wrap flags are in general invalid after vectorization, clear them.
3907   clearReductionWrapFlags(RdxDesc, State);
3908 
3909   // Before each round, move the insertion point right between
3910   // the PHIs and the values we are going to write.
3911   // This allows us to write both PHINodes and the extractelement
3912   // instructions.
3913   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3914 
3915   setDebugLocFromInst(LoopExitInst);
3916 
3917   Type *PhiTy = OrigPhi->getType();
3918   BasicBlock *VectorLoopLatch =
3919       LI->getLoopFor(State.CFG.PrevBB)->getLoopLatch();
3920   // If tail is folded by masking, the vector value to leave the loop should be
3921   // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
3922   // instead of the former. For an inloop reduction the reduction will already
3923   // be predicated, and does not need to be handled here.
3924   if (Cost->foldTailByMasking() && !PhiR->isInLoop()) {
3925     for (unsigned Part = 0; Part < UF; ++Part) {
3926       Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
3927       Value *Sel = nullptr;
3928       for (User *U : VecLoopExitInst->users()) {
3929         if (isa<SelectInst>(U)) {
3930           assert(!Sel && "Reduction exit feeding two selects");
3931           Sel = U;
3932         } else
3933           assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
3934       }
3935       assert(Sel && "Reduction exit feeds no select");
3936       State.reset(LoopExitInstDef, Sel, Part);
3937 
3938       // If the target can create a predicated operator for the reduction at no
3939       // extra cost in the loop (for example a predicated vadd), it can be
3940       // cheaper for the select to remain in the loop than be sunk out of it,
3941       // and so use the select value for the phi instead of the old
3942       // LoopExitValue.
3943       if (PreferPredicatedReductionSelect ||
3944           TTI->preferPredicatedReductionSelect(
3945               RdxDesc.getOpcode(), PhiTy,
3946               TargetTransformInfo::ReductionFlags())) {
        auto *VecRdxPhi = cast<PHINode>(State.get(PhiR, Part));
3949         VecRdxPhi->setIncomingValueForBlock(VectorLoopLatch, Sel);
3950       }
3951     }
3952   }
3953 
3954   // If the vector reduction can be performed in a smaller type, we truncate
3955   // then extend the loop exit value to enable InstCombine to evaluate the
3956   // entire expression in the smaller type.
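  // For example (a sketch, assuming an i32 add reduction whose descriptor
  // proves 8 bits suffice, VF = 4): in the vector latch each part %rdx is
  // rewritten as
  //   %t = trunc <4 x i32> %rdx to <4 x i8>
  //   %e = zext <4 x i8> %t to <4 x i32>   ; in-loop users now use %e
  // and in the middle block the parts are truncated back to <4 x i8> before
  // the final reduction.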
3957   if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) {
3958     assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!");
3959     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
3960     Builder.SetInsertPoint(VectorLoopLatch->getTerminator());
3961     VectorParts RdxParts(UF);
3962     for (unsigned Part = 0; Part < UF; ++Part) {
3963       RdxParts[Part] = State.get(LoopExitInstDef, Part);
3964       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
3965       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
3966                                         : Builder.CreateZExt(Trunc, VecTy);
3967       for (User *U : llvm::make_early_inc_range(RdxParts[Part]->users()))
3968         if (U != Trunc) {
3969           U->replaceUsesOfWith(RdxParts[Part], Extnd);
3970           RdxParts[Part] = Extnd;
3971         }
3972     }
3973     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3974     for (unsigned Part = 0; Part < UF; ++Part) {
3975       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
3976       State.reset(LoopExitInstDef, RdxParts[Part], Part);
3977     }
3978   }
3979 
3980   // Reduce all of the unrolled parts into a single vector.
3981   Value *ReducedPartRdx = State.get(LoopExitInstDef, 0);
3982   unsigned Op = RecurrenceDescriptor::getOpcode(RK);
3983 
3984   // The middle block terminator has already been assigned a DebugLoc here (the
3985   // OrigLoop's single latch terminator). We want the whole middle block to
3986   // appear to execute on this line because: (a) it is all compiler generated,
3987   // (b) these instructions are always executed after evaluating the latch
3988   // conditional branch, and (c) other passes may add new predecessors which
3989   // terminate on this line. This is the easiest way to ensure we don't
3990   // accidentally cause an extra step back into the loop while debugging.
3991   setDebugLocFromInst(LoopMiddleBlock->getTerminator());
3992   if (PhiR->isOrdered())
3993     ReducedPartRdx = State.get(LoopExitInstDef, UF - 1);
3994   else {
3995     // Floating-point operations should have some FMF to enable the reduction.
3996     IRBuilderBase::FastMathFlagGuard FMFG(Builder);
3997     Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
3998     for (unsigned Part = 1; Part < UF; ++Part) {
3999       Value *RdxPart = State.get(LoopExitInstDef, Part);
4000       if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
4001         ReducedPartRdx = Builder.CreateBinOp(
4002             (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
4003       } else if (RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK))
4004         ReducedPartRdx = createSelectCmpOp(Builder, ReductionStartValue, RK,
4005                                            ReducedPartRdx, RdxPart);
4006       else
4007         ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
4008     }
4009   }
4010 
4011   // Create the reduction after the loop. Note that inloop reductions create the
4012   // target reduction in the loop using a Reduction recipe.
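  // For example (illustrative, an integer add reduction with VF = 4), this
  // emits something like:
  //   %rdx = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %part.rdx)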
4013   if (VF.isVector() && !PhiR->isInLoop()) {
4014     ReducedPartRdx =
4015         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, OrigPhi);
4016     // If the reduction can be performed in a smaller type, we need to extend
4017     // the reduction to the wider type before we branch to the original loop.
4018     if (PhiTy != RdxDesc.getRecurrenceType())
4019       ReducedPartRdx = RdxDesc.isSigned()
4020                            ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
4021                            : Builder.CreateZExt(ReducedPartRdx, PhiTy);
4022   }
4023 
4024   PHINode *ResumePhi =
4025       dyn_cast<PHINode>(PhiR->getStartValue()->getUnderlyingValue());
4026 
4027   // Create a phi node that merges control-flow from the backedge-taken check
4028   // block and the middle block.
4029   PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx",
4030                                         LoopScalarPreHeader->getTerminator());
4031 
4032   // If we are fixing reductions in the epilogue loop then we should already
4033   // have created a bc.merge.rdx Phi after the main vector body. Ensure that
4034   // we carry over the incoming values correctly.
4035   for (auto *Incoming : predecessors(LoopScalarPreHeader)) {
4036     if (Incoming == LoopMiddleBlock)
4037       BCBlockPhi->addIncoming(ReducedPartRdx, Incoming);
4038     else if (ResumePhi && llvm::is_contained(ResumePhi->blocks(), Incoming))
4039       BCBlockPhi->addIncoming(ResumePhi->getIncomingValueForBlock(Incoming),
4040                               Incoming);
4041     else
4042       BCBlockPhi->addIncoming(ReductionStartValue, Incoming);
4043   }
4044 
4045   // Set the resume value for this reduction
4046   ReductionResumeValues.insert({&RdxDesc, BCBlockPhi});
4047 
4048   // Now, we need to fix the users of the reduction variable
4049   // inside and outside of the scalar remainder loop.
4050 
4051   // We know that the loop is in LCSSA form. We need to update the PHI nodes
4052   // in the exit blocks.  See comment on analogous loop in
  // fixFirstOrderRecurrence for a more complete explanation of the logic.
4054   if (!Cost->requiresScalarEpilogue(VF))
4055     for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4056       if (llvm::is_contained(LCSSAPhi.incoming_values(), LoopExitInst))
4057         LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
4058 
4059   // Fix the scalar loop reduction variable with the incoming reduction sum
4060   // from the vector body and from the backedge value.
4061   int IncomingEdgeBlockIdx =
4062       OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4063   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4064   // Pick the other block.
4065   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4066   OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4067   OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4068 }
4069 
void InnerLoopVectorizer::clearReductionWrapFlags(
    const RecurrenceDescriptor &RdxDesc, VPTransformState &State) {
4072   RecurKind RK = RdxDesc.getRecurrenceKind();
4073   if (RK != RecurKind::Add && RK != RecurKind::Mul)
4074     return;
4075 
4076   Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
4077   assert(LoopExitInstr && "null loop exit instruction");
4078   SmallVector<Instruction *, 8> Worklist;
4079   SmallPtrSet<Instruction *, 8> Visited;
4080   Worklist.push_back(LoopExitInstr);
4081   Visited.insert(LoopExitInstr);
4082 
4083   while (!Worklist.empty()) {
4084     Instruction *Cur = Worklist.pop_back_val();
4085     if (isa<OverflowingBinaryOperator>(Cur))
4086       for (unsigned Part = 0; Part < UF; ++Part) {
4087         // FIXME: Should not rely on getVPValue at this point.
4088         Value *V = State.get(State.Plan->getVPValue(Cur, true), Part);
4089         cast<Instruction>(V)->dropPoisonGeneratingFlags();
4090       }
4091 
4092     for (User *U : Cur->users()) {
4093       Instruction *UI = cast<Instruction>(U);
4094       if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
4095           Visited.insert(UI).second)
4096         Worklist.push_back(UI);
4097     }
4098   }
4099 }
4100 
4101 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) {
4102   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4103     if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1)
4104       // Some phis were already hand updated by the reduction and recurrence
4105       // code above, leave them alone.
4106       continue;
4107 
4108     auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
4109     // Non-instruction incoming values will have only one value.
4110 
4111     VPLane Lane = VPLane::getFirstLane();
4112     if (isa<Instruction>(IncomingValue) &&
4113         !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue),
4114                                            VF))
4115       Lane = VPLane::getLastLaneForVF(VF);
4116 
4117     // Can be a loop invariant incoming value or the last scalar value to be
4118     // extracted from the vectorized loop.
4119     // FIXME: Should not rely on getVPValue at this point.
4120     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4121     Value *lastIncomingValue =
4122         OrigLoop->isLoopInvariant(IncomingValue)
4123             ? IncomingValue
4124             : State.get(State.Plan->getVPValue(IncomingValue, true),
4125                         VPIteration(UF - 1, Lane));
4126     LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
4127   }
4128 }
4129 
4130 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4131   // The basic block and loop containing the predicated instruction.
4132   auto *PredBB = PredInst->getParent();
4133   auto *VectorLoop = LI->getLoopFor(PredBB);
4134 
4135   // Initialize a worklist with the operands of the predicated instruction.
4136   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4137 
4138   // Holds instructions that we need to analyze again. An instruction may be
4139   // reanalyzed if we don't yet know if we can sink it or not.
4140   SmallVector<Instruction *, 8> InstsToReanalyze;
4141 
4142   // Returns true if a given use occurs in the predicated block. Phi nodes use
4143   // their operands in their corresponding predecessor blocks.
4144   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4145     auto *I = cast<Instruction>(U.getUser());
4146     BasicBlock *BB = I->getParent();
4147     if (auto *Phi = dyn_cast<PHINode>(I))
4148       BB = Phi->getIncomingBlock(
4149           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4150     return BB == PredBB;
4151   };
4152 
  // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
  // operands are then added to the worklist. The algorithm ends when a full
  // pass through the worklist doesn't sink a single instruction.
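  // For example (a sketch): if PredBB contains a scalarized store whose
  // address is a GEP defined outside PredBB and used only by that store, the
  // GEP is moved into PredBB, and the GEP's own operands are reconsidered on
  // the next pass.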
4157   bool Changed;
4158   do {
4159     // Add the instructions that need to be reanalyzed to the worklist, and
4160     // reset the changed indicator.
4161     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4162     InstsToReanalyze.clear();
4163     Changed = false;
4164 
4165     while (!Worklist.empty()) {
4166       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4167 
4168       // We can't sink an instruction if it is a phi node, is not in the loop,
4169       // or may have side effects.
4170       if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) ||
4171           I->mayHaveSideEffects())
4172         continue;
4173 
4174       // If the instruction is already in PredBB, check if we can sink its
4175       // operands. In that case, VPlan's sinkScalarOperands() succeeded in
4176       // sinking the scalar instruction I, hence it appears in PredBB; but it
4177       // may have failed to sink I's operands (recursively), which we try
4178       // (again) here.
4179       if (I->getParent() == PredBB) {
4180         Worklist.insert(I->op_begin(), I->op_end());
4181         continue;
4182       }
4183 
4184       // It's legal to sink the instruction if all its uses occur in the
4185       // predicated block. Otherwise, there's nothing to do yet, and we may
4186       // need to reanalyze the instruction.
4187       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4188         InstsToReanalyze.push_back(I);
4189         continue;
4190       }
4191 
      // Move the instruction to the beginning of the predicated block, and add
      // its operands to the worklist.
4194       I->moveBefore(&*PredBB->getFirstInsertionPt());
4195       Worklist.insert(I->op_begin(), I->op_end());
4196 
4197       // The sinking may have enabled other instructions to be sunk, so we will
4198       // need to iterate.
4199       Changed = true;
4200     }
4201   } while (Changed);
4202 }
4203 
4204 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
4205   for (PHINode *OrigPhi : OrigPHIsToFix) {
4206     VPWidenPHIRecipe *VPPhi =
4207         cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi));
4208     PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
4209     // Make sure the builder has a valid insert point.
4210     Builder.SetInsertPoint(NewPhi);
4211     for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) {
4212       VPValue *Inc = VPPhi->getIncomingValue(i);
4213       VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i);
4214       NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]);
4215     }
4216   }
4217 }
4218 
4219 bool InnerLoopVectorizer::useOrderedReductions(
4220     const RecurrenceDescriptor &RdxDesc) {
4221   return Cost->useOrderedReductions(RdxDesc);
4222 }
4223 
4224 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
4225                                               VPWidenPHIRecipe *PhiR,
4226                                               VPTransformState &State) {
4227   PHINode *P = cast<PHINode>(PN);
4228   if (EnableVPlanNativePath) {
4229     // Currently we enter here in the VPlan-native path for non-induction
4230     // PHIs where all control flow is uniform. We simply widen these PHIs.
4231     // Create a vector phi with no operands - the vector phi operands will be
4232     // set at the end of vector code generation.
4233     Type *VecTy = (State.VF.isScalar())
4234                       ? PN->getType()
4235                       : VectorType::get(PN->getType(), State.VF);
4236     Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4237     State.set(PhiR, VecPhi, 0);
4238     OrigPHIsToFix.push_back(P);
4239 
4240     return;
4241   }
4242 
4243   assert(PN->getParent() == OrigLoop->getHeader() &&
4244          "Non-header phis should have been handled elsewhere");
4245 
4246   // In order to support recurrences we need to be able to vectorize Phi nodes.
4247   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4248   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4249   // this value when we vectorize all of the instructions that use the PHI.
4250 
4251   assert(!Legal->isReductionVariable(P) &&
4252          "reductions should be handled elsewhere");
4253 
4254   setDebugLocFromInst(P);
4255 
4256   // This PHINode must be an induction variable.
4257   // Make sure that we know about it.
4258   assert(Legal->getInductionVars().count(P) && "Not an induction variable");
4259 
4260   InductionDescriptor II = Legal->getInductionVars().lookup(P);
4261   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4262 
4263   auto *IVR = PhiR->getParent()->getPlan()->getCanonicalIV();
4264   PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, 0));
4265 
4266   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4267   // which can be found from the original scalar operations.
4268   switch (II.getKind()) {
4269   case InductionDescriptor::IK_NoInduction:
4270     llvm_unreachable("Unknown induction");
4271   case InductionDescriptor::IK_IntInduction:
4272   case InductionDescriptor::IK_FpInduction:
4273     llvm_unreachable("Integer/fp induction is handled elsewhere.");
4274   case InductionDescriptor::IK_PtrInduction: {
4275     // Handle the pointer induction variable case.
4276     assert(P->getType()->isPointerTy() && "Unexpected type.");
4277 
4278     if (Cost->isScalarAfterVectorization(P, State.VF)) {
4279       // This is the normalized GEP that starts counting at zero.
4280       Value *PtrInd =
4281           Builder.CreateSExtOrTrunc(CanonicalIV, II.getStep()->getType());
4282       // Determine the number of scalars we need to generate for each unroll
4283       // iteration. If the instruction is uniform, we only need to generate the
4284       // first lane. Otherwise, we generate all VF values.
4285       bool IsUniform = vputils::onlyFirstLaneUsed(PhiR);
4286       assert((IsUniform || !State.VF.isScalable()) &&
4287              "Cannot scalarize a scalable VF");
4288       unsigned Lanes = IsUniform ? 1 : State.VF.getFixedValue();
4289 
4290       for (unsigned Part = 0; Part < UF; ++Part) {
4291         Value *PartStart =
4292             createStepForVF(Builder, PtrInd->getType(), VF, Part);
4293 
4294         for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4295           Value *Idx = Builder.CreateAdd(
4296               PartStart, ConstantInt::get(PtrInd->getType(), Lane));
4297           Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4298 
4299           Value *Step = CreateStepValue(II.getStep(), *PSE.getSE(),
4300                                         State.CFG.PrevBB->getTerminator());
4301           Value *SclrGep = emitTransformedIndex(Builder, GlobalIdx,
4302                                                 II.getStartValue(), Step, II);
4303           SclrGep->setName("next.gep");
4304           State.set(PhiR, SclrGep, VPIteration(Part, Lane));
4305         }
4306       }
4307       return;
4308     }
4309     assert(isa<SCEVConstant>(II.getStep()) &&
4310            "Induction step not a SCEV constant!");
4311     Type *PhiType = II.getStep()->getType();
4312 
4313     // Build a pointer phi
4314     Value *ScalarStartValue = PhiR->getStartValue()->getLiveInIRValue();
4315     Type *ScStValueType = ScalarStartValue->getType();
4316     PHINode *NewPointerPhi =
4317         PHINode::Create(ScStValueType, 2, "pointer.phi", CanonicalIV);
4318     NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader);
4319 
4320     // A pointer induction, performed by using a gep
4321     BasicBlock *LoopLatch = LI->getLoopFor(State.CFG.PrevBB)->getLoopLatch();
4322     Instruction *InductionLoc = LoopLatch->getTerminator();
4323     const SCEV *ScalarStep = II.getStep();
4324     SCEVExpander Exp(*PSE.getSE(), DL, "induction");
4325     Value *ScalarStepValue =
4326         Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc);
4327     Value *RuntimeVF = getRuntimeVF(Builder, PhiType, VF);
4328     Value *NumUnrolledElems =
4329         Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF));
4330     Value *InductionGEP = GetElementPtrInst::Create(
4331         II.getElementType(), NewPointerPhi,
4332         Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind",
4333         InductionLoc);
4334     NewPointerPhi->addIncoming(InductionGEP, LoopLatch);
4335 
4336     // Create UF many actual address geps that use the pointer
4337     // phi as base and a vectorized version of the step value
4338     // (<step*0, ..., step*N>) as offset.
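    // For example (a sketch, VF = 4, UF = 1, i32 elements, unit step):
    //   %pointer.phi = phi i32* [ %start, %vector.ph ], [ %ptr.ind, %latch ]
    //   %vector.gep  = getelementptr i32, i32* %pointer.phi,
    //                      <4 x i64> <i64 0, i64 1, i64 2, i64 3>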
4339     for (unsigned Part = 0; Part < State.UF; ++Part) {
4340       Type *VecPhiType = VectorType::get(PhiType, State.VF);
4341       Value *StartOffsetScalar =
4342           Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part));
4343       Value *StartOffset =
4344           Builder.CreateVectorSplat(State.VF, StartOffsetScalar);
4345       // Create a vector of consecutive numbers from zero to VF.
4346       StartOffset =
4347           Builder.CreateAdd(StartOffset, Builder.CreateStepVector(VecPhiType));
4348 
4349       Value *GEP = Builder.CreateGEP(
4350           II.getElementType(), NewPointerPhi,
4351           Builder.CreateMul(
4352               StartOffset, Builder.CreateVectorSplat(State.VF, ScalarStepValue),
4353               "vector.gep"));
4354       State.set(PhiR, GEP, Part);
4355     }
4356   }
4357   }
4358 }
4359 
4360 /// A helper function for checking whether an integer division-related
4361 /// instruction may divide by zero (in which case it must be predicated if
4362 /// executed conditionally in the scalar code).
4363 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
/// Non-zero divisors that are not compile-time constants will not be
/// converted into multiplication, so we will still end up scalarizing
/// the division, but can do so w/o predication.
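/// For example (illustrative): "udiv i32 %x, %y" may divide by zero and
/// returns true; "udiv i32 %x, 7" returns false; "udiv i32 %x, 0" returns
/// true, so the division stays predicated.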
4367 static bool mayDivideByZero(Instruction &I) {
4368   assert((I.getOpcode() == Instruction::UDiv ||
4369           I.getOpcode() == Instruction::SDiv ||
4370           I.getOpcode() == Instruction::URem ||
4371           I.getOpcode() == Instruction::SRem) &&
4372          "Unexpected instruction");
4373   Value *Divisor = I.getOperand(1);
4374   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4375   return !CInt || CInt->isZero();
4376 }
4377 
4378 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def,
4379                                                VPUser &ArgOperands,
4380                                                VPTransformState &State) {
4381   assert(!isa<DbgInfoIntrinsic>(I) &&
4382          "DbgInfoIntrinsic should have been dropped during VPlan construction");
4383   setDebugLocFromInst(&I);
4384 
4385   Module *M = I.getParent()->getParent()->getParent();
4386   auto *CI = cast<CallInst>(&I);
4387 
4388   SmallVector<Type *, 4> Tys;
4389   for (Value *ArgOperand : CI->args())
4390     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue()));
4391 
4392   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4393 
  // This flag shows whether we use an intrinsic or a usual call for the
  // vectorized version of the instruction, i.e., whether it is more
  // beneficial to perform an intrinsic call than a library call.
4397   bool NeedToScalarize = false;
4398   InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
4399   InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0;
4400   bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
4401   assert((UseVectorIntrinsic || !NeedToScalarize) &&
4402          "Instruction should be scalarized elsewhere.");
4403   assert((IntrinsicCost.isValid() || CallCost.isValid()) &&
4404          "Either the intrinsic cost or vector call cost must be valid");
4405 
4406   for (unsigned Part = 0; Part < UF; ++Part) {
4407     SmallVector<Type *, 2> TysForDecl = {CI->getType()};
4408     SmallVector<Value *, 4> Args;
4409     for (auto &I : enumerate(ArgOperands.operands())) {
4410       // Some intrinsics have a scalar argument - don't replace it with a
4411       // vector.
4412       Value *Arg;
4413       if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
4414         Arg = State.get(I.value(), Part);
4415       else {
4416         Arg = State.get(I.value(), VPIteration(0, 0));
4417         if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index()))
4418           TysForDecl.push_back(Arg->getType());
4419       }
4420       Args.push_back(Arg);
4421     }
4422 
4423     Function *VectorF;
4424     if (UseVectorIntrinsic) {
4425       // Use vector version of the intrinsic.
4426       if (VF.isVector())
4427         TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
4428       VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4429       assert(VectorF && "Can't retrieve vector intrinsic.");
4430     } else {
4431       // Use vector version of the function call.
4432       const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
4433 #ifndef NDEBUG
4434       assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
4435              "Can't create vector function.");
4436 #endif
      VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
    }
    SmallVector<OperandBundleDef, 1> OpBundles;
    CI->getOperandBundlesAsDefs(OpBundles);
    CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);

    if (isa<FPMathOperator>(V))
      V->copyFastMathFlags(CI);

    State.set(Def, V, Part);
    addMetadata(V, &I);
4448   }
4449 }
4450 
4451 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
4452   // We should not collect Scalars more than once per VF. Right now, this
4453   // function is called from collectUniformsAndScalars(), which already does
4454   // this check. Collecting Scalars for VF=1 does not make any sense.
4455   assert(VF.isVector() && Scalars.find(VF) == Scalars.end() &&
4456          "This function should not be visited twice for the same VF");
4457 
4458   // This avoids any chances of creating a REPLICATE recipe during planning
4459   // since that would result in generation of scalarized code during execution,
4460   // which is not supported for scalable vectors.
4461   if (VF.isScalable()) {
4462     Scalars[VF].insert(Uniforms[VF].begin(), Uniforms[VF].end());
4463     return;
4464   }
4465 
4466   SmallSetVector<Instruction *, 8> Worklist;
4467 
4468   // These sets are used to seed the analysis with pointers used by memory
4469   // accesses that will remain scalar.
4470   SmallSetVector<Instruction *, 8> ScalarPtrs;
4471   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
4472   auto *Latch = TheLoop->getLoopLatch();
4473 
4474   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
4475   // The pointer operands of loads and stores will be scalar as long as the
4476   // memory access is not a gather or scatter operation. The value operand of a
4477   // store will remain scalar if the store is scalarized.
4478   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
4479     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
4480     assert(WideningDecision != CM_Unknown &&
4481            "Widening decision should be ready at this moment");
4482     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
4483       if (Ptr == Store->getValueOperand())
4484         return WideningDecision == CM_Scalarize;
4485     assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
4486            "Ptr is neither a value or pointer operand");
4487     return WideningDecision != CM_GatherScatter;
4488   };
4489 
4490   // A helper that returns true if the given value is a bitcast or
4491   // getelementptr instruction contained in the loop.
4492   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
4493     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
4494             isa<GetElementPtrInst>(V)) &&
4495            !TheLoop->isLoopInvariant(V);
4496   };
4497 
4498   // A helper that evaluates a memory access's use of a pointer. If the use will
4499   // be a scalar use and the pointer is only used by memory accesses, we place
4500   // the pointer in ScalarPtrs. Otherwise, the pointer is placed in
4501   // PossibleNonScalarPtrs.
4502   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
4503     // We only care about bitcast and getelementptr instructions contained in
4504     // the loop.
4505     if (!isLoopVaryingBitCastOrGEP(Ptr))
4506       return;
4507 
4508     // If the pointer has already been identified as scalar (e.g., if it was
4509     // also identified as uniform), there's nothing to do.
4510     auto *I = cast<Instruction>(Ptr);
4511     if (Worklist.count(I))
4512       return;
4513 
4514     // If the use of the pointer will be a scalar use, and all users of the
4515     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
4516     // place the pointer in PossibleNonScalarPtrs.
4517     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
4518           return isa<LoadInst>(U) || isa<StoreInst>(U);
4519         }))
4520       ScalarPtrs.insert(I);
4521     else
4522       PossibleNonScalarPtrs.insert(I);
4523   };
4524 
  // We seed the scalars analysis with two classes of instructions: (1)
  // instructions marked uniform-after-vectorization and (2) bitcast,
  // getelementptr and (pointer) phi instructions used by memory accesses
  // requiring a scalar use.
4529   //
4530   // (1) Add to the worklist all instructions that have been identified as
4531   // uniform-after-vectorization.
4532   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
4533 
4534   // (2) Add to the worklist all bitcast and getelementptr instructions used by
4535   // memory accesses requiring a scalar use. The pointer operands of loads and
4536   // stores will be scalar as long as the memory accesses is not a gather or
4537   // scatter operation. The value operand of a store will remain scalar if the
4538   // store is scalarized.
4539   for (auto *BB : TheLoop->blocks())
4540     for (auto &I : *BB) {
4541       if (auto *Load = dyn_cast<LoadInst>(&I)) {
4542         evaluatePtrUse(Load, Load->getPointerOperand());
4543       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
4544         evaluatePtrUse(Store, Store->getPointerOperand());
4545         evaluatePtrUse(Store, Store->getValueOperand());
4546       }
4547     }
4548   for (auto *I : ScalarPtrs)
4549     if (!PossibleNonScalarPtrs.count(I)) {
4550       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
4551       Worklist.insert(I);
4552     }
4553 
4554   // Insert the forced scalars.
4555   // FIXME: Currently widenPHIInstruction() often creates a dead vector
4556   // induction variable when the PHI user is scalarized.
4557   auto ForcedScalar = ForcedScalars.find(VF);
4558   if (ForcedScalar != ForcedScalars.end())
4559     for (auto *I : ForcedScalar->second)
4560       Worklist.insert(I);
4561 
4562   // Expand the worklist by looking through any bitcasts and getelementptr
4563   // instructions we've already identified as scalar. This is similar to the
4564   // expansion step in collectLoopUniforms(); however, here we're only
4565   // expanding to include additional bitcasts and getelementptr instructions.
4566   unsigned Idx = 0;
4567   while (Idx != Worklist.size()) {
4568     Instruction *Dst = Worklist[Idx++];
4569     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
4570       continue;
4571     auto *Src = cast<Instruction>(Dst->getOperand(0));
4572     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
4573           auto *J = cast<Instruction>(U);
4574           return !TheLoop->contains(J) || Worklist.count(J) ||
4575                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
4576                   isScalarUse(J, Src));
4577         })) {
4578       Worklist.insert(Src);
4579       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
4580     }
4581   }
4582 
4583   // An induction variable will remain scalar if all users of the induction
4584   // variable and induction variable update remain scalar.
4585   for (auto &Induction : Legal->getInductionVars()) {
4586     auto *Ind = Induction.first;
4587     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4588 
4589     // If tail-folding is applied, the primary induction variable will be used
4590     // to feed a vector compare.
4591     if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
4592       continue;
4593 
4594     // Returns true if \p Indvar is a pointer induction that is used directly by
4595     // load/store instruction \p I.
4596     auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar,
4597                                               Instruction *I) {
4598       return Induction.second.getKind() ==
4599                  InductionDescriptor::IK_PtrInduction &&
4600              (isa<LoadInst>(I) || isa<StoreInst>(I)) &&
4601              Indvar == getLoadStorePointerOperand(I) && isScalarUse(I, Indvar);
4602     };
4603 
4604     // Determine if all users of the induction variable are scalar after
4605     // vectorization.
4606     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4607       auto *I = cast<Instruction>(U);
4608       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
4609              IsDirectLoadStoreFromPtrIndvar(Ind, I);
4610     });
4611     if (!ScalarInd)
4612       continue;
4613 
4614     // Determine if all users of the induction variable update instruction are
4615     // scalar after vectorization.
4616     auto ScalarIndUpdate =
4617         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4618           auto *I = cast<Instruction>(U);
4619           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
4620                  IsDirectLoadStoreFromPtrIndvar(IndUpdate, I);
4621         });
4622     if (!ScalarIndUpdate)
4623       continue;
4624 
4625     // The induction variable and its update instruction will remain scalar.
4626     Worklist.insert(Ind);
4627     Worklist.insert(IndUpdate);
4628     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
4629     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
4630                       << "\n");
4631   }
4632 
4633   Scalars[VF].insert(Worklist.begin(), Worklist.end());
4634 }
4635 
4636 bool LoopVectorizationCostModel::isScalarWithPredication(
4637     Instruction *I, ElementCount VF) const {
4638   if (!blockNeedsPredicationForAnyReason(I->getParent()))
4639     return false;
  switch (I->getOpcode()) {
4641   default:
4642     break;
4643   case Instruction::Load:
4644   case Instruction::Store: {
4645     if (!Legal->isMaskRequired(I))
4646       return false;
4647     auto *Ptr = getLoadStorePointerOperand(I);
4648     auto *Ty = getLoadStoreType(I);
4649     Type *VTy = Ty;
4650     if (VF.isVector())
4651       VTy = VectorType::get(Ty, VF);
4652     const Align Alignment = getLoadStoreAlignment(I);
4653     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
4654                                 TTI.isLegalMaskedGather(VTy, Alignment))
4655                             : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
4656                                 TTI.isLegalMaskedScatter(VTy, Alignment));
4657   }
4658   case Instruction::UDiv:
4659   case Instruction::SDiv:
4660   case Instruction::SRem:
4661   case Instruction::URem:
4662     return mayDivideByZero(*I);
4663   }
4664   return false;
4665 }
4666 
4667 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
4668     Instruction *I, ElementCount VF) {
4669   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
4670   assert(getWideningDecision(I, VF) == CM_Unknown &&
4671          "Decision should not be set yet.");
4672   auto *Group = getInterleavedAccessGroup(I);
4673   assert(Group && "Must have a group.");
4674 
  // If the instruction's allocated size doesn't equal its type size, it
  // requires padding and will be scalarized.
4677   auto &DL = I->getModule()->getDataLayout();
4678   auto *ScalarTy = getLoadStoreType(I);
4679   if (hasIrregularType(ScalarTy, DL))
4680     return false;
4681 
4682   // Check if masking is required.
4683   // A Group may need masking for one of two reasons: it resides in a block that
4684   // needs predication, or it was decided to use masking to deal with gaps
4685   // (either a gap at the end of a load-access that may result in a speculative
4686   // load, or any gaps in a store-access).
4687   bool PredicatedAccessRequiresMasking =
4688       blockNeedsPredicationForAnyReason(I->getParent()) &&
4689       Legal->isMaskRequired(I);
4690   bool LoadAccessWithGapsRequiresEpilogMasking =
4691       isa<LoadInst>(I) && Group->requiresScalarEpilogue() &&
4692       !isScalarEpilogueAllowed();
4693   bool StoreAccessWithGapsRequiresMasking =
4694       isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor());
4695   if (!PredicatedAccessRequiresMasking &&
4696       !LoadAccessWithGapsRequiresEpilogMasking &&
4697       !StoreAccessWithGapsRequiresMasking)
4698     return true;
4699 
4700   // If masked interleaving is required, we expect that the user/target had
4701   // enabled it, because otherwise it either wouldn't have been created or
4702   // it should have been invalidated by the CostModel.
4703   assert(useMaskedInterleavedAccesses(TTI) &&
4704          "Masked interleave-groups for predicated accesses are not enabled.");
4705 
4706   if (Group->isReverse())
4707     return false;
4708 
4709   auto *Ty = getLoadStoreType(I);
4710   const Align Alignment = getLoadStoreAlignment(I);
4711   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
4712                           : TTI.isLegalMaskedStore(Ty, Alignment);
4713 }
4714 
4715 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
4716     Instruction *I, ElementCount VF) {
4717   // Get and ensure we have a valid memory instruction.
4718   assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction");
4719 
4720   auto *Ptr = getLoadStorePointerOperand(I);
4721   auto *ScalarTy = getLoadStoreType(I);
4722 
4723   // In order to be widened, the pointer should be consecutive, first of all.
4724   if (!Legal->isConsecutivePtr(ScalarTy, Ptr))
4725     return false;
4726 
4727   // If the instruction is a store located in a predicated block, it will be
4728   // scalarized.
4729   if (isScalarWithPredication(I, VF))
4730     return false;
4731 
  // If the instruction's allocated size doesn't equal its type size, it
  // requires padding and will be scalarized.
4734   auto &DL = I->getModule()->getDataLayout();
4735   if (hasIrregularType(ScalarTy, DL))
4736     return false;
4737 
4738   return true;
4739 }
4740 
4741 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
4742   // We should not collect Uniforms more than once per VF. Right now,
4743   // this function is called from collectUniformsAndScalars(), which
4744   // already does this check. Collecting Uniforms for VF=1 does not make any
4745   // sense.
4746 
4747   assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
4748          "This function should not be visited twice for the same VF");
4749 
  // Visit the list of Uniforms. If we find no uniform value, we won't
  // analyze it again; Uniforms.count(VF) will still return 1.
4752   Uniforms[VF].clear();
4753 
4754   // We now know that the loop is vectorizable!
4755   // Collect instructions inside the loop that will remain uniform after
4756   // vectorization.
4757 
  // Global values, params and instructions outside of the current loop are
  // out of scope.
4760   auto isOutOfScope = [&](Value *V) -> bool {
4761     Instruction *I = dyn_cast<Instruction>(V);
4762     return (!I || !TheLoop->contains(I));
4763   };
4764 
4765   // Worklist containing uniform instructions demanding lane 0.
4766   SetVector<Instruction *> Worklist;
4767   BasicBlock *Latch = TheLoop->getLoopLatch();
4768 
4769   // Add uniform instructions demanding lane 0 to the worklist. Instructions
4770   // that are scalar with predication must not be considered uniform after
4771   // vectorization, because that would create an erroneous replicating region
4772   // where only a single instance out of VF should be formed.
4773   // TODO: optimize such seldom cases if found important, see PR40816.
4774   auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
4775     if (isOutOfScope(I)) {
4776       LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
4777                         << *I << "\n");
4778       return;
4779     }
4780     if (isScalarWithPredication(I, VF)) {
4781       LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
4782                         << *I << "\n");
4783       return;
4784     }
4785     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
4786     Worklist.insert(I);
4787   };
4788 
4789   // Start with the conditional branch. If the branch condition is an
4790   // instruction contained in the loop that is only used by the branch, it is
4791   // uniform.
4792   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
4793   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
4794     addToWorklistIfAllowed(Cmp);
4795 
4796   auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
4797     InstWidening WideningDecision = getWideningDecision(I, VF);
4798     assert(WideningDecision != CM_Unknown &&
4799            "Widening decision should be ready at this moment");
4800 
4801     // A uniform memory op is itself uniform.  We exclude uniform stores
4802     // here as they demand the last lane, not the first one.
4803     if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) {
4804       assert(WideningDecision == CM_Scalarize);
4805       return true;
4806     }
4807 
4808     return (WideningDecision == CM_Widen ||
4809             WideningDecision == CM_Widen_Reverse ||
4810             WideningDecision == CM_Interleave);
4811   };
4812 
4814   // Returns true if Ptr is the pointer operand of a memory access instruction
4815   // I, and I is known to not require scalarization.
4816   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
4817     return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
4818   };
4819 
4820   // Holds a list of values which are known to have at least one uniform use.
4821   // Note that there may be other uses which aren't uniform.  A "uniform use"
4822   // here is something which only demands lane 0 of the unrolled iterations;
  // it does not imply that all lanes produce the same value (i.e. this is
  // not the usual meaning of uniform).
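  // As an illustrative example: the pointer feeding a consecutive widened
  // load has a uniform use, because only lane 0 of the address is demanded
  // (the remaining lanes' addresses are derived from it), even though each
  // lane conceptually accesses a different location.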
4825   SetVector<Value *> HasUniformUse;
4826 
4827   // Scan the loop for instructions which are either a) known to have only
4828   // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
4829   for (auto *BB : TheLoop->blocks())
4830     for (auto &I : *BB) {
4831       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) {
4832         switch (II->getIntrinsicID()) {
4833         case Intrinsic::sideeffect:
4834         case Intrinsic::experimental_noalias_scope_decl:
4835         case Intrinsic::assume:
4836         case Intrinsic::lifetime_start:
4837         case Intrinsic::lifetime_end:
4838           if (TheLoop->hasLoopInvariantOperands(&I))
4839             addToWorklistIfAllowed(&I);
4840           break;
4841         default:
4842           break;
4843         }
4844       }
4845 
4846       // ExtractValue instructions must be uniform, because the operands are
4847       // known to be loop-invariant.
4848       if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) {
4849         assert(isOutOfScope(EVI->getAggregateOperand()) &&
4850                "Expected aggregate value to be loop invariant");
4851         addToWorklistIfAllowed(EVI);
4852         continue;
4853       }
4854 
4855       // If there's no pointer operand, there's nothing to do.
4856       auto *Ptr = getLoadStorePointerOperand(&I);
4857       if (!Ptr)
4858         continue;
4859 
4860       // A uniform memory op is itself uniform.  We exclude uniform stores
4861       // here as they demand the last lane, not the first one.
4862       if (isa<LoadInst>(I) && Legal->isUniformMemOp(I))
4863         addToWorklistIfAllowed(&I);
4864 
4865       if (isUniformDecision(&I, VF)) {
4866         assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check");
4867         HasUniformUse.insert(Ptr);
4868       }
4869     }
4870 
  // Add to the worklist any operands which have *only* uniform (i.e. lane-0
  // demanding) users.  Since loops are assumed to be in LCSSA form, this
  // disallows uses outside the loop as well.
4874   for (auto *V : HasUniformUse) {
4875     if (isOutOfScope(V))
4876       continue;
4877     auto *I = cast<Instruction>(V);
4878     auto UsersAreMemAccesses =
4879       llvm::all_of(I->users(), [&](User *U) -> bool {
4880         return isVectorizedMemAccessUse(cast<Instruction>(U), V);
4881       });
4882     if (UsersAreMemAccesses)
4883       addToWorklistIfAllowed(I);
4884   }
4885 
  // Expand Worklist in topological order: whenever a new instruction is
  // added, its users should already be inside the Worklist.  This ensures
  // that a uniform instruction will only be used by uniform instructions.
4889   unsigned idx = 0;
4890   while (idx != Worklist.size()) {
4891     Instruction *I = Worklist[idx++];
4892 
4893     for (auto OV : I->operand_values()) {
4894       // isOutOfScope operands cannot be uniform instructions.
4895       if (isOutOfScope(OV))
4896         continue;
      // First-order recurrence phis should typically be considered
      // non-uniform.
4899       auto *OP = dyn_cast<PHINode>(OV);
4900       if (OP && Legal->isFirstOrderRecurrence(OP))
4901         continue;
4902       // If all the users of the operand are uniform, then add the
4903       // operand into the uniform worklist.
4904       auto *OI = cast<Instruction>(OV);
4905       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
4906             auto *J = cast<Instruction>(U);
4907             return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
4908           }))
4909         addToWorklistIfAllowed(OI);
4910     }
4911   }
4912 
4913   // For an instruction to be added into Worklist above, all its users inside
4914   // the loop should also be in Worklist. However, this condition cannot be
4915   // true for phi nodes that form a cyclic dependence. We must process phi
4916   // nodes separately. An induction variable will remain uniform if all users
4917   // of the induction variable and induction variable update remain uniform.
4918   // The code below handles both pointer and non-pointer induction variables.
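  // As an illustrative sketch (hypothetical IR):
  //   %iv = phi i64 [ 0, %preheader ], [ %iv.next, %latch ]
  //   %iv.next = add i64 %iv, 1
  // If %iv and %iv.next are only used to form the addresses of widened
  // consecutive accesses (besides updating each other), both demand only
  // lane 0 and remain uniform after vectorization.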
4919   for (auto &Induction : Legal->getInductionVars()) {
4920     auto *Ind = Induction.first;
4921     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4922 
4923     // Determine if all users of the induction variable are uniform after
4924     // vectorization.
4925     auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4926       auto *I = cast<Instruction>(U);
4927       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
4928              isVectorizedMemAccessUse(I, Ind);
4929     });
4930     if (!UniformInd)
4931       continue;
4932 
4933     // Determine if all users of the induction variable update instruction are
4934     // uniform after vectorization.
4935     auto UniformIndUpdate =
4936         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4937           auto *I = cast<Instruction>(U);
4938           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
4939                  isVectorizedMemAccessUse(I, IndUpdate);
4940         });
4941     if (!UniformIndUpdate)
4942       continue;
4943 
4944     // The induction variable and its update instruction will remain uniform.
4945     addToWorklistIfAllowed(Ind);
4946     addToWorklistIfAllowed(IndUpdate);
4947   }
4948 
4949   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
4950 }
4951 
4952 bool LoopVectorizationCostModel::runtimeChecksRequired() {
4953   LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
4954 
4955   if (Legal->getRuntimePointerChecking()->Need) {
4956     reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
4957         "runtime pointer checks needed. Enable vectorization of this "
4958         "loop with '#pragma clang loop vectorize(enable)' when "
4959         "compiling with -Os/-Oz",
4960         "CantVersionLoopWithOptForSize", ORE, TheLoop);
4961     return true;
4962   }
4963 
4964   if (!PSE.getPredicate().isAlwaysTrue()) {
4965     reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
4966         "runtime SCEV checks needed. Enable vectorization of this "
4967         "loop with '#pragma clang loop vectorize(enable)' when "
4968         "compiling with -Os/-Oz",
4969         "CantVersionLoopWithOptForSize", ORE, TheLoop);
4970     return true;
4971   }
4972 
4973   // FIXME: Avoid specializing for stride==1 instead of bailing out.
4974   if (!Legal->getLAI()->getSymbolicStrides().empty()) {
4975     reportVectorizationFailure("Runtime stride check for small trip count",
4976         "runtime stride == 1 checks needed. Enable vectorization of "
4977         "this loop without such check by compiling with -Os/-Oz",
4978         "CantVersionLoopWithOptForSize", ORE, TheLoop);
4979     return true;
4980   }
4981 
4982   return false;
4983 }
4984 
4985 ElementCount
4986 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
4987   if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors)
4988     return ElementCount::getScalable(0);
4989 
4990   if (Hints->isScalableVectorizationDisabled()) {
4991     reportVectorizationInfo("Scalable vectorization is explicitly disabled",
4992                             "ScalableVectorizationDisabled", ORE, TheLoop);
4993     return ElementCount::getScalable(0);
4994   }
4995 
4996   LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n");
4997 
4998   auto MaxScalableVF = ElementCount::getScalable(
4999       std::numeric_limits<ElementCount::ScalarTy>::max());
5000 
5001   // Test that the loop-vectorizer can legalize all operations for this MaxVF.
5002   // FIXME: While for scalable vectors this is currently sufficient, this should
5003   // be replaced by a more detailed mechanism that filters out specific VFs,
5004   // instead of invalidating vectorization for a whole set of VFs based on the
5005   // MaxVF.
5006 
5007   // Disable scalable vectorization if the loop contains unsupported reductions.
5008   if (!canVectorizeReductions(MaxScalableVF)) {
5009     reportVectorizationInfo(
5010         "Scalable vectorization not supported for the reduction "
5011         "operations found in this loop.",
5012         "ScalableVFUnfeasible", ORE, TheLoop);
5013     return ElementCount::getScalable(0);
5014   }
5015 
5016   // Disable scalable vectorization if the loop contains any instructions
5017   // with element types not supported for scalable vectors.
5018   if (any_of(ElementTypesInLoop, [&](Type *Ty) {
5019         return !Ty->isVoidTy() &&
5020                !this->TTI.isElementTypeLegalForScalableVector(Ty);
5021       })) {
5022     reportVectorizationInfo("Scalable vectorization is not supported "
5023                             "for all element types found in this loop.",
5024                             "ScalableVFUnfeasible", ORE, TheLoop);
5025     return ElementCount::getScalable(0);
5026   }
5027 
5028   if (Legal->isSafeForAnyVectorWidth())
5029     return MaxScalableVF;
5030 
5031   // Limit MaxScalableVF by the maximum safe dependence distance.
5032   Optional<unsigned> MaxVScale = TTI.getMaxVScale();
5033   if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange))
5034     MaxVScale =
5035         TheFunction->getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax();
5036   MaxScalableVF = ElementCount::getScalable(
5037       MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);
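  // For example (hypothetical numbers): MaxSafeElements = 32 with a maximum
  // vscale of 16 limits the largest safe scalable VF to vscale x 2.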
5038   if (!MaxScalableVF)
5039     reportVectorizationInfo(
5040         "Max legal vector width too small, scalable vectorization "
5041         "unfeasible.",
5042         "ScalableVFUnfeasible", ORE, TheLoop);
5043 
5044   return MaxScalableVF;
5045 }
5046 
5047 FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF(
5048     unsigned ConstTripCount, ElementCount UserVF, bool FoldTailByMasking) {
5049   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
5050   unsigned SmallestType, WidestType;
5051   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
5052 
5053   // Get the maximum safe dependence distance in bits computed by LAA.
5054   // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
  // the memory access that is most restrictive (involved in the smallest
5056   // dependence distance).
5057   unsigned MaxSafeElements =
5058       PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType);
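  // For example (hypothetical numbers): a max safe vector width of 256 bits
  // with a widest type of 32 bits gives
  // MaxSafeElements = PowerOf2Floor(256 / 32) = 8.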
5059 
5060   auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements);
5061   auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements);
5062 
5063   LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF
5064                     << ".\n");
5065   LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF
5066                     << ".\n");
5067 
  // First analyze the UserVF, and fall back if the UserVF should be ignored.
5069   if (UserVF) {
5070     auto MaxSafeUserVF =
5071         UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;
5072 
5073     if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) {
5074       // If `VF=vscale x N` is safe, then so is `VF=N`
5075       if (UserVF.isScalable())
5076         return FixedScalableVFPair(
5077             ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF);
5078       else
5079         return UserVF;
5080     }
5081 
5082     assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));
5083 
5084     // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it
5085     // is better to ignore the hint and let the compiler choose a suitable VF.
5086     if (!UserVF.isScalable()) {
5087       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5088                         << " is unsafe, clamping to max safe VF="
5089                         << MaxSafeFixedVF << ".\n");
5090       ORE->emit([&]() {
5091         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5092                                           TheLoop->getStartLoc(),
5093                                           TheLoop->getHeader())
5094                << "User-specified vectorization factor "
5095                << ore::NV("UserVectorizationFactor", UserVF)
5096                << " is unsafe, clamping to maximum safe vectorization factor "
5097                << ore::NV("VectorizationFactor", MaxSafeFixedVF);
5098       });
5099       return MaxSafeFixedVF;
5100     }
5101 
5102     if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) {
5103       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5104                         << " is ignored because scalable vectors are not "
5105                            "available.\n");
5106       ORE->emit([&]() {
5107         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5108                                           TheLoop->getStartLoc(),
5109                                           TheLoop->getHeader())
5110                << "User-specified vectorization factor "
5111                << ore::NV("UserVectorizationFactor", UserVF)
5112                << " is ignored because the target does not support scalable "
5113                   "vectors. The compiler will pick a more suitable value.";
5114       });
5115     } else {
5116       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5117                         << " is unsafe. Ignoring scalable UserVF.\n");
5118       ORE->emit([&]() {
5119         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5120                                           TheLoop->getStartLoc(),
5121                                           TheLoop->getHeader())
5122                << "User-specified vectorization factor "
5123                << ore::NV("UserVectorizationFactor", UserVF)
5124                << " is unsafe. Ignoring the hint to let the compiler pick a "
5125                   "more suitable value.";
5126       });
5127     }
5128   }
5129 
5130   LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
5131                     << " / " << WidestType << " bits.\n");
5132 
5133   FixedScalableVFPair Result(ElementCount::getFixed(1),
5134                              ElementCount::getScalable(0));
5135   if (auto MaxVF =
5136           getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
5137                                   MaxSafeFixedVF, FoldTailByMasking))
5138     Result.FixedVF = MaxVF;
5139 
5140   if (auto MaxVF =
5141           getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
5142                                   MaxSafeScalableVF, FoldTailByMasking))
5143     if (MaxVF.isScalable()) {
5144       Result.ScalableVF = MaxVF;
5145       LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
5146                         << "\n");
5147     }
5148 
5149   return Result;
5150 }
5151 
5152 FixedScalableVFPair
5153 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
5154   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to vectorize anyway, since the runtime check is
    // still likely to be dynamically uniform if the target can skip it.
5157     reportVectorizationFailure(
5158         "Not inserting runtime ptr check for divergent target",
5159         "runtime pointer checks needed. Not enabled for divergent target",
5160         "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
5161     return FixedScalableVFPair::getNone();
5162   }
5163 
5164   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5165   LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
5166   if (TC == 1) {
5167     reportVectorizationFailure("Single iteration (non) loop",
5168         "loop trip count is one, irrelevant for vectorization",
5169         "SingleIterationLoop", ORE, TheLoop);
5170     return FixedScalableVFPair::getNone();
5171   }
5172 
5173   switch (ScalarEpilogueStatus) {
5174   case CM_ScalarEpilogueAllowed:
5175     return computeFeasibleMaxVF(TC, UserVF, false);
5176   case CM_ScalarEpilogueNotAllowedUsePredicate:
5177     LLVM_FALLTHROUGH;
5178   case CM_ScalarEpilogueNotNeededUsePredicate:
5179     LLVM_DEBUG(
5180         dbgs() << "LV: vector predicate hint/switch found.\n"
5181                << "LV: Not allowing scalar epilogue, creating predicated "
5182                << "vector loop.\n");
5183     break;
5184   case CM_ScalarEpilogueNotAllowedLowTripLoop:
5185     // fallthrough as a special case of OptForSize
5186   case CM_ScalarEpilogueNotAllowedOptSize:
5187     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
5188       LLVM_DEBUG(
5189           dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5190     else
5191       LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5192                         << "count.\n");
5193 
    // Bail if runtime checks are required, which are not good when optimizing
    // for size.
5196     if (runtimeChecksRequired())
5197       return FixedScalableVFPair::getNone();
5198 
5199     break;
5200   }
5201 
  // The only loops we can vectorize without a scalar epilogue are loops with
5203   // a bottom-test and a single exiting block. We'd have to handle the fact
5204   // that not every instruction executes on the last iteration.  This will
5205   // require a lane mask which varies through the vector loop body.  (TODO)
5206   if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5207     // If there was a tail-folding hint/switch, but we can't fold the tail by
5208     // masking, fallback to a vectorization with a scalar epilogue.
5209     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5210       LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5211                            "scalar epilogue instead.\n");
5212       ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5213       return computeFeasibleMaxVF(TC, UserVF, false);
5214     }
5215     return FixedScalableVFPair::getNone();
5216   }
5217 
  // Now try to fold the tail by masking.
5219 
5220   // Invalidate interleave groups that require an epilogue if we can't mask
5221   // the interleave-group.
5222   if (!useMaskedInterleavedAccesses(TTI)) {
5223     assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5224            "No decisions should have been taken at this point");
    // Note: There is no need to invalidate any cost modeling decisions here,
    // as none were taken so far.
5227     InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5228   }
5229 
5230   FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF, true);
5231   // Avoid tail folding if the trip count is known to be a multiple of any VF
5232   // we chose.
  // FIXME: The condition below pessimizes the case for fixed-width vectors,
5234   // when scalable VFs are also candidates for vectorization.
5235   if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) {
5236     ElementCount MaxFixedVF = MaxFactors.FixedVF;
5237     assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) &&
5238            "MaxFixedVF must be a power of 2");
5239     unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC
5240                                    : MaxFixedVF.getFixedValue();
5241     ScalarEvolution *SE = PSE.getSE();
5242     const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
5243     const SCEV *ExitCount = SE->getAddExpr(
5244         BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
5245     const SCEV *Rem = SE->getURemExpr(
5246         SE->applyLoopGuards(ExitCount, TheLoop),
5247         SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
5248     if (Rem->isZero()) {
5249       // Accept MaxFixedVF if we do not have a tail.
5250       LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
5251       return MaxFactors;
5252     }
5253   }
5254 
  // For scalable vectors, don't use tail folding for low trip counts or
  // when optimizing for code size, unless the user has explicitly requested
  // it.
5258   if (ScalarEpilogueStatus != CM_ScalarEpilogueNotNeededUsePredicate &&
5259       ScalarEpilogueStatus != CM_ScalarEpilogueNotAllowedUsePredicate &&
5260       MaxFactors.ScalableVF.isVector())
5261     MaxFactors.ScalableVF = ElementCount::getScalable(0);
5262 
5263   // If we don't know the precise trip count, or if the trip count that we
5264   // found modulo the vectorization factor is not zero, try to fold the tail
5265   // by masking.
5266   // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
5267   if (Legal->prepareToFoldTailByMasking()) {
5268     FoldTailByMasking = true;
5269     return MaxFactors;
5270   }
5271 
5272   // If there was a tail-folding hint/switch, but we can't fold the tail by
5273   // masking, fallback to a vectorization with a scalar epilogue.
5274   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5275     LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5276                          "scalar epilogue instead.\n");
5277     ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5278     return MaxFactors;
5279   }
5280 
5281   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
5282     LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
5283     return FixedScalableVFPair::getNone();
5284   }
5285 
5286   if (TC == 0) {
5287     reportVectorizationFailure(
5288         "Unable to calculate the loop count due to complex control flow",
5289         "unable to calculate the loop count due to complex control flow",
5290         "UnknownLoopCountComplexCFG", ORE, TheLoop);
5291     return FixedScalableVFPair::getNone();
5292   }
5293 
5294   reportVectorizationFailure(
5295       "Cannot optimize for size and vectorize at the same time.",
5296       "cannot optimize for size and vectorize at the same time. "
5297       "Enable vectorization of this loop with '#pragma clang loop "
5298       "vectorize(enable)' when compiling with -Os/-Oz",
5299       "NoTailLoopWithOptForSize", ORE, TheLoop);
5300   return FixedScalableVFPair::getNone();
5301 }
5302 
5303 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
5304     unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType,
5305     const ElementCount &MaxSafeVF, bool FoldTailByMasking) {
5306   bool ComputeScalableMaxVF = MaxSafeVF.isScalable();
5307   TypeSize WidestRegister = TTI.getRegisterBitWidth(
5308       ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
5309                            : TargetTransformInfo::RGK_FixedWidthVector);
5310 
5311   // Convenience function to return the minimum of two ElementCounts.
5312   auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) {
5313     assert((LHS.isScalable() == RHS.isScalable()) &&
5314            "Scalable flags must match");
5315     return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS;
5316   };
5317 
5318   // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
  // Note that both WidestRegister and WidestType may not be powers of 2.
5320   auto MaxVectorElementCount = ElementCount::get(
5321       PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType),
5322       ComputeScalableMaxVF);
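  // For example (hypothetical numbers): a 128-bit widest register and a
  // 32-bit widest type yield PowerOf2Floor(128 / 32) = 4 lanes, i.e. a
  // maximum element count of 4 (or vscale x 4 in the scalable case).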
5323   MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
5324   LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
5325                     << (MaxVectorElementCount * WidestType) << " bits.\n");
5326 
5327   if (!MaxVectorElementCount) {
5328     LLVM_DEBUG(dbgs() << "LV: The target has no "
5329                       << (ComputeScalableMaxVF ? "scalable" : "fixed")
5330                       << " vector registers.\n");
5331     return ElementCount::getFixed(1);
5332   }
5333 
5334   const auto TripCountEC = ElementCount::getFixed(ConstTripCount);
5335   if (ConstTripCount &&
5336       ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) &&
5337       (!FoldTailByMasking || isPowerOf2_32(ConstTripCount))) {
5338     // If loop trip count (TC) is known at compile time there is no point in
5339     // choosing VF greater than TC (as done in the loop below). Select maximum
5340     // power of two which doesn't exceed TC.
5341     // If MaxVectorElementCount is scalable, we only fall back on a fixed VF
5342     // when the TC is less than or equal to the known number of lanes.
5343     auto ClampedConstTripCount = PowerOf2Floor(ConstTripCount);
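    // For example (hypothetical numbers): a constant trip count of 10 that
    // fits in the maximum element count clamps the VF to
    // PowerOf2Floor(10) = 8.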
5344     LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to maximum power of two not "
5345                          "exceeding the constant trip count: "
5346                       << ClampedConstTripCount << "\n");
5347     return ElementCount::getFixed(ClampedConstTripCount);
5348   }
5349 
5350   ElementCount MaxVF = MaxVectorElementCount;
5351   if (TTI.shouldMaximizeVectorBandwidth() ||
5352       (MaximizeBandwidth && isScalarEpilogueAllowed())) {
5353     auto MaxVectorElementCountMaxBW = ElementCount::get(
5354         PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType),
5355         ComputeScalableMaxVF);
5356     MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
5357 
5358     // Collect all viable vectorization factors larger than the default MaxVF
5359     // (i.e. MaxVectorElementCount).
5360     SmallVector<ElementCount, 8> VFs;
5361     for (ElementCount VS = MaxVectorElementCount * 2;
5362          ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2)
5363       VFs.push_back(VS);
5364 
5365     // For each VF calculate its register usage.
5366     auto RUs = calculateRegisterUsage(VFs);
5367 
5368     // Select the largest VF which doesn't require more registers than existing
5369     // ones.
5370     for (int i = RUs.size() - 1; i >= 0; --i) {
5371       bool Selected = true;
5372       for (auto &pair : RUs[i].MaxLocalUsers) {
5373         unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5374         if (pair.second > TargetNumRegisters)
5375           Selected = false;
5376       }
5377       if (Selected) {
5378         MaxVF = VFs[i];
5379         break;
5380       }
5381     }
5382     if (ElementCount MinVF =
5383             TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) {
5384       if (ElementCount::isKnownLT(MaxVF, MinVF)) {
5385         LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
5386                           << ") with target's minimum: " << MinVF << '\n');
5387         MaxVF = MinVF;
5388       }
5389     }
5390   }
5391   return MaxVF;
5392 }
5393 
5394 Optional<unsigned> LoopVectorizationCostModel::getVScaleForTuning() const {
5395   if (TheFunction->hasFnAttribute(Attribute::VScaleRange)) {
5396     auto Attr = TheFunction->getFnAttribute(Attribute::VScaleRange);
5397     auto Min = Attr.getVScaleRangeMin();
5398     auto Max = Attr.getVScaleRangeMax();
5399     if (Max && Min == Max)
5400       return Max;
5401   }
5402 
5403   return TTI.getVScaleForTuning();
5404 }
5405 
5406 bool LoopVectorizationCostModel::isMoreProfitable(
5407     const VectorizationFactor &A, const VectorizationFactor &B) const {
5408   InstructionCost CostA = A.Cost;
5409   InstructionCost CostB = B.Cost;
5410 
5411   unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop);
5412 
5413   if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking &&
5414       MaxTripCount) {
5415     // If we are folding the tail and the trip count is a known (possibly small)
5416     // constant, the trip count will be rounded up to an integer number of
5417     // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF),
5418     // which we compare directly. When not folding the tail, the total cost will
5419     // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is
5420     // approximated with the per-lane cost below instead of using the tripcount
5421     // as here.
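    // For example (hypothetical costs): with MaxTripCount = 10, a VF=4 plan
    // costing 12 totals 12 * ceil(10/4) = 36, while a VF=8 plan costing 20
    // totals 20 * ceil(10/8) = 40, so the VF=4 plan is preferred here.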
5422     auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue());
5423     auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue());
5424     return RTCostA < RTCostB;
5425   }
5426 
5427   // Improve estimate for the vector width if it is scalable.
5428   unsigned EstimatedWidthA = A.Width.getKnownMinValue();
5429   unsigned EstimatedWidthB = B.Width.getKnownMinValue();
5430   if (Optional<unsigned> VScale = getVScaleForTuning()) {
5431     if (A.Width.isScalable())
5432       EstimatedWidthA *= VScale.getValue();
5433     if (B.Width.isScalable())
5434       EstimatedWidthB *= VScale.getValue();
5435   }
5436 
5437   // Assume vscale may be larger than 1 (or the value being tuned for),
5438   // so that scalable vectorization is slightly favorable over fixed-width
5439   // vectorization.
5440   if (A.Width.isScalable() && !B.Width.isScalable())
5441     return (CostA * B.Width.getFixedValue()) <= (CostB * EstimatedWidthA);
5442 
5443   // To avoid the need for FP division:
5444   //      (CostA / A.Width) < (CostB / B.Width)
5445   // <=>  (CostA * B.Width) < (CostB * A.Width)
5446   return (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA);
5447 }
5448 
5449 VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor(
5450     const ElementCountSet &VFCandidates) {
5451   InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first;
5452   LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
5453   assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
5454   assert(VFCandidates.count(ElementCount::getFixed(1)) &&
5455          "Expected Scalar VF to be a candidate");
5456 
5457   const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost);
5458   VectorizationFactor ChosenFactor = ScalarCost;
5459 
5460   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
5461   if (ForceVectorization && VFCandidates.size() > 1) {
5462     // Ignore scalar width, because the user explicitly wants vectorization.
5463     // Initialize cost to max so that VF = 2 is, at least, chosen during cost
5464     // evaluation.
5465     ChosenFactor.Cost = InstructionCost::getMax();
5466   }
5467 
5468   SmallVector<InstructionVFPair> InvalidCosts;
5469   for (const auto &i : VFCandidates) {
5470     // The cost for scalar VF=1 is already calculated, so ignore it.
5471     if (i.isScalar())
5472       continue;
5473 
5474     VectorizationCostTy C = expectedCost(i, &InvalidCosts);
5475     VectorizationFactor Candidate(i, C.first);
5476 
5477 #ifndef NDEBUG
5478     unsigned AssumedMinimumVscale = 1;
5479     if (Optional<unsigned> VScale = getVScaleForTuning())
5480       AssumedMinimumVscale = VScale.getValue();
5481     unsigned Width =
5482         Candidate.Width.isScalable()
5483             ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale
5484             : Candidate.Width.getFixedValue();
5485     LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
5486                       << " costs: " << (Candidate.Cost / Width));
5487     if (i.isScalable())
5488       LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of "
5489                         << AssumedMinimumVscale << ")");
5490     LLVM_DEBUG(dbgs() << ".\n");
5491 #endif
5492 
5493     if (!C.second && !ForceVectorization) {
5494       LLVM_DEBUG(
5495           dbgs() << "LV: Not considering vector loop of width " << i
5496                  << " because it will not generate any vector instructions.\n");
5497       continue;
5498     }
5499 
    // If profitable, add it to the ProfitableVFs list.
5501     if (isMoreProfitable(Candidate, ScalarCost))
5502       ProfitableVFs.push_back(Candidate);
5503 
5504     if (isMoreProfitable(Candidate, ChosenFactor))
5505       ChosenFactor = Candidate;
5506   }
5507 
5508   // Emit a report of VFs with invalid costs in the loop.
5509   if (!InvalidCosts.empty()) {
5510     // Group the remarks per instruction, keeping the instruction order from
5511     // InvalidCosts.
5512     std::map<Instruction *, unsigned> Numbering;
5513     unsigned I = 0;
5514     for (auto &Pair : InvalidCosts)
5515       if (!Numbering.count(Pair.first))
5516         Numbering[Pair.first] = I++;
5517 
5518     // Sort the list, first on instruction(number) then on VF.
5519     llvm::sort(InvalidCosts,
5520                [&Numbering](InstructionVFPair &A, InstructionVFPair &B) {
5521                  if (Numbering[A.first] != Numbering[B.first])
5522                    return Numbering[A.first] < Numbering[B.first];
5523                  ElementCountComparator ECC;
5524                  return ECC(A.second, B.second);
5525                });
5526 
5527     // For a list of ordered instruction-vf pairs:
5528     //   [(load, vf1), (load, vf2), (store, vf1)]
5529     // Group the instructions together to emit separate remarks for:
5530     //   load  (vf1, vf2)
5531     //   store (vf1)
5532     auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts);
5533     auto Subset = ArrayRef<InstructionVFPair>();
5534     do {
5535       if (Subset.empty())
5536         Subset = Tail.take_front(1);
5537 
5538       Instruction *I = Subset.front().first;
5539 
5540       // If the next instruction is different, or if there are no other pairs,
5541       // emit a remark for the collated subset. e.g.
      //   [(load, vf1), (load, vf2)]
      // to emit:
      //  remark: invalid costs for 'load' at VF=(vf1, vf2)
5545       if (Subset == Tail || Tail[Subset.size()].first != I) {
5546         std::string OutString;
5547         raw_string_ostream OS(OutString);
5548         assert(!Subset.empty() && "Unexpected empty range");
5549         OS << "Instruction with invalid costs prevented vectorization at VF=(";
5550         for (auto &Pair : Subset)
5551           OS << (Pair.second == Subset.front().second ? "" : ", ")
5552              << Pair.second;
5553         OS << "):";
5554         if (auto *CI = dyn_cast<CallInst>(I))
5555           OS << " call to " << CI->getCalledFunction()->getName();
5556         else
5557           OS << " " << I->getOpcodeName();
5558         OS.flush();
5559         reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I);
5560         Tail = Tail.drop_front(Subset.size());
5561         Subset = {};
5562       } else
5563         // Grow the subset by one element
5564         Subset = Tail.take_front(Subset.size() + 1);
5565     } while (!Tail.empty());
5566   }
5567 
5568   if (!EnableCondStoresVectorization && NumPredStores) {
5569     reportVectorizationFailure("There are conditional stores.",
5570         "store that is conditionally executed prevents vectorization",
5571         "ConditionalStore", ORE, TheLoop);
5572     ChosenFactor = ScalarCost;
5573   }
5574 
5575   LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() &&
5576                  ChosenFactor.Cost >= ScalarCost.Cost) dbgs()
5577              << "LV: Vectorization seems to be not beneficial, "
5578              << "but was forced by a user.\n");
5579   LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n");
5580   return ChosenFactor;
5581 }
5582 
5583 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization(
5584     const Loop &L, ElementCount VF) const {
5585   // Cross iteration phis such as reductions need special handling and are
5586   // currently unsupported.
5587   if (any_of(L.getHeader()->phis(),
5588              [&](PHINode &Phi) { return Legal->isFirstOrderRecurrence(&Phi); }))
5589     return false;
5590 
5591   // Phis with uses outside of the loop require special handling and are
5592   // currently unsupported.
5593   for (auto &Entry : Legal->getInductionVars()) {
5594     // Look for uses of the value of the induction at the last iteration.
5595     Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch());
5596     for (User *U : PostInc->users())
5597       if (!L.contains(cast<Instruction>(U)))
5598         return false;
    // Look for uses of the penultimate value of the induction.
5600     for (User *U : Entry.first->users())
5601       if (!L.contains(cast<Instruction>(U)))
5602         return false;
5603   }
5604 
5605   // Induction variables that are widened require special handling that is
5606   // currently not supported.
5607   if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
5608         return !(this->isScalarAfterVectorization(Entry.first, VF) ||
5609                  this->isProfitableToScalarize(Entry.first, VF));
5610       }))
5611     return false;
5612 
  // Epilogue vectorization code has not been audited to ensure it handles
  // non-latch exits properly.  It may be fine, but it needs to be audited
  // and tested.
5616   if (L.getExitingBlock() != L.getLoopLatch())
5617     return false;
5618 
5619   return true;
5620 }
5621 
5622 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
5623     const ElementCount VF) const {
5624   // FIXME: We need a much better cost-model to take different parameters such
5625   // as register pressure, code size increase and cost of extra branches into
5626   // account. For now we apply a very crude heuristic and only consider loops
5627   // with vectorization factors larger than a certain value.
5628   // We also consider epilogue vectorization unprofitable for targets that don't
  // consider interleaving beneficial (e.g. MVE).
5630   if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
5631     return false;
5632   // FIXME: We should consider changing the threshold for scalable
5633   // vectors to take VScaleForTuning into account.
5634   if (VF.getKnownMinValue() >= EpilogueVectorizationMinVF)
5635     return true;
5636   return false;
5637 }
5638 
5639 VectorizationFactor
5640 LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
5641     const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
5642   VectorizationFactor Result = VectorizationFactor::Disabled();
5643   if (!EnableEpilogueVectorization) {
5644     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
5645     return Result;
5646   }
5647 
5648   if (!isScalarEpilogueAllowed()) {
5649     LLVM_DEBUG(
5650         dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
5651                   "allowed.\n";);
5652     return Result;
5653   }
5654 
5655   // Not really a cost consideration, but check for unsupported cases here to
5656   // simplify the logic.
5657   if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
5658     LLVM_DEBUG(
5659         dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
5660                   "not a supported candidate.\n";);
5661     return Result;
5662   }
5663 
5664   if (EpilogueVectorizationForceVF > 1) {
5665     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
    ElementCount ForcedEC =
        ElementCount::getFixed(EpilogueVectorizationForceVF);
5667     if (LVP.hasPlanWithVF(ForcedEC))
5668       return {ForcedEC, 0};
5669     else {
5670       LLVM_DEBUG(
5671           dbgs()
5672               << "LEV: Epilogue vectorization forced factor is not viable.\n";);
5673       return Result;
5674     }
5675   }
5676 
5677   if (TheLoop->getHeader()->getParent()->hasOptSize() ||
5678       TheLoop->getHeader()->getParent()->hasMinSize()) {
5679     LLVM_DEBUG(
5680         dbgs()
5681             << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
5682     return Result;
5683   }
5684 
5685   if (!isEpilogueVectorizationProfitable(MainLoopVF)) {
5686     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for "
5687                          "this loop\n");
5688     return Result;
5689   }
5690 
5691   // If MainLoopVF = vscale x 2, and vscale is expected to be 4, then we know
5692   // the main loop handles 8 lanes per iteration. We could still benefit from
5693   // vectorizing the epilogue loop with VF=4.
5694   ElementCount EstimatedRuntimeVF = MainLoopVF;
5695   if (MainLoopVF.isScalable()) {
5696     EstimatedRuntimeVF = ElementCount::getFixed(MainLoopVF.getKnownMinValue());
5697     if (Optional<unsigned> VScale = getVScaleForTuning())
5698       EstimatedRuntimeVF *= VScale.getValue();
5699   }
5700 
5701   for (auto &NextVF : ProfitableVFs)
5702     if (((!NextVF.Width.isScalable() && MainLoopVF.isScalable() &&
5703           ElementCount::isKnownLT(NextVF.Width, EstimatedRuntimeVF)) ||
5704          ElementCount::isKnownLT(NextVF.Width, MainLoopVF)) &&
5705         (Result.Width.isScalar() || isMoreProfitable(NextVF, Result)) &&
5706         LVP.hasPlanWithVF(NextVF.Width))
5707       Result = NextVF;
5708 
5709   if (Result != VectorizationFactor::Disabled())
5710     LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
5711                       << Result.Width << "\n";);
5712   return Result;
5713 }
5714 
5715 std::pair<unsigned, unsigned>
5716 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
5717   unsigned MinWidth = -1U;
5718   unsigned MaxWidth = 8;
5719   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5720   // For in-loop reductions, no element types are added to ElementTypesInLoop
5721   // if there are no loads/stores in the loop. In this case, check through the
5722   // reduction variables to determine the maximum width.
5723   if (ElementTypesInLoop.empty() && !Legal->getReductionVars().empty()) {
    // Reset MaxWidth so that we can compute it as the smallest type width
    // used by recurrences in the loop.
5726     MaxWidth = -1U;
5727     for (auto &PhiDescriptorPair : Legal->getReductionVars()) {
5728       const RecurrenceDescriptor &RdxDesc = PhiDescriptorPair.second;
5729       // When finding the min width used by the recurrence we need to account
5730       // for casts on the input operands of the recurrence.
5731       MaxWidth = std::min<unsigned>(
5732           MaxWidth, std::min<unsigned>(
5733                         RdxDesc.getMinWidthCastToRecurrenceTypeInBits(),
5734                         RdxDesc.getRecurrenceType()->getScalarSizeInBits()));
5735     }
5736   } else {
5737     for (Type *T : ElementTypesInLoop) {
5738       MinWidth = std::min<unsigned>(
5739           MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
5740       MaxWidth = std::max<unsigned>(
5741           MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
5742     }
5743   }
5744   return {MinWidth, MaxWidth};
5745 }
5746 
5747 void LoopVectorizationCostModel::collectElementTypesForWidening() {
5748   ElementTypesInLoop.clear();
5749   // For each block.
5750   for (BasicBlock *BB : TheLoop->blocks()) {
5751     // For each instruction in the loop.
5752     for (Instruction &I : BB->instructionsWithoutDebug()) {
5753       Type *T = I.getType();
5754 
5755       // Skip ignored values.
5756       if (ValuesToIgnore.count(&I))
5757         continue;
5758 
5759       // Only examine Loads, Stores and PHINodes.
5760       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
5761         continue;
5762 
5763       // Examine PHI nodes that are reduction variables. Update the type to
5764       // account for the recurrence type.
5765       if (auto *PN = dyn_cast<PHINode>(&I)) {
5766         if (!Legal->isReductionVariable(PN))
5767           continue;
5768         const RecurrenceDescriptor &RdxDesc =
5769             Legal->getReductionVars().find(PN)->second;
5770         if (PreferInLoopReductions || useOrderedReductions(RdxDesc) ||
5771             TTI.preferInLoopReduction(RdxDesc.getOpcode(),
5772                                       RdxDesc.getRecurrenceType(),
5773                                       TargetTransformInfo::ReductionFlags()))
5774           continue;
5775         T = RdxDesc.getRecurrenceType();
5776       }
5777 
5778       // Examine the stored values.
5779       if (auto *ST = dyn_cast<StoreInst>(&I))
5780         T = ST->getValueOperand()->getType();
5781 
5782       assert(T->isSized() &&
5783              "Expected the load/store/recurrence type to be sized");
5784 
5785       ElementTypesInLoop.insert(T);
5786     }
5787   }
5788 }
5789 
5790 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
5791                                                            unsigned LoopCost) {
5792   // -- The interleave heuristics --
5793   // We interleave the loop in order to expose ILP and reduce the loop overhead.
5794   // There are many micro-architectural considerations that we can't predict
5795   // at this level. For example, frontend pressure (on decode or fetch) due to
5796   // code size, or the number and capabilities of the execution ports.
5797   //
5798   // We use the following heuristics to select the interleave count:
5799   // 1. If the code has reductions, then we interleave to break the cross
5800   // iteration dependency.
5801   // 2. If the loop is really small, then we interleave to reduce the loop
5802   // overhead.
5803   // 3. We don't interleave if we think that we will spill registers to memory
5804   // due to the increased register pressure.
5805 
5806   if (!isScalarEpilogueAllowed())
5807     return 1;
5808 
  // The maximum safe dependence distance already bounds how many elements
  // can safely be processed per iteration, so do not interleave on top of it.
5810   if (Legal->getMaxSafeDepDistBytes() != -1U)
5811     return 1;
5812 
5813   auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
5814   const bool HasReductions = !Legal->getReductionVars().empty();
  // Do not interleave loops with a relatively small known or estimated trip
  // count. But we will interleave when InterleaveSmallLoopScalarReduction is
  // enabled, and the code has scalar reductions (HasReductions && VF == 1),
  // because with the above conditions interleaving can expose ILP and break
  // cross-iteration dependences for reductions.
5820   if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
5821       !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
5822     return 1;
5823 
5824   RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these constants, so assume that we have at least one
  // instruction that uses at least one register.
5827   for (auto& pair : R.MaxLocalUsers) {
5828     pair.second = std::max(pair.second, 1U);
5829   }
5830 
5831   // We calculate the interleave count using the following formula.
5832   // Subtract the number of loop invariants from the number of available
5833   // registers. These registers are used by all of the interleaved instances.
5834   // Next, divide the remaining registers by the number of registers that is
5835   // required by the loop, in order to estimate how many parallel instances
5836   // fit without causing spills. All of this is rounded down if necessary to be
  // a power of two. We want a power-of-two interleave count to simplify any
5838   // addressing operations or alignment considerations.
5839   // We also want power of two interleave counts to ensure that the induction
5840   // variable of the vector loop wraps to zero, when tail is folded by masking;
5841   // this currently happens when OptForSize, in which case IC is set to 1 above.
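  // As an illustrative sketch (hypothetical numbers): with 32 registers in
  // a class, 2 loop-invariant values and at most 5 live values per instance,
  // (32 - 2) / 5 = 6 parallel instances fit, which rounds down to an
  // interleave count of 4.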
5842   unsigned IC = UINT_MAX;
5843 
5844   for (auto& pair : R.MaxLocalUsers) {
5845     unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
    LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
                      << " registers of "
                      << TTI.getRegisterClassName(pair.first)
                      << " register class\n");
5849     if (VF.isScalar()) {
5850       if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
5851         TargetNumRegisters = ForceTargetNumScalarRegs;
5852     } else {
5853       if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
5854         TargetNumRegisters = ForceTargetNumVectorRegs;
5855     }
5856     unsigned MaxLocalUsers = pair.second;
5857     unsigned LoopInvariantRegs = 0;
5858     if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
5859       LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
5860 
    unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) /
                                   MaxLocalUsers);
5862     // Don't count the induction variable as interleaved.
5863     if (EnableIndVarRegisterHeur) {
5864       TmpIC =
5865           PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
5866                         std::max(1U, (MaxLocalUsers - 1)));
5867     }
5868 
5869     IC = std::min(IC, TmpIC);
5870   }
5871 
5872   // Clamp the interleave ranges to reasonable counts.
5873   unsigned MaxInterleaveCount =
5874       TTI.getMaxInterleaveFactor(VF.getKnownMinValue());
5875 
5876   // Check if the user has overridden the max.
5877   if (VF.isScalar()) {
5878     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
5879       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
5880   } else {
5881     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
5882       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
5883   }
5884 
  // If the trip count is a known or estimated compile-time constant, limit
  // the interleave count to be less than the trip count divided by VF,
  // provided it is at least 1.
  //
  // For scalable vectors we can't know if interleaving is beneficial. It may
  // not be beneficial for small loops if none of the lanes in the second
  // vector iteration is enabled. However, for larger loops, there is likely
  // to be a similar benefit as for fixed-width vectors. For now, we choose to
  // leave the InterleaveCount as if vscale is '1', although if some
  // information about the vector is known (e.g. min vector size), we can make
  // a better decision.
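  // For example (hypothetical numbers): BestKnownTC = 100 with a known
  // minimum VF of 4 limits MaxInterleaveCount to at most 100 / 4 = 25.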
5895   if (BestKnownTC) {
5896     MaxInterleaveCount =
5897         std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
5898     // Make sure MaxInterleaveCount is greater than 0.
5899     MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
5900   }
5901 
5902   assert(MaxInterleaveCount > 0 &&
5903          "Maximum interleave count must be greater than 0");
5904 
  // Clamp the calculated IC to be between 1 and the max interleave count
  // that the target and trip count allow.
5907   if (IC > MaxInterleaveCount)
5908     IC = MaxInterleaveCount;
5909   else
5910     // Make sure IC is greater than 0.
5911     IC = std::max(1u, IC);
5912 
5913   assert(IC > 0 && "Interleave count must be greater than 0.");
5914 
  // If we did not calculate the cost for VF (because the user selected the
  // VF), then we calculate the cost of VF here.
5917   if (LoopCost == 0) {
5918     InstructionCost C = expectedCost(VF).first;
5919     assert(C.isValid() && "Expected to have chosen a VF with valid cost");
5920     LoopCost = *C.getValue();
5921   }
5922 
5923   assert(LoopCost && "Non-zero loop cost expected");
5924 
5925   // Interleave if we vectorized this loop and there is a reduction that could
5926   // benefit from interleaving.
5927   if (VF.isVector() && HasReductions) {
5928     LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
5929     return IC;
5930   }
5931 
5932   // For any scalar loop that either requires runtime checks or predication we
5933   // are better off leaving this to the unroller. Note that if we've already
5934   // vectorized the loop we will have done the runtime check and so interleaving
5935   // won't require further checks.
5936   bool ScalarInterleavingRequiresPredication =
5937       (VF.isScalar() && any_of(TheLoop->blocks(), [this](BasicBlock *BB) {
5938          return Legal->blockNeedsPredication(BB);
5939        }));
5940   bool ScalarInterleavingRequiresRuntimePointerCheck =
5941       (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
5942 
5943   // We want to interleave small loops in order to reduce the loop overhead and
5944   // potentially expose ILP opportunities.
5945   LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
5946                     << "LV: IC is " << IC << '\n'
5947                     << "LV: VF is " << VF << '\n');
5948   const bool AggressivelyInterleaveReductions =
5949       TTI.enableAggressiveInterleaving(HasReductions);
5950   if (!ScalarInterleavingRequiresRuntimePointerCheck &&
5951       !ScalarInterleavingRequiresPredication && LoopCost < SmallLoopCost) {
    // We assume that the loop overhead costs 1, use the cost model to
    // estimate the cost of the loop body, and interleave until the loop
    // overhead is about 5% of the total loop cost.
5955     unsigned SmallIC =
5956         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
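    // For example (hypothetical costs): SmallLoopCost = 20 with LoopCost = 3
    // gives SmallIC = min(IC, PowerOf2Floor(20 / 3)) = min(IC, 4).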
5957 
5958     // Interleave until store/load ports (estimated by max interleave count) are
5959     // saturated.
5960     unsigned NumStores = Legal->getNumStores();
5961     unsigned NumLoads = Legal->getNumLoads();
5962     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
5963     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
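    // For example, an IC of 8 for a loop with 2 stores and 3 loads gives
    // StoresIC = 4 and LoadsIC = 2 (integer division).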
5964 
5965     // There is little point in interleaving for reductions containing selects
5966     // and compares when VF=1 since it may just create more overhead than it's
5967     // worth for loops with small trip counts. This is because we still have to
5968     // do the final reduction after the loop.
5969     bool HasSelectCmpReductions =
5970         HasReductions &&
5971         any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
5972           const RecurrenceDescriptor &RdxDesc = Reduction.second;
5973           return RecurrenceDescriptor::isSelectCmpRecurrenceKind(
5974               RdxDesc.getRecurrenceKind());
5975         });
5976     if (HasSelectCmpReductions) {
5977       LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n");
5978       return 1;
5979     }
5980 
5981     // If we have a scalar reduction (vector reductions are already dealt with
5982     // by this point), we can increase the critical path length if the loop
5983     // we're interleaving is inside another loop. For tree-wise reductions
5984     // set the limit to 2, and for ordered reductions it's best to disable
5985     // interleaving entirely.
5986     if (HasReductions && TheLoop->getLoopDepth() > 1) {
5987       bool HasOrderedReductions =
5988           any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
5989             const RecurrenceDescriptor &RdxDesc = Reduction.second;
5990             return RdxDesc.isOrdered();
5991           });
5992       if (HasOrderedReductions) {
5993         LLVM_DEBUG(
5994             dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
5995         return 1;
5996       }
5997 
5998       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
5999       SmallIC = std::min(SmallIC, F);
6000       StoresIC = std::min(StoresIC, F);
6001       LoadsIC = std::min(LoadsIC, F);
6002     }
6003 
6004     if (EnableLoadStoreRuntimeInterleave &&
6005         std::max(StoresIC, LoadsIC) > SmallIC) {
6006       LLVM_DEBUG(
6007           dbgs() << "LV: Interleaving to saturate store or load ports.\n");
6008       return std::max(StoresIC, LoadsIC);
6009     }
6010 
6011     // If there are scalar reductions and TTI has enabled aggressive
6012     // interleaving for reductions, we will interleave to expose ILP.
6013     if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
6014         AggressivelyInterleaveReductions) {
6015       LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
      // Interleave no less than SmallIC, but not as aggressively as the
      // normal IC, to account for the rare case where resources are limited.
6018       return std::max(IC / 2, SmallIC);
    }

    LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
    return SmallIC;
6023   }
6024 
6025   // Interleave if this is a large loop (small loops are already dealt with by
6026   // this point) that could benefit from interleaving.
6027   if (AggressivelyInterleaveReductions) {
6028     LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6029     return IC;
6030   }
6031 
6032   LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
6033   return 1;
6034 }
6035 
6036 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
6037 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
6038   // This function calculates the register usage by measuring the highest number
6039   // of values that are alive at a single location. Obviously, this is a very
  // rough estimation. We scan the loop in topological order and assign a
  // number to each instruction. We use RPO to ensure that defs are
6042   // met before their users. We assume that each instruction that has in-loop
6043   // users starts an interval. We record every time that an in-loop value is
6044   // used, so we have a list of the first and last occurrences of each
6045   // instruction. Next, we transpose this data structure into a multi map that
6046   // holds the list of intervals that *end* at a specific location. This multi
6047   // map allows us to perform a linear search. We scan the instructions linearly
6048   // and record each time that a new interval starts, by placing it in a set.
6049   // If we find this value in the multi-map then we remove it from the set.
6050   // The max register usage is the maximum size of the set.
6051   // We also search for instructions that are defined outside the loop, but are
  // used inside the loop. We need this number separately from the
  // max-interval usage number because, when we unroll, loop-invariant values
  // do not consume additional registers.
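  //
  // For example, given the straight-line body
  //   %a = load ...
  //   %b = load ...
  //   %c = add %a, %b
  // both loaded values are live when the add is reached, so they are counted
  // as two simultaneously open intervals at that point.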
6055   LoopBlocksDFS DFS(TheLoop);
6056   DFS.perform(LI);
6057 
6058   RegisterUsage RU;
6059 
6060   // Each 'key' in the map opens a new interval. The values
6061   // of the map are the index of the 'last seen' usage of the
6062   // instruction that is the key.
6063   using IntervalMap = DenseMap<Instruction *, unsigned>;
6064 
6065   // Maps instruction to its index.
6066   SmallVector<Instruction *, 64> IdxToInstr;
6067   // Marks the end of each interval.
6068   IntervalMap EndPoint;
  // Saves the set of instructions that are used in the loop.
6070   SmallPtrSet<Instruction *, 8> Ends;
6071   // Saves the list of values that are used in the loop but are
6072   // defined outside the loop, such as arguments and constants.
6073   SmallPtrSet<Value *, 8> LoopInvariants;
6074 
6075   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
6076     for (Instruction &I : BB->instructionsWithoutDebug()) {
6077       IdxToInstr.push_back(&I);
6078 
6079       // Save the end location of each USE.
6080       for (Value *U : I.operands()) {
6081         auto *Instr = dyn_cast<Instruction>(U);
6082 
6083         // Ignore non-instruction values such as arguments, constants, etc.
6084         if (!Instr)
6085           continue;
6086 
6087         // If this instruction is outside the loop then record it and continue.
6088         if (!TheLoop->contains(Instr)) {
6089           LoopInvariants.insert(Instr);
6090           continue;
6091         }
6092 
6093         // Overwrite previous end points.
6094         EndPoint[Instr] = IdxToInstr.size();
6095         Ends.insert(Instr);
6096       }
6097     }
6098   }
6099 
6100   // Saves the list of intervals that end with the index in 'key'.
6101   using InstrList = SmallVector<Instruction *, 2>;
6102   DenseMap<unsigned, InstrList> TransposeEnds;
6103 
6104   // Transpose the EndPoints to a list of values that end at each index.
6105   for (auto &Interval : EndPoint)
6106     TransposeEnds[Interval.second].push_back(Interval.first);
6107 
6108   SmallPtrSet<Instruction *, 8> OpenIntervals;
6109   SmallVector<RegisterUsage, 8> RUs(VFs.size());
6110   SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());
6111 
6112   LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
6113 
6114   // A lambda that gets the register usage for the given type and VF.
6115   const auto &TTICapture = TTI;
6116   auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned {
6117     if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty))
6118       return 0;
6119     InstructionCost::CostType RegUsage =
6120         *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue();
6121     assert(RegUsage >= 0 && RegUsage <= std::numeric_limits<unsigned>::max() &&
6122            "Nonsensical values for register usage.");
6123     return RegUsage;
6124   };
6125 
  for (unsigned i = 0, s = IdxToInstr.size(); i < s; ++i) {
6127     Instruction *I = IdxToInstr[i];
6128 
6129     // Remove all of the instructions that end at this location.
6130     InstrList &List = TransposeEnds[i];
6131     for (Instruction *ToRemove : List)
6132       OpenIntervals.erase(ToRemove);
6133 
6134     // Ignore instructions that are never used within the loop.
6135     if (!Ends.count(I))
6136       continue;
6137 
6138     // Skip ignored values.
6139     if (ValuesToIgnore.count(I))
6140       continue;
6141 
6142     // For each VF find the maximum usage of registers.
6143     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
6144       // Count the number of live intervals.
6145       SmallMapVector<unsigned, unsigned, 4> RegUsage;
6146 
      if (VFs[j].isScalar()) {
        for (auto *Inst : OpenIntervals) {
          unsigned ClassID =
              TTI.getRegisterClassForType(false, Inst->getType());
          // Map entries are value-initialized to zero, so a plain increment
          // also handles the first occurrence of a register class.
          RegUsage[ClassID] += 1;
        }
      } else {
        collectUniformsAndScalars(VFs[j]);
        for (auto *Inst : OpenIntervals) {
          // Skip ignored values for VF > 1.
          if (VecValuesToIgnore.count(Inst))
            continue;
          if (isScalarAfterVectorization(Inst, VFs[j])) {
            unsigned ClassID =
                TTI.getRegisterClassForType(false, Inst->getType());
            RegUsage[ClassID] += 1;
          } else {
            unsigned ClassID =
                TTI.getRegisterClassForType(true, Inst->getType());
            RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
          }
        }
      }
6176 
      for (const auto &Pair : RegUsage) {
        auto &Entry = MaxUsages[j][Pair.first];
        Entry = std::max(Entry, Pair.second);
      }
6183     }
6184 
6185     LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
6186                       << OpenIntervals.size() << '\n');
6187 
6188     // Add the current instruction to the list of open intervals.
6189     OpenIntervals.insert(I);
6190   }
6191 
6192   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
6193     SmallMapVector<unsigned, unsigned, 4> Invariant;
6194 
    for (auto *Inst : LoopInvariants) {
      unsigned Usage =
          VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
      unsigned ClassID =
          TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
      Invariant[ClassID] += Usage;
    }
6205 
6206     LLVM_DEBUG({
6207       dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
6208       dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
6209              << " item\n";
6210       for (const auto &pair : MaxUsages[i]) {
6211         dbgs() << "LV(REG): RegisterClass: "
6212                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6213                << " registers\n";
6214       }
6215       dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
6216              << " item\n";
6217       for (const auto &pair : Invariant) {
6218         dbgs() << "LV(REG): RegisterClass: "
6219                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6220                << " registers\n";
6221       }
6222     });
6223 
6224     RU.LoopInvariantRegs = Invariant;
6225     RU.MaxLocalUsers = MaxUsages[i];
6226     RUs[i] = RU;
6227   }
6228 
6229   return RUs;
6230 }
6231 
6232 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I,
6233                                                            ElementCount VF) {
6234   // TODO: Cost model for emulated masked load/store is completely
6235   // broken. This hack guides the cost model to use an artificially
6236   // high enough value to practically disable vectorization with such
6237   // operations, except where previously deployed legality hack allowed
6238   // using very low cost values. This is to avoid regressions coming simply
6239   // from moving "masked load/store" check from legality to cost model.
  // Emulated masked load/gather was previously never allowed, while a
  // limited number of emulated masked store/scatter operations were.
6242   assert(isPredicatedInst(I, VF) && "Expecting a scalar emulated instruction");
6243   return isa<LoadInst>(I) ||
6244          (isa<StoreInst>(I) &&
6245           NumPredStores > NumberOfStoresToPredicate);
6246 }
6247 
6248 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
6249   // If we aren't vectorizing the loop, or if we've already collected the
6250   // instructions to scalarize, there's nothing to do. Collection may already
6251   // have occurred if we have a user-selected VF and are now computing the
6252   // expected cost for interleaving.
6253   if (VF.isScalar() || VF.isZero() ||
6254       InstsToScalarize.find(VF) != InstsToScalarize.end())
6255     return;
6256 
  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
6258   // not profitable to scalarize any instructions, the presence of VF in the
6259   // map will indicate that we've analyzed it already.
6260   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
6261 
6262   // Find all the instructions that are scalar with predication in the loop and
6263   // determine if it would be better to not if-convert the blocks they are in.
6264   // If so, we also record the instructions to scalarize.
6265   for (BasicBlock *BB : TheLoop->blocks()) {
6266     if (!blockNeedsPredicationForAnyReason(BB))
6267       continue;
6268     for (Instruction &I : *BB)
6269       if (isScalarWithPredication(&I, VF)) {
6270         ScalarCostsTy ScalarCosts;
6271         // Do not apply discount if scalable, because that would lead to
6272         // invalid scalarization costs.
6273         // Do not apply discount logic if hacked cost is needed
6274         // for emulated masked memrefs.
6275         if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I, VF) &&
6276             computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
6277           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
6278         // Remember that BB will remain after vectorization.
6279         PredicatedBBsAfterVectorization.insert(BB);
6280       }
6281   }
6282 }
6283 
6284 int LoopVectorizationCostModel::computePredInstDiscount(
6285     Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
6286   assert(!isUniformAfterVectorization(PredInst, VF) &&
6287          "Instruction marked uniform-after-vectorization will be predicated");
6288 
6289   // Initialize the discount to zero, meaning that the scalar version and the
6290   // vector version cost the same.
6291   InstructionCost Discount = 0;
6292 
6293   // Holds instructions to analyze. The instructions we visit are mapped in
6294   // ScalarCosts. Those instructions are the ones that would be scalarized if
6295   // we find that the scalar version costs less.
6296   SmallVector<Instruction *, 8> Worklist;
6297 
6298   // Returns true if the given instruction can be scalarized.
6299   auto canBeScalarized = [&](Instruction *I) -> bool {
6300     // We only attempt to scalarize instructions forming a single-use chain
6301     // from the original predicated block that would otherwise be vectorized.
6302     // Although not strictly necessary, we give up on instructions we know will
6303     // already be scalar to avoid traversing chains that are unlikely to be
6304     // beneficial.
6305     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
6306         isScalarAfterVectorization(I, VF))
6307       return false;
6308 
6309     // If the instruction is scalar with predication, it will be analyzed
6310     // separately. We ignore it within the context of PredInst.
6311     if (isScalarWithPredication(I, VF))
6312       return false;
6313 
6314     // If any of the instruction's operands are uniform after vectorization,
6315     // the instruction cannot be scalarized. This prevents, for example, a
6316     // masked load from being scalarized.
6317     //
6318     // We assume we will only emit a value for lane zero of an instruction
6319     // marked uniform after vectorization, rather than VF identical values.
6320     // Thus, if we scalarize an instruction that uses a uniform, we would
6321     // create uses of values corresponding to the lanes we aren't emitting code
6322     // for. This behavior can be changed by allowing getScalarValue to clone
6323     // the lane zero values for uniforms rather than asserting.
6324     for (Use &U : I->operands())
6325       if (auto *J = dyn_cast<Instruction>(U.get()))
6326         if (isUniformAfterVectorization(J, VF))
6327           return false;
6328 
6329     // Otherwise, we can scalarize the instruction.
6330     return true;
6331   };
6332 
6333   // Compute the expected cost discount from scalarizing the entire expression
6334   // feeding the predicated instruction. We currently only consider expressions
6335   // that are single-use instruction chains.
6336   Worklist.push_back(PredInst);
6337   while (!Worklist.empty()) {
6338     Instruction *I = Worklist.pop_back_val();
6339 
6340     // If we've already analyzed the instruction, there's nothing to do.
6341     if (ScalarCosts.find(I) != ScalarCosts.end())
6342       continue;
6343 
6344     // Compute the cost of the vector instruction. Note that this cost already
6345     // includes the scalarization overhead of the predicated instruction.
6346     InstructionCost VectorCost = getInstructionCost(I, VF).first;
6347 
6348     // Compute the cost of the scalarized instruction. This cost is the cost of
6349     // the instruction as if it wasn't if-converted and instead remained in the
6350     // predicated block. We will scale this cost by block probability after
6351     // computing the scalarization overhead.
6352     InstructionCost ScalarCost =
6353         VF.getFixedValue() *
6354         getInstructionCost(I, ElementCount::getFixed(1)).first;
6355 
6356     // Compute the scalarization overhead of needed insertelement instructions
6357     // and phi nodes.
6358     if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) {
6359       ScalarCost += TTI.getScalarizationOverhead(
6360           cast<VectorType>(ToVectorTy(I->getType(), VF)),
6361           APInt::getAllOnes(VF.getFixedValue()), true, false);
6362       ScalarCost +=
6363           VF.getFixedValue() *
6364           TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput);
6365     }
6366 
6367     // Compute the scalarization overhead of needed extractelement
6368     // instructions. For each of the instruction's operands, if the operand can
6369     // be scalarized, add it to the worklist; otherwise, account for the
6370     // overhead.
6371     for (Use &U : I->operands())
6372       if (auto *J = dyn_cast<Instruction>(U.get())) {
6373         assert(VectorType::isValidElementType(J->getType()) &&
6374                "Instruction has non-scalar type");
6375         if (canBeScalarized(J))
6376           Worklist.push_back(J);
6377         else if (needsExtract(J, VF)) {
6378           ScalarCost += TTI.getScalarizationOverhead(
6379               cast<VectorType>(ToVectorTy(J->getType(), VF)),
6380               APInt::getAllOnes(VF.getFixedValue()), false, true);
6381         }
6382       }
6383 
6384     // Scale the total scalar cost by block probability.
6385     ScalarCost /= getReciprocalPredBlockProb();
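    // getReciprocalPredBlockProb() currently returns 2, i.e. a predicated
    // block is assumed to execute on roughly half of the iterations, so this
    // halves the accumulated scalar cost.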
6386 
6387     // Compute the discount. A non-negative discount means the vector version
6388     // of the instruction costs more, and scalarizing would be beneficial.
6389     Discount += VectorCost - ScalarCost;
6390     ScalarCosts[I] = ScalarCost;
6391   }
6392 
6393   return *Discount.getValue();
6394 }
6395 
6396 LoopVectorizationCostModel::VectorizationCostTy
6397 LoopVectorizationCostModel::expectedCost(
6398     ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) {
6399   VectorizationCostTy Cost;
6400 
6401   // For each block.
6402   for (BasicBlock *BB : TheLoop->blocks()) {
6403     VectorizationCostTy BlockCost;
6404 
6405     // For each instruction in the old loop.
6406     for (Instruction &I : BB->instructionsWithoutDebug()) {
6407       // Skip ignored values.
6408       if (ValuesToIgnore.count(&I) ||
6409           (VF.isVector() && VecValuesToIgnore.count(&I)))
6410         continue;
6411 
6412       VectorizationCostTy C = getInstructionCost(&I, VF);
6413 
6414       // Check if we should override the cost.
6415       if (C.first.isValid() &&
6416           ForceTargetInstructionCost.getNumOccurrences() > 0)
6417         C.first = InstructionCost(ForceTargetInstructionCost);
6418 
6419       // Keep a list of instructions with invalid costs.
6420       if (Invalid && !C.first.isValid())
6421         Invalid->emplace_back(&I, VF);
6422 
6423       BlockCost.first += C.first;
6424       BlockCost.second |= C.second;
6425       LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
6426                         << " for VF " << VF << " For instruction: " << I
6427                         << '\n');
6428     }
6429 
6430     // If we are vectorizing a predicated block, it will have been
6431     // if-converted. This means that the block's instructions (aside from
6432     // stores and instructions that may divide by zero) will now be
6433     // unconditionally executed. For the scalar case, we may not always execute
6434     // the predicated block, if it is an if-else block. Thus, scale the block's
6435     // cost by the probability of executing it. blockNeedsPredication from
6436     // Legal is used so as to not include all blocks in tail folded loops.
6437     if (VF.isScalar() && Legal->blockNeedsPredication(BB))
6438       BlockCost.first /= getReciprocalPredBlockProb();
6439 
6440     Cost.first += BlockCost.first;
6441     Cost.second |= BlockCost.second;
6442   }
6443 
6444   return Cost;
6445 }
6446 
6447 /// Gets Address Access SCEV after verifying that the access pattern
6448 /// is loop invariant except the induction variable dependence.
6449 ///
6450 /// This SCEV can be sent to the Target in order to estimate the address
6451 /// calculation cost.
static const SCEV *getAddressAccessSCEV(Value *Ptr,
                                        LoopVectorizationLegality *Legal,
                                        PredicatedScalarEvolution &PSE,
                                        const Loop *TheLoop) {
  auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
6459   if (!Gep)
6460     return nullptr;
6461 
6462   // We are looking for a gep with all loop invariant indices except for one
6463   // which should be an induction variable.
6464   auto SE = PSE.getSE();
6465   unsigned NumOperands = Gep->getNumOperands();
6466   for (unsigned i = 1; i < NumOperands; ++i) {
6467     Value *Opd = Gep->getOperand(i);
6468     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
6469         !Legal->isInductionVariable(Opd))
6470       return nullptr;
6471   }
6472 
  // Now we know we have a GEP ptr, %inv, %ind, %inv. Return the Ptr SCEV.
6474   return PSE.getSCEV(Ptr);
6475 }
6476 
6477 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
6478   return Legal->hasStride(I->getOperand(0)) ||
6479          Legal->hasStride(I->getOperand(1));
6480 }
6481 
6482 InstructionCost
6483 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
6484                                                         ElementCount VF) {
6485   assert(VF.isVector() &&
6486          "Scalarization cost of instruction implies vectorization.");
6487   if (VF.isScalable())
6488     return InstructionCost::getInvalid();
6489 
6490   Type *ValTy = getLoadStoreType(I);
6491   auto SE = PSE.getSE();
6492 
6493   unsigned AS = getLoadStoreAddressSpace(I);
6494   Value *Ptr = getLoadStorePointerOperand(I);
6495   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
6496   // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost`
6497   //       that it is being called from this specific place.
6498 
  // Figure out whether the access is strided and get the stride value, if it
  // is known at compile time.
6501   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
6502 
6503   // Get the cost of the scalar memory instruction and address computation.
6504   InstructionCost Cost =
6505       VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
6506 
6507   // Don't pass *I here, since it is scalar but will actually be part of a
6508   // vectorized loop where the user of it is a vectorized instruction.
6509   const Align Alignment = getLoadStoreAlignment(I);
6510   Cost += VF.getKnownMinValue() *
6511           TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
6512                               AS, TTI::TCK_RecipThroughput);
6513 
6514   // Get the overhead of the extractelement and insertelement instructions
6515   // we might create due to scalarization.
6516   Cost += getScalarizationOverhead(I, VF);
6517 
6518   // If we have a predicated load/store, it will need extra i1 extracts and
6519   // conditional branches, but may not be executed for each vector lane. Scale
6520   // the cost by the probability of executing the predicated block.
6521   if (isPredicatedInst(I, VF)) {
6522     Cost /= getReciprocalPredBlockProb();
6523 
6524     // Add the cost of an i1 extract and a branch
6525     auto *Vec_i1Ty =
6526         VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF);
6527     Cost += TTI.getScalarizationOverhead(
6528         Vec_i1Ty, APInt::getAllOnes(VF.getKnownMinValue()),
6529         /*Insert=*/false, /*Extract=*/true);
6530     Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput);
6531 
6532     if (useEmulatedMaskMemRefHack(I, VF))
6533       // Artificially setting to a high enough value to practically disable
6534       // vectorization with such operations.
6535       Cost = 3000000;
6536   }
6537 
6538   return Cost;
6539 }
6540 
6541 InstructionCost
6542 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
6543                                                     ElementCount VF) {
6544   Type *ValTy = getLoadStoreType(I);
6545   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6546   Value *Ptr = getLoadStorePointerOperand(I);
6547   unsigned AS = getLoadStoreAddressSpace(I);
6548   int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr);
6549   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6550 
6551   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6552          "Stride should be 1 or -1 for consecutive memory access");
6553   const Align Alignment = getLoadStoreAlignment(I);
6554   InstructionCost Cost = 0;
6555   if (Legal->isMaskRequired(I))
6556     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6557                                       CostKind);
6558   else
6559     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6560                                 CostKind, I);
6561 
6562   bool Reverse = ConsecutiveStride < 0;
6563   if (Reverse)
6564     Cost +=
6565         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
6566   return Cost;
6567 }
6568 
6569 InstructionCost
6570 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
6571                                                 ElementCount VF) {
  assert(Legal->isUniformMemOp(*I) && "Expected a uniform memory op");
6573 
6574   Type *ValTy = getLoadStoreType(I);
6575   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6576   const Align Alignment = getLoadStoreAlignment(I);
6577   unsigned AS = getLoadStoreAddressSpace(I);
6578   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6579   if (isa<LoadInst>(I)) {
6580     return TTI.getAddressComputationCost(ValTy) +
6581            TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
6582                                CostKind) +
6583            TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
6584   }
6585   StoreInst *SI = cast<StoreInst>(I);
6586 
6587   bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
6588   return TTI.getAddressComputationCost(ValTy) +
6589          TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
6590                              CostKind) +
6591          (isLoopInvariantStoreValue
6592               ? 0
6593               : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
6594                                        VF.getKnownMinValue() - 1));
6595 }
6596 
6597 InstructionCost
6598 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
6599                                                  ElementCount VF) {
6600   Type *ValTy = getLoadStoreType(I);
6601   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6602   const Align Alignment = getLoadStoreAlignment(I);
6603   const Value *Ptr = getLoadStorePointerOperand(I);
6604 
6605   return TTI.getAddressComputationCost(VectorTy) +
6606          TTI.getGatherScatterOpCost(
6607              I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
6608              TargetTransformInfo::TCK_RecipThroughput, I);
6609 }
6610 
6611 InstructionCost
6612 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
6613                                                    ElementCount VF) {
6614   // TODO: Once we have support for interleaving with scalable vectors
6615   // we can calculate the cost properly here.
6616   if (VF.isScalable())
6617     return InstructionCost::getInvalid();
6618 
6619   Type *ValTy = getLoadStoreType(I);
6620   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6621   unsigned AS = getLoadStoreAddressSpace(I);
6622 
6623   auto Group = getInterleavedAccessGroup(I);
  assert(Group && "Failed to get an interleaved access group.");
6625 
6626   unsigned InterleaveFactor = Group->getFactor();
6627   auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
6628 
6629   // Holds the indices of existing members in the interleaved group.
6630   SmallVector<unsigned, 4> Indices;
6631   for (unsigned IF = 0; IF < InterleaveFactor; IF++)
6632     if (Group->getMember(IF))
6633       Indices.push_back(IF);
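  // For example, a factor-3 group in which only members 0 and 2 exist yields
  // Indices = {0, 2}; the missing member leaves a gap in the wide access.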
6634 
6635   // Calculate the cost of the whole interleaved group.
6636   bool UseMaskForGaps =
6637       (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) ||
6638       (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()));
6639   InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
6640       I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
6641       AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);
6642 
6643   if (Group->isReverse()) {
6644     // TODO: Add support for reversed masked interleaved access.
6645     assert(!Legal->isMaskRequired(I) &&
6646            "Reverse masked interleaved access not supported.");
6647     Cost +=
6648         Group->getNumMembers() *
6649         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
6650   }
6651   return Cost;
6652 }
6653 
6654 Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost(
6655     Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
6656   using namespace llvm::PatternMatch;
6657   // Early exit for no inloop reductions
6658   if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
6659     return None;
6660   auto *VectorTy = cast<VectorType>(Ty);
6661 
  // We are looking for one of the following patterns, finding the minimal
  // acceptable cost for the whole match:
6663   //  reduce(mul(ext(A), ext(B))) or
6664   //  reduce(mul(A, B)) or
6665   //  reduce(ext(A)) or
6666   //  reduce(A).
6667   // The basic idea is that we walk down the tree to do that, finding the root
6668   // reduction instruction in InLoopReductionImmediateChains. From there we find
6669   // the pattern of mul/ext and test the cost of the entire pattern vs the cost
6670   // of the components. If the reduction cost is lower then we return it for the
6671   // reduction instruction and 0 for the other instructions in the pattern. If
  // it is not, we return an invalid cost specifying that the original cost
  // method should be used.
6674   Instruction *RetI = I;
6675   if (match(RetI, m_ZExtOrSExt(m_Value()))) {
6676     if (!RetI->hasOneUser())
6677       return None;
6678     RetI = RetI->user_back();
6679   }
6680   if (match(RetI, m_Mul(m_Value(), m_Value())) &&
6681       RetI->user_back()->getOpcode() == Instruction::Add) {
6682     if (!RetI->hasOneUser())
6683       return None;
6684     RetI = RetI->user_back();
6685   }
6686 
6687   // Test if the found instruction is a reduction, and if not return an invalid
6688   // cost specifying the parent to use the original cost modelling.
6689   if (!InLoopReductionImmediateChains.count(RetI))
6690     return None;
6691 
6692   // Find the reduction this chain is a part of and calculate the basic cost of
6693   // the reduction on its own.
6694   Instruction *LastChain = InLoopReductionImmediateChains[RetI];
6695   Instruction *ReductionPhi = LastChain;
6696   while (!isa<PHINode>(ReductionPhi))
6697     ReductionPhi = InLoopReductionImmediateChains[ReductionPhi];
6698 
6699   const RecurrenceDescriptor &RdxDesc =
6700       Legal->getReductionVars().find(cast<PHINode>(ReductionPhi))->second;
6701 
6702   InstructionCost BaseCost = TTI.getArithmeticReductionCost(
6703       RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind);
6704 
6705   // For a call to the llvm.fmuladd intrinsic we need to add the cost of a
6706   // normal fmul instruction to the cost of the fadd reduction.
6707   if (RdxDesc.getRecurrenceKind() == RecurKind::FMulAdd)
6708     BaseCost +=
6709         TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind);
6710 
6711   // If we're using ordered reductions then we can just return the base cost
6712   // here, since getArithmeticReductionCost calculates the full ordered
6713   // reduction cost when FP reassociation is not allowed.
6714   if (useOrderedReductions(RdxDesc))
6715     return BaseCost;
6716 
  // Get the operand that was not the reduction chain and match it to one of
  // the patterns, returning the better cost if it is found.
6719   Instruction *RedOp = RetI->getOperand(1) == LastChain
6720                            ? dyn_cast<Instruction>(RetI->getOperand(0))
6721                            : dyn_cast<Instruction>(RetI->getOperand(1));
6722 
6723   VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
6724 
6725   Instruction *Op0, *Op1;
6726   if (RedOp &&
6727       match(RedOp,
6728             m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) &&
6729       match(Op0, m_ZExtOrSExt(m_Value())) &&
6730       Op0->getOpcode() == Op1->getOpcode() &&
6731       Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
6732       !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) &&
6733       (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) {
6734 
    // Matched reduce(ext(mul(ext(A), ext(B))))
6736     // Note that the extend opcodes need to all match, or if A==B they will have
6737     // been converted to zext(mul(sext(A), sext(A))) as it is known positive,
6738     // which is equally fine.
6739     bool IsUnsigned = isa<ZExtInst>(Op0);
6740     auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
6741     auto *MulType = VectorType::get(Op0->getType(), VectorTy);
6742 
6743     InstructionCost ExtCost =
6744         TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType,
6745                              TTI::CastContextHint::None, CostKind, Op0);
6746     InstructionCost MulCost =
6747         TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind);
6748     InstructionCost Ext2Cost =
6749         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType,
6750                              TTI::CastContextHint::None, CostKind, RedOp);
6751 
6752     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6753         /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6754         CostKind);
6755 
6756     if (RedCost.isValid() &&
6757         RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost)
6758       return I == RetI ? RedCost : 0;
6759   } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) &&
6760              !TheLoop->isLoopInvariant(RedOp)) {
6761     // Matched reduce(ext(A))
6762     bool IsUnsigned = isa<ZExtInst>(RedOp);
6763     auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
6764     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6765         /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6766         CostKind);
6767 
6768     InstructionCost ExtCost =
6769         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
6770                              TTI::CastContextHint::None, CostKind, RedOp);
6771     if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
6772       return I == RetI ? RedCost : 0;
6773   } else if (RedOp &&
6774              match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) {
6775     if (match(Op0, m_ZExtOrSExt(m_Value())) &&
6776         Op0->getOpcode() == Op1->getOpcode() &&
6777         !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
6778       bool IsUnsigned = isa<ZExtInst>(Op0);
6779       Type *Op0Ty = Op0->getOperand(0)->getType();
6780       Type *Op1Ty = Op1->getOperand(0)->getType();
6781       Type *LargestOpTy =
6782           Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty
6783                                                                     : Op0Ty;
6784       auto *ExtType = VectorType::get(LargestOpTy, VectorTy);
6785 
6786       // Matched reduce(mul(ext(A), ext(B))), where the two ext may be of
6787       // different sizes. We take the largest type as the ext to reduce, and add
6788       // the remaining cost as, for example reduce(mul(ext(ext(A)), ext(B))).
6789       InstructionCost ExtCost0 = TTI.getCastInstrCost(
6790           Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy),
6791           TTI::CastContextHint::None, CostKind, Op0);
6792       InstructionCost ExtCost1 = TTI.getCastInstrCost(
6793           Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy),
6794           TTI::CastContextHint::None, CostKind, Op1);
6795       InstructionCost MulCost =
6796           TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
6797 
6798       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6799           /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6800           CostKind);
6801       InstructionCost ExtraExtCost = 0;
6802       if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) {
6803         Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1;
6804         ExtraExtCost = TTI.getCastInstrCost(
6805             ExtraExtOp->getOpcode(), ExtType,
6806             VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy),
6807             TTI::CastContextHint::None, CostKind, ExtraExtOp);
6808       }
6809 
6810       if (RedCost.isValid() &&
6811           (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost))
6812         return I == RetI ? RedCost : 0;
6813     } else if (!match(I, m_ZExtOrSExt(m_Value()))) {
6814       // Matched reduce(mul())
6815       InstructionCost MulCost =
6816           TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
6817 
      InstructionCost RedCost = TTI.getExtendedAddReductionCost(
          /*IsMLA=*/true, /*IsUnsigned=*/true, RdxDesc.getRecurrenceType(),
          VectorTy, CostKind);
6821 
6822       if (RedCost.isValid() && RedCost < MulCost + BaseCost)
6823         return I == RetI ? RedCost : 0;
6824     }
6825   }
6826 
6827   return I == RetI ? Optional<InstructionCost>(BaseCost) : None;
6828 }
6829 
6830 InstructionCost
6831 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
6832                                                      ElementCount VF) {
6833   // Calculate scalar cost only. Vectorization cost should be ready at this
6834   // moment.
6835   if (VF.isScalar()) {
6836     Type *ValTy = getLoadStoreType(I);
6837     const Align Alignment = getLoadStoreAlignment(I);
6838     unsigned AS = getLoadStoreAddressSpace(I);
6839 
6840     return TTI.getAddressComputationCost(ValTy) +
6841            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
6842                                TTI::TCK_RecipThroughput, I);
6843   }
6844   return getWideningCost(I, VF);
6845 }
6846 
6847 LoopVectorizationCostModel::VectorizationCostTy
6848 LoopVectorizationCostModel::getInstructionCost(Instruction *I,
6849                                                ElementCount VF) {
6850   // If we know that this instruction will remain uniform, check the cost of
6851   // the scalar version.
6852   if (isUniformAfterVectorization(I, VF))
6853     VF = ElementCount::getFixed(1);
6854 
6855   if (VF.isVector() && isProfitableToScalarize(I, VF))
6856     return VectorizationCostTy(InstsToScalarize[VF][I], false);
6857 
6858   // Forced scalars do not have any scalarization overhead.
6859   auto ForcedScalar = ForcedScalars.find(VF);
6860   if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
    auto &InstSet = ForcedScalar->second;
6862     if (InstSet.count(I))
6863       return VectorizationCostTy(
6864           (getInstructionCost(I, ElementCount::getFixed(1)).first *
6865            VF.getKnownMinValue()),
6866           false);
6867   }
6868 
6869   Type *VectorTy;
6870   InstructionCost C = getInstructionCost(I, VF, VectorTy);
6871 
6872   bool TypeNotScalarized = false;
6873   if (VF.isVector() && VectorTy->isVectorTy()) {
6874     unsigned NumParts = TTI.getNumberOfParts(VectorTy);
6875     if (NumParts)
6876       TypeNotScalarized = NumParts < VF.getKnownMinValue();
6877     else
6878       C = InstructionCost::getInvalid();
6879   }
6880   return VectorizationCostTy(C, TypeNotScalarized);
6881 }
6882 
6883 InstructionCost
6884 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
6885                                                      ElementCount VF) const {
6886 
6887   // There is no mechanism yet to create a scalable scalarization loop,
6888   // so this is currently Invalid.
6889   if (VF.isScalable())
6890     return InstructionCost::getInvalid();
6891 
6892   if (VF.isScalar())
6893     return 0;
6894 
6895   InstructionCost Cost = 0;
6896   Type *RetTy = ToVectorTy(I->getType(), VF);
6897   if (!RetTy->isVoidTy() &&
6898       (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
6899     Cost += TTI.getScalarizationOverhead(
6900         cast<VectorType>(RetTy), APInt::getAllOnes(VF.getKnownMinValue()), true,
6901         false);
6902 
6903   // Some targets keep addresses scalar.
6904   if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
6905     return Cost;
6906 
6907   // Some targets support efficient element stores.
6908   if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
6909     return Cost;
6910 
6911   // Collect operands to consider.
6912   CallInst *CI = dyn_cast<CallInst>(I);
6913   Instruction::op_range Ops = CI ? CI->args() : I->operands();
6914 
  // Skip operands that do not require extraction/scalarization and do not
  // incur any overhead.
6917   SmallVector<Type *> Tys;
6918   for (auto *V : filterExtractingOperands(Ops, VF))
6919     Tys.push_back(MaybeVectorizeType(V->getType(), VF));
6920   return Cost + TTI.getOperandsScalarizationOverhead(
6921                     filterExtractingOperands(Ops, VF), Tys);
6922 }
6923 
6924 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
6925   if (VF.isScalar())
6926     return;
6927   NumPredStores = 0;
6928   for (BasicBlock *BB : TheLoop->blocks()) {
6929     // For each instruction in the old loop.
6930     for (Instruction &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
6932       if (!Ptr)
6933         continue;
6934 
6935       // TODO: We should generate better code and update the cost model for
6936       // predicated uniform stores. Today they are treated as any other
6937       // predicated store (see added test cases in
6938       // invariant-store-vectorization.ll).
6939       if (isa<StoreInst>(&I) && isScalarWithPredication(&I, VF))
6940         NumPredStores++;
6941 
6942       if (Legal->isUniformMemOp(I)) {
6943         // TODO: Avoid replicating loads and stores instead of
6944         // relying on instcombine to remove them.
6945         // Load: Scalar load + broadcast
6946         // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
6947         InstructionCost Cost;
6948         if (isa<StoreInst>(&I) && VF.isScalable() &&
6949             isLegalGatherOrScatter(&I, VF)) {
6950           Cost = getGatherScatterCost(&I, VF);
6951           setWideningDecision(&I, VF, CM_GatherScatter, Cost);
6952         } else {
6953           assert((isa<LoadInst>(&I) || !VF.isScalable()) &&
6954                  "Cannot yet scalarize uniform stores");
6955           Cost = getUniformMemOpCost(&I, VF);
6956           setWideningDecision(&I, VF, CM_Scalarize, Cost);
6957         }
6958         continue;
6959       }
6960 
6961       // We assume that widening is the best solution when possible.
6962       if (memoryInstructionCanBeWidened(&I, VF)) {
6963         InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
6964         int ConsecutiveStride = Legal->isConsecutivePtr(
6965             getLoadStoreType(&I), getLoadStorePointerOperand(&I));
6966         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6967                "Expected consecutive stride.");
6968         InstWidening Decision =
6969             ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
6970         setWideningDecision(&I, VF, Decision, Cost);
6971         continue;
6972       }
6973 
6974       // Choose between Interleaving, Gather/Scatter or Scalarization.
6975       InstructionCost InterleaveCost = InstructionCost::getInvalid();
6976       unsigned NumAccesses = 1;
6977       if (isAccessInterleaved(&I)) {
6978         auto Group = getInterleavedAccessGroup(&I);
        assert(Group && "Failed to get an interleaved access group.");
6980 
6981         // Make one decision for the whole group.
6982         if (getWideningDecision(&I, VF) != CM_Unknown)
6983           continue;
6984 
6985         NumAccesses = Group->getNumMembers();
6986         if (interleavedAccessCanBeWidened(&I, VF))
6987           InterleaveCost = getInterleaveGroupCost(&I, VF);
6988       }
6989 
6990       InstructionCost GatherScatterCost =
6991           isLegalGatherOrScatter(&I, VF)
6992               ? getGatherScatterCost(&I, VF) * NumAccesses
6993               : InstructionCost::getInvalid();
6994 
6995       InstructionCost ScalarizationCost =
6996           getMemInstScalarizationCost(&I, VF) * NumAccesses;
6997 
      // Choose the better solution for the current VF, record the decision,
      // and use it during vectorization.
7000       InstructionCost Cost;
7001       InstWidening Decision;
7002       if (InterleaveCost <= GatherScatterCost &&
7003           InterleaveCost < ScalarizationCost) {
7004         Decision = CM_Interleave;
7005         Cost = InterleaveCost;
7006       } else if (GatherScatterCost < ScalarizationCost) {
7007         Decision = CM_GatherScatter;
7008         Cost = GatherScatterCost;
7009       } else {
7010         Decision = CM_Scalarize;
7011         Cost = ScalarizationCost;
7012       }
      // If the instruction belongs to an interleave group, the whole group
      // receives the same decision and cost, though the cost will actually
      // be assigned to just one member of the group.
7016       if (auto Group = getInterleavedAccessGroup(&I))
7017         setWideningDecision(Group, VF, Decision, Cost);
7018       else
7019         setWideningDecision(&I, VF, Decision, Cost);
7020     }
7021   }
7022 
7023   // Make sure that any load of address and any other address computation
7024   // remains scalar unless there is gather/scatter support. This avoids
7025   // inevitable extracts into address registers, and also has the benefit of
7026   // activating LSR more, since that pass can't optimize vectorized
7027   // addresses.
7028   if (TTI.prefersVectorizedAddressing())
7029     return;
7030 
7031   // Start with all scalar pointer uses.
7032   SmallPtrSet<Instruction *, 8> AddrDefs;
7033   for (BasicBlock *BB : TheLoop->blocks())
7034     for (Instruction &I : *BB) {
7035       Instruction *PtrDef =
7036         dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
7037       if (PtrDef && TheLoop->contains(PtrDef) &&
7038           getWideningDecision(&I, VF) != CM_GatherScatter)
7039         AddrDefs.insert(PtrDef);
7040     }
7041 
7042   // Add all instructions used to generate the addresses.
7043   SmallVector<Instruction *, 4> Worklist;
7044   append_range(Worklist, AddrDefs);
7045   while (!Worklist.empty()) {
7046     Instruction *I = Worklist.pop_back_val();
7047     for (auto &Op : I->operands())
7048       if (auto *InstOp = dyn_cast<Instruction>(Op))
7049         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
7050             AddrDefs.insert(InstOp).second)
7051           Worklist.push_back(InstOp);
7052   }
7053 
7054   for (auto *I : AddrDefs) {
7055     if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by
      // the cost functions, but since this involves finding out whether the
      // loaded register is involved in an address computation, the decision
      // is instead changed here when we know this is the case.
7060       InstWidening Decision = getWideningDecision(I, VF);
7061       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
7062         // Scalarize a widened load of address.
7063         setWideningDecision(
7064             I, VF, CM_Scalarize,
7065             (VF.getKnownMinValue() *
7066              getMemoryInstructionCost(I, ElementCount::getFixed(1))));
7067       else if (auto Group = getInterleavedAccessGroup(I)) {
7068         // Scalarize an interleave group of address loads.
7069         for (unsigned I = 0; I < Group->getFactor(); ++I) {
7070           if (Instruction *Member = Group->getMember(I))
7071             setWideningDecision(
7072                 Member, VF, CM_Scalarize,
7073                 (VF.getKnownMinValue() *
7074                  getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
7075         }
7076       }
7077     } else
      // Make sure I gets scalarized and receives a cost estimate without
      // scalarization overhead.
7080       ForcedScalars[VF].insert(I);
7081   }
7082 }
7083 
7084 InstructionCost
7085 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
7086                                                Type *&VectorTy) {
7087   Type *RetTy = I->getType();
7088   if (canTruncateToMinimalBitwidth(I, VF))
7089     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
7090   auto SE = PSE.getSE();
7091   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
7092 
7093   auto hasSingleCopyAfterVectorization = [this](Instruction *I,
7094                                                 ElementCount VF) -> bool {
7095     if (VF.isScalar())
7096       return true;
7097 
7098     auto Scalarized = InstsToScalarize.find(VF);
7099     assert(Scalarized != InstsToScalarize.end() &&
7100            "VF not yet analyzed for scalarization profitability");
7101     return !Scalarized->second.count(I) &&
7102            llvm::all_of(I->users(), [&](User *U) {
7103              auto *UI = cast<Instruction>(U);
7104              return !Scalarized->second.count(UI);
7105            });
7106   };
7107   (void) hasSingleCopyAfterVectorization;
7108 
7109   if (isScalarAfterVectorization(I, VF)) {
7110     // With the exception of GEPs and PHIs, after scalarization there should
7111     // only be one copy of the instruction generated in the loop. This is
7112     // because the VF is either 1, or any instructions that need scalarizing
    // have already been dealt with by the time we get here. As a result,
7114     // it means we don't have to multiply the instruction cost by VF.
7115     assert(I->getOpcode() == Instruction::GetElementPtr ||
7116            I->getOpcode() == Instruction::PHI ||
7117            (I->getOpcode() == Instruction::BitCast &&
7118             I->getType()->isPointerTy()) ||
7119            hasSingleCopyAfterVectorization(I, VF));
7120     VectorTy = RetTy;
7121   } else
7122     VectorTy = ToVectorTy(RetTy, VF);
7123 
7124   // TODO: We need to estimate the cost of intrinsic calls.
7125   switch (I->getOpcode()) {
7126   case Instruction::GetElementPtr:
7127     // We mark this instruction as zero-cost because the cost of GEPs in
7128     // vectorized code depends on whether the corresponding memory instruction
7129     // is scalarized or not. Therefore, we handle GEPs with the memory
7130     // instruction cost.
7131     return 0;
7132   case Instruction::Br: {
7133     // In cases of scalarized and predicated instructions, there will be VF
7134     // predicated blocks in the vectorized loop. Each branch around these
    // blocks also requires an extract of its vector compare i1 element.
7136     bool ScalarPredicatedBB = false;
7137     BranchInst *BI = cast<BranchInst>(I);
7138     if (VF.isVector() && BI->isConditional() &&
7139         (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
7140          PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
7141       ScalarPredicatedBB = true;
7142 
7143     if (ScalarPredicatedBB) {
      // Not possible to scalarize a scalable vector with predicated
      // instructions.
7145       if (VF.isScalable())
7146         return InstructionCost::getInvalid();
7147       // Return cost for branches around scalarized and predicated blocks.
7148       auto *Vec_i1Ty =
7149           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
7150       return (
7151           TTI.getScalarizationOverhead(
7152               Vec_i1Ty, APInt::getAllOnes(VF.getFixedValue()), false, true) +
7153           (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue()));
7154     } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
7155       // The back-edge branch will remain, as will all scalar branches.
7156       return TTI.getCFInstrCost(Instruction::Br, CostKind);
7157     else
7158       // This branch will be eliminated by if-conversion.
7159       return 0;
7160     // Note: We currently assume zero cost for an unconditional branch inside
7161     // a predicated block since it will become a fall-through, although we
7162     // may decide in the future to call TTI for all branches.
7163   }
7164   case Instruction::PHI: {
7165     auto *Phi = cast<PHINode>(I);
7166 
7167     // First-order recurrences are replaced by vector shuffles inside the loop.
7168     // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
7169     if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi))
7170       return TTI.getShuffleCost(
7171           TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy),
7172           None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1));
7173 
7174     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
7175     // converted into select instructions. We require N - 1 selects per phi
7176     // node, where N is the number of incoming values.
7177     if (VF.isVector() && Phi->getParent() != TheLoop->getHeader())
7178       return (Phi->getNumIncomingValues() - 1) *
7179              TTI.getCmpSelInstrCost(
7180                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
7181                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
7182                  CmpInst::BAD_ICMP_PREDICATE, CostKind);
7183 
7184     return TTI.getCFInstrCost(Instruction::PHI, CostKind);
7185   }
7186   case Instruction::UDiv:
7187   case Instruction::SDiv:
7188   case Instruction::URem:
7189   case Instruction::SRem:
7190     // If we have a predicated instruction, it may not be executed for each
7191     // vector lane. Get the scalarization cost and scale this amount by the
7192     // probability of executing the predicated block. If the instruction is not
7193     // predicated, we fall through to the next case.
7194     if (VF.isVector() && isScalarWithPredication(I, VF)) {
7195       InstructionCost Cost = 0;
7196 
7197       // These instructions have a non-void type, so account for the phi nodes
7198       // that we will create. This cost is likely to be zero. The phi node
7199       // cost, if any, should be scaled by the block probability because it
7200       // models a copy at the end of each predicated block.
7201       Cost += VF.getKnownMinValue() *
7202               TTI.getCFInstrCost(Instruction::PHI, CostKind);
7203 
7204       // The cost of the non-predicated instruction.
7205       Cost += VF.getKnownMinValue() *
7206               TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);
7207 
7208       // The cost of insertelement and extractelement instructions needed for
7209       // scalarization.
7210       Cost += getScalarizationOverhead(I, VF);
7211 
7212       // Scale the cost by the probability of executing the predicated blocks.
7213       // This assumes the predicated block for each vector lane is equally
7214       // likely.
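      // getReciprocalPredBlockProb() currently returns 2, so this halves the
      // cost, modelling blocks that execute on average every other iteration.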
7215       return Cost / getReciprocalPredBlockProb();
7216     }
7217     LLVM_FALLTHROUGH;
7218   case Instruction::Add:
7219   case Instruction::FAdd:
7220   case Instruction::Sub:
7221   case Instruction::FSub:
7222   case Instruction::Mul:
7223   case Instruction::FMul:
7224   case Instruction::FDiv:
7225   case Instruction::FRem:
7226   case Instruction::Shl:
7227   case Instruction::LShr:
7228   case Instruction::AShr:
7229   case Instruction::And:
7230   case Instruction::Or:
7231   case Instruction::Xor: {
    // Since we replace the stride by 1, the multiplication should go away.
7233     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
7234       return 0;
7235 
7236     // Detect reduction patterns
7237     if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7238       return *RedCost;
7239 
    // Certain instructions can be cheaper to vectorize if they have a constant
    // second vector operand. One example of this is shifts on x86.
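    // For example, on x86 a shift by a constant splat such as
    //   shl <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>
    // is typically cheaper than a shift by a variable vector amount.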
7242     Value *Op2 = I->getOperand(1);
7243     TargetTransformInfo::OperandValueProperties Op2VP;
7244     TargetTransformInfo::OperandValueKind Op2VK =
7245         TTI.getOperandInfo(Op2, Op2VP);
7246     if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
7247       Op2VK = TargetTransformInfo::OK_UniformValue;
7248 
7249     SmallVector<const Value *, 4> Operands(I->operand_values());
7250     return TTI.getArithmeticInstrCost(
7251         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7252         Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
7253   }
7254   case Instruction::FNeg: {
7255     return TTI.getArithmeticInstrCost(
7256         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7257         TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None,
7258         TargetTransformInfo::OP_None, I->getOperand(0), I);
7259   }
7260   case Instruction::Select: {
7261     SelectInst *SI = cast<SelectInst>(I);
7262     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
7263     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
7264 
7265     const Value *Op0, *Op1;
7266     using namespace llvm::PatternMatch;
7267     if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
7268                         match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
7269       // select x, y, false --> x & y
7270       // select x, true, y --> x | y
7271       TTI::OperandValueProperties Op1VP = TTI::OP_None;
7272       TTI::OperandValueProperties Op2VP = TTI::OP_None;
7273       TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP);
7274       TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP);
7275       assert(Op0->getType()->getScalarSizeInBits() == 1 &&
7276               Op1->getType()->getScalarSizeInBits() == 1);
7277 
7278       SmallVector<const Value *, 2> Operands{Op0, Op1};
7279       return TTI.getArithmeticInstrCost(
7280           match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And, VectorTy,
7281           CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I);
7282     }
7283 
7284     Type *CondTy = SI->getCondition()->getType();
7285     if (!ScalarCond)
7286       CondTy = VectorType::get(CondTy, VF);
7287 
7288     CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
7289     if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition()))
7290       Pred = Cmp->getPredicate();
7291     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, Pred,
7292                                   CostKind, I);
7293   }
7294   case Instruction::ICmp:
7295   case Instruction::FCmp: {
7296     Type *ValTy = I->getOperand(0)->getType();
7297     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
7298     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
7299       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
7300     VectorTy = ToVectorTy(ValTy, VF);
7301     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr,
7302                                   cast<CmpInst>(I)->getPredicate(), CostKind,
7303                                   I);
7304   }
7305   case Instruction::Store:
7306   case Instruction::Load: {
7307     ElementCount Width = VF;
7308     if (Width.isVector()) {
7309       InstWidening Decision = getWideningDecision(I, Width);
7310       assert(Decision != CM_Unknown &&
7311              "CM decision should be taken at this point");
7312       if (Decision == CM_Scalarize)
7313         Width = ElementCount::getFixed(1);
7314     }
7315     VectorTy = ToVectorTy(getLoadStoreType(I), Width);
7316     return getMemoryInstructionCost(I, VF);
7317   }
7318   case Instruction::BitCast:
7319     if (I->getType()->isPointerTy())
7320       return 0;
7321     LLVM_FALLTHROUGH;
7322   case Instruction::ZExt:
7323   case Instruction::SExt:
7324   case Instruction::FPToUI:
7325   case Instruction::FPToSI:
7326   case Instruction::FPExt:
7327   case Instruction::PtrToInt:
7328   case Instruction::IntToPtr:
7329   case Instruction::SIToFP:
7330   case Instruction::UIToFP:
7331   case Instruction::Trunc:
7332   case Instruction::FPTrunc: {
7333     // Computes the CastContextHint from a Load/Store instruction.
7334     auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
7335       assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
7336              "Expected a load or a store!");
7337 
7338       if (VF.isScalar() || !TheLoop->contains(I))
7339         return TTI::CastContextHint::Normal;
7340 
7341       switch (getWideningDecision(I, VF)) {
7342       case LoopVectorizationCostModel::CM_GatherScatter:
7343         return TTI::CastContextHint::GatherScatter;
7344       case LoopVectorizationCostModel::CM_Interleave:
7345         return TTI::CastContextHint::Interleave;
7346       case LoopVectorizationCostModel::CM_Scalarize:
7347       case LoopVectorizationCostModel::CM_Widen:
7348         return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked
7349                                         : TTI::CastContextHint::Normal;
7350       case LoopVectorizationCostModel::CM_Widen_Reverse:
7351         return TTI::CastContextHint::Reversed;
7352       case LoopVectorizationCostModel::CM_Unknown:
7353         llvm_unreachable("Instr did not go through cost modelling?");
7354       }
7355 
7356       llvm_unreachable("Unhandled case!");
7357     };
7358 
7359     unsigned Opcode = I->getOpcode();
7360     TTI::CastContextHint CCH = TTI::CastContextHint::None;
7361     // For Trunc, the context is the only user, which must be a StoreInst.
7362     if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
7363       if (I->hasOneUse())
7364         if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
7365           CCH = ComputeCCH(Store);
7366     }
7367     // For Z/Sext, the context is the operand, which must be a LoadInst.
7368     else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
7369              Opcode == Instruction::FPExt) {
7370       if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
7371         CCH = ComputeCCH(Load);
7372     }
7373 
7374     // We optimize the truncation of induction variables having constant
7375     // integer steps. The cost of these truncations is the same as the scalar
7376     // operation.
7377     if (isOptimizableIVTruncate(I, VF)) {
7378       auto *Trunc = cast<TruncInst>(I);
7379       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
7380                                   Trunc->getSrcTy(), CCH, CostKind, Trunc);
7381     }
7382 
7383     // Detect reduction patterns
7384     if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7385       return *RedCost;
7386 
7387     Type *SrcScalarTy = I->getOperand(0)->getType();
7388     Type *SrcVecTy =
7389         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
7390     if (canTruncateToMinimalBitwidth(I, VF)) {
      // This cast is going to be shrunk. This may remove the cast entirely or
      // turn it into a slightly different cast. For example, if MinBW == 16,
7393       // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
7394       //
7395       // Calculate the modified src and dest types.
7396       Type *MinVecTy = VectorTy;
7397       if (Opcode == Instruction::Trunc) {
7398         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
7399         VectorTy =
7400             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7401       } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
7402         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
7403         VectorTy =
7404             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7405       }
7406     }
7407 
7408     return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
7409   }
7410   case Instruction::Call: {
7411     if (RecurrenceDescriptor::isFMulAddIntrinsic(I))
7412       if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7413         return *RedCost;
7414     bool NeedToScalarize;
7415     CallInst *CI = cast<CallInst>(I);
7416     InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
7417     if (getVectorIntrinsicIDForCall(CI, TLI)) {
7418       InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF);
7419       return std::min(CallCost, IntrinsicCost);
7420     }
7421     return CallCost;
7422   }
7423   case Instruction::ExtractValue:
7424     return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput);
7425   case Instruction::Alloca:
    // We cannot easily widen an alloca to a scalable alloca, as
    // the result would need to be a vector of pointers.
7428     if (VF.isScalable())
7429       return InstructionCost::getInvalid();
7430     LLVM_FALLTHROUGH;
7431   default:
7432     // This opcode is unknown. Assume that it is the same as 'mul'.
7433     return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
7434   } // end of switch.
7435 }
7436 
7437 char LoopVectorize::ID = 0;
7438 
7439 static const char lv_name[] = "Loop Vectorization";
7440 
7441 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
7442 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
7443 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
7444 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
7445 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
7446 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
7447 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
7448 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
7449 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
7450 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
7451 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
7452 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
7453 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
7454 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
7455 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
7456 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
7457 
7458 namespace llvm {
7459 
7460 Pass *createLoopVectorizePass() { return new LoopVectorize(); }
7461 
7462 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
7463                               bool VectorizeOnlyWhenForced) {
7464   return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
7465 }
7466 
7467 } // end namespace llvm
7468 
7469 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
7470   // Check if the pointer operand of a load or store instruction is
7471   // consecutive.
7472   if (auto *Ptr = getLoadStorePointerOperand(Inst))
7473     return Legal->isConsecutivePtr(getLoadStoreType(Inst), Ptr);
7474   return false;
7475 }
7476 
7477 void LoopVectorizationCostModel::collectValuesToIgnore() {
7478   // Ignore ephemeral values.
7479   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
7480 
7481   // Ignore type-promoting instructions we identified during reduction
7482   // detection.
7483   for (auto &Reduction : Legal->getReductionVars()) {
7484     const RecurrenceDescriptor &RedDes = Reduction.second;
7485     const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
7486     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7487   }
7488   // Ignore type-casting instructions we identified during induction
7489   // detection.
7490   for (auto &Induction : Legal->getInductionVars()) {
7491     const InductionDescriptor &IndDes = Induction.second;
7492     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
7493     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7494   }
7495 }
7496 
7497 void LoopVectorizationCostModel::collectInLoopReductions() {
7498   for (auto &Reduction : Legal->getReductionVars()) {
7499     PHINode *Phi = Reduction.first;
7500     const RecurrenceDescriptor &RdxDesc = Reduction.second;
7501 
7502     // We don't collect reductions that are type promoted (yet).
7503     if (RdxDesc.getRecurrenceType() != Phi->getType())
7504       continue;
7505 
7506     // If the target would prefer this reduction to happen "in-loop", then we
7507     // want to record it as such.
7508     unsigned Opcode = RdxDesc.getOpcode();
7509     if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) &&
7510         !TTI.preferInLoopReduction(Opcode, Phi->getType(),
7511                                    TargetTransformInfo::ReductionFlags()))
7512       continue;
7513 
7514     // Check that we can correctly put the reductions into the loop, by
7515     // finding the chain of operations that leads from the phi to the loop
7516     // exit value.
7517     SmallVector<Instruction *, 4> ReductionOperations =
7518         RdxDesc.getReductionOpChain(Phi, TheLoop);
7519     bool InLoop = !ReductionOperations.empty();
7520     if (InLoop) {
7521       InLoopReductionChains[Phi] = ReductionOperations;
7522       // Add the elements to InLoopReductionImmediateChains for cost modelling.
7523       Instruction *LastChain = Phi;
7524       for (auto *I : ReductionOperations) {
7525         InLoopReductionImmediateChains[I] = LastChain;
7526         LastChain = I;
7527       }
7528     }
7529     LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
7530                       << " reduction for phi: " << *Phi << "\n");
7531   }
7532 }
7533 
// TODO: we could return a pair of values that specify the max VF and
// min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
// `buildVPlans(VF, VF)`. We cannot do that yet because VPlan does not have a
// cost model that can choose which plan to execute when more than one is
// generated.
7539 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
7540                                  LoopVectorizationCostModel &CM) {
7541   unsigned WidestType;
7542   std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
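  // For example, 256-bit wide registers and a widest element type of i32
  // give a VF of 256 / 32 = 8.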
7543   return WidestVectorRegBits / WidestType;
7544 }
7545 
7546 VectorizationFactor
7547 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
7548   assert(!UserVF.isScalable() && "scalable vectors not yet supported");
7549   ElementCount VF = UserVF;
  // Outer loop handling: outer loops may require CFG and instruction-level
  // transformations before we can even evaluate whether vectorization is
  // profitable. Since we cannot modify the incoming IR, we need to build
  // VPlan upfront in the vectorization pipeline.
7554   if (!OrigLoop->isInnermost()) {
7555     // If the user doesn't provide a vectorization factor, determine a
7556     // reasonable one.
7557     if (UserVF.isZero()) {
7558       VF = ElementCount::getFixed(determineVPlanVF(
7559           TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
7560               .getFixedSize(),
7561           CM));
7562       LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
7563 
7564       // Make sure we have a VF > 1 for stress testing.
7565       if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
7566         LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
7567                           << "overriding computed VF.\n");
7568         VF = ElementCount::getFixed(4);
7569       }
7570     }
7571     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7572     assert(isPowerOf2_32(VF.getKnownMinValue()) &&
7573            "VF needs to be a power of two");
7574     LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
7575                       << "VF " << VF << " to build VPlans.\n");
7576     buildVPlans(VF, VF);
7577 
7578     // For VPlan build stress testing, we bail out after VPlan construction.
7579     if (VPlanBuildStressTest)
7580       return VectorizationFactor::Disabled();
7581 
7582     return {VF, 0 /*Cost*/};
7583   }
7584 
7585   LLVM_DEBUG(
7586       dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
7587                 "VPlan-native path.\n");
7588   return VectorizationFactor::Disabled();
7589 }
7590 
7591 Optional<VectorizationFactor>
7592 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
7593   assert(OrigLoop->isInnermost() && "Inner loop expected.");
7594   FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
  if (!MaxFactors) // Cases that should not be vectorized or interleaved.
7596     return None;
7597 
  // Invalidate interleave groups if all blocks of the loop will be predicated.
7599   if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) &&
7600       !useMaskedInterleavedAccesses(*TTI)) {
7601     LLVM_DEBUG(
7602         dbgs()
7603         << "LV: Invalidate all interleaved groups due to fold-tail by masking "
7604            "which requires masked-interleaved support.\n");
7605     if (CM.InterleaveInfo.invalidateGroups())
7606       // Invalidating interleave groups also requires invalidating all decisions
7607       // based on them, which includes widening decisions and uniform and scalar
7608       // values.
7609       CM.invalidateCostModelingDecisions();
7610   }
7611 
7612   ElementCount MaxUserVF =
7613       UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF;
7614   bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF);
7615   if (!UserVF.isZero() && UserVFIsLegal) {
7616     assert(isPowerOf2_32(UserVF.getKnownMinValue()) &&
7617            "VF needs to be a power of two");
7618     // Collect the instructions (and their associated costs) that will be more
7619     // profitable to scalarize.
7620     if (CM.selectUserVectorizationFactor(UserVF)) {
7621       LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
7622       CM.collectInLoopReductions();
7623       buildVPlansWithVPRecipes(UserVF, UserVF);
7624       LLVM_DEBUG(printPlans(dbgs()));
7625       return {{UserVF, 0}};
7626     } else
7627       reportVectorizationInfo("UserVF ignored because of invalid costs.",
7628                               "InvalidCost", ORE, OrigLoop);
7629   }
7630 
7631   // Populate the set of Vectorization Factor Candidates.
7632   ElementCountSet VFCandidates;
7633   for (auto VF = ElementCount::getFixed(1);
7634        ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2)
7635     VFCandidates.insert(VF);
7636   for (auto VF = ElementCount::getScalable(1);
7637        ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2)
7638     VFCandidates.insert(VF);
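  // For example, with MaxFactors = {fixed 8, scalable 4} the candidates are
  // VF = 1, 2, 4, 8 and VF = vscale x 1, vscale x 2, vscale x 4.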
7639 
7640   for (const auto &VF : VFCandidates) {
7641     // Collect Uniform and Scalar instructions after vectorization with VF.
7642     CM.collectUniformsAndScalars(VF);
7643 
7644     // Collect the instructions (and their associated costs) that will be more
7645     // profitable to scalarize.
7646     if (VF.isVector())
7647       CM.collectInstsToScalarize(VF);
7648   }
7649 
7650   CM.collectInLoopReductions();
7651   buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF);
7652   buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF);
7653 
7654   LLVM_DEBUG(printPlans(dbgs()));
7655   if (!MaxFactors.hasVector())
7656     return VectorizationFactor::Disabled();
7657 
7658   // Select the optimal vectorization factor.
7659   auto SelectedVF = CM.selectVectorizationFactor(VFCandidates);
7660 
7661   // Check if it is profitable to vectorize with runtime checks.
7662   unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks();
7663   if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) {
7664     bool PragmaThresholdReached =
7665         NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
7666     bool ThresholdReached =
7667         NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
7668     if ((ThresholdReached && !Hints.allowReordering()) ||
7669         PragmaThresholdReached) {
7670       ORE->emit([&]() {
7671         return OptimizationRemarkAnalysisAliasing(
7672                    DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(),
7673                    OrigLoop->getHeader())
7674                << "loop not vectorized: cannot prove it is safe to reorder "
7675                   "memory operations";
7676       });
7677       LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
7678       Hints.emitRemarkWithHints();
7679       return VectorizationFactor::Disabled();
7680     }
7681   }
7682   return SelectedVF;
7683 }
7684 
7685 VPlan &LoopVectorizationPlanner::getBestPlanFor(ElementCount VF) const {
7686   assert(count_if(VPlans,
7687                   [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) ==
7688              1 &&
         "Best VF does not have a single VPlan.");
7690 
7691   for (const VPlanPtr &Plan : VPlans) {
7692     if (Plan->hasVF(VF))
7693       return *Plan.get();
7694   }
7695   llvm_unreachable("No plan found!");
7696 }
7697 
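// Add "llvm.loop.unroll.runtime.disable" to the loop ID metadata of \p L,
// unless unroll-disable metadata is already present. The resulting loop
// metadata looks like, for example:
//   !0 = distinct !{!0, !1}
//   !1 = !{!"llvm.loop.unroll.runtime.disable"}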
7698 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
7699   SmallVector<Metadata *, 4> MDs;
7700   // Reserve first location for self reference to the LoopID metadata node.
7701   MDs.push_back(nullptr);
7702   bool IsUnrollMetadata = false;
7703   MDNode *LoopID = L->getLoopID();
7704   if (LoopID) {
7705     // First find existing loop unrolling disable metadata.
7706     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
7707       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
7708       if (MD) {
7709         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
7710         IsUnrollMetadata =
7711             S && S->getString().startswith("llvm.loop.unroll.disable");
7712       }
7713       MDs.push_back(LoopID->getOperand(i));
7714     }
7715   }
7716 
7717   if (!IsUnrollMetadata) {
7718     // Add runtime unroll disable metadata.
7719     LLVMContext &Context = L->getHeader()->getContext();
7720     SmallVector<Metadata *, 1> DisableOperands;
7721     DisableOperands.push_back(
7722         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
7723     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
7724     MDs.push_back(DisableNode);
7725     MDNode *NewLoopID = MDNode::get(Context, MDs);
7726     // Set operand 0 to refer to the loop id itself.
7727     NewLoopID->replaceOperandWith(0, NewLoopID);
7728     L->setLoopID(NewLoopID);
7729   }
7730 }
7731 
7732 void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF,
7733                                            VPlan &BestVPlan,
7734                                            InnerLoopVectorizer &ILV,
7735                                            DominatorTree *DT) {
  LLVM_DEBUG(dbgs() << "Executing best plan with VF=" << BestVF
                    << ", UF=" << BestUF << '\n');
7738 
7739   // Perform the actual loop transformation.
7740 
7741   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
7742   VPTransformState State{BestVF, BestUF, LI, DT, ILV.Builder, &ILV, &BestVPlan};
7743   Value *CanonicalIVStartValue;
7744   std::tie(State.CFG.PrevBB, CanonicalIVStartValue) =
7745       ILV.createVectorizedLoopSkeleton();
7746   ILV.collectPoisonGeneratingRecipes(State);
7747 
7748   ILV.printDebugTracesAtStart();
7749 
7750   //===------------------------------------------------===//
7751   //
  // Notice: any optimization or new instruction that goes
  // into the code below should also be implemented in
  // the cost model.
7755   //
7756   //===------------------------------------------------===//
7757 
7758   // 2. Copy and widen instructions from the old loop into the new loop.
7759   BestVPlan.prepareToExecute(ILV.getOrCreateTripCount(nullptr),
7760                              ILV.getOrCreateVectorTripCount(nullptr),
7761                              CanonicalIVStartValue, State);
7762   BestVPlan.execute(&State);
7763 
  // Look for a follow-up loop ID to attach to the vector loop. If none is
  // found, keep the original hints and mark the loop as already vectorized.
7766   MDNode *OrigLoopID = OrigLoop->getLoopID();
7767 
7768   Optional<MDNode *> VectorizedLoopID =
7769       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
7770                                       LLVMLoopVectorizeFollowupVectorized});
7771 
7772   Loop *L = LI->getLoopFor(State.CFG.PrevBB);
7773   if (VectorizedLoopID.hasValue())
7774     L->setLoopID(VectorizedLoopID.getValue());
7775   else {
7776     // Keep all loop hints from the original loop on the vector loop (we'll
7777     // replace the vectorizer-specific hints below).
7778     if (MDNode *LID = OrigLoop->getLoopID())
7779       L->setLoopID(LID);
7780 
7781     LoopVectorizeHints Hints(L, true, *ORE);
7782     Hints.setAlreadyVectorized();
7783   }
7784   // Disable runtime unrolling when vectorizing the epilogue loop.
7785   if (CanonicalIVStartValue)
7786     AddRuntimeUnrollDisableMetaData(L);
7787 
7788   // 3. Fix the vectorized code: take care of header phi's, live-outs,
7789   //    predication, updating analyses.
7790   ILV.fixVectorizedLoop(State);
7791 
7792   ILV.printDebugTracesAtEnd();
7793 }
7794 
7795 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
7796 void LoopVectorizationPlanner::printPlans(raw_ostream &O) {
7797   for (const auto &Plan : VPlans)
7798     if (PrintVPlansInDotFormat)
7799       Plan->printDOT(O);
7800     else
7801       Plan->print(O);
7802 }
7803 #endif
7804 
7805 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
7806     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
7807 
  // We create new control-flow for the vectorized loop, so the original exit
  // conditions will be dead after vectorization if they are only used by the
  // terminators.
7811   SmallVector<BasicBlock*> ExitingBlocks;
7812   OrigLoop->getExitingBlocks(ExitingBlocks);
7813   for (auto *BB : ExitingBlocks) {
7814     auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
7815     if (!Cmp || !Cmp->hasOneUse())
7816       continue;
7817 
7818     // TODO: we should introduce a getUniqueExitingBlocks on Loop
7819     if (!DeadInstructions.insert(Cmp).second)
7820       continue;
7821 
    // An operand of the icmp is often a dead trunc, used by IndUpdate.
7823     // TODO: can recurse through operands in general
    for (Value *Op : Cmp->operands()) {
      if (isa<TruncInst>(Op) && Op->hasOneUse())
        DeadInstructions.insert(cast<Instruction>(Op));
    }
7828   }
7829 
7830   // We create new "steps" for induction variable updates to which the original
7831   // induction variables map. An original update instruction will be dead if
7832   // all its users except the induction variable are dead.
7833   auto *Latch = OrigLoop->getLoopLatch();
7834   for (auto &Induction : Legal->getInductionVars()) {
7835     PHINode *Ind = Induction.first;
7836     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
7837 
    // If the tail is to be folded by masking, the primary induction variable,
    // if it exists, isn't dead: it will be used for masking. Don't kill it.
7840     if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction())
7841       continue;
7842 
7843     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
7844           return U == Ind || DeadInstructions.count(cast<Instruction>(U));
7845         }))
7846       DeadInstructions.insert(IndUpdate);
7847   }
7848 }
7849 
7850 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
7851 
7852 //===--------------------------------------------------------------------===//
7853 // EpilogueVectorizerMainLoop
7854 //===--------------------------------------------------------------------===//
7855 
7856 /// This function is partially responsible for generating the control flow
7857 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
7858 std::pair<BasicBlock *, Value *>
7859 EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
7860   MDNode *OrigLoopID = OrigLoop->getLoopID();
7861   Loop *Lp = createVectorLoopSkeleton("");
7862 
7863   // Generate the code to check the minimum iteration count of the vector
7864   // epilogue (see below).
7865   EPI.EpilogueIterationCountCheck =
7866       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true);
7867   EPI.EpilogueIterationCountCheck->setName("iter.check");
7868 
7869   // Generate the code to check any assumptions that we've made for SCEV
7870   // expressions.
7871   EPI.SCEVSafetyCheck = emitSCEVChecks(LoopScalarPreHeader);
7872 
7873   // Generate the code that checks at runtime if arrays overlap. We put the
7874   // checks into a separate block to make the more common case of few elements
7875   // faster.
7876   EPI.MemSafetyCheck = emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
7877 
7878   // Generate the iteration count check for the main loop, *after* the check
7879   // for the epilogue loop, so that the path-length is shorter for the case
7880   // that goes directly through the vector epilogue. The longer-path length for
7881   // the main loop is compensated for, by the gain from vectorizing the larger
7882   // trip count. Note: the branch will get updated later on when we vectorize
7883   // the epilogue.
7884   EPI.MainLoopIterationCountCheck =
7885       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false);
7886 
7887   // Generate the induction variable.
7888   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
7889   EPI.VectorTripCount = CountRoundDown;
7890   createHeaderBranch(Lp);
7891 
  // Skip induction resume value creation here because they will be created in
  // the second pass. If we created them here, they wouldn't be used anyway,
  // because the VPlan in the second pass still contains the inductions from
  // the original loop.
7896 
7897   return {completeLoopSkeleton(Lp, OrigLoopID), nullptr};
7898 }
7899 
7900 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
7901   LLVM_DEBUG({
7902     dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
7903            << "Main Loop VF:" << EPI.MainLoopVF
7904            << ", Main Loop UF:" << EPI.MainLoopUF
7905            << ", Epilogue Loop VF:" << EPI.EpilogueVF
7906            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
7907   });
7908 }
7909 
7910 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
7911   DEBUG_WITH_TYPE(VerboseDebug, {
7912     dbgs() << "intermediate fn:\n"
7913            << *OrigLoop->getHeader()->getParent() << "\n";
7914   });
7915 }
7916 
7917 BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(
7918     Loop *L, BasicBlock *Bypass, bool ForEpilogue) {
7919   assert(L && "Expected valid Loop.");
7920   assert(Bypass && "Expected valid bypass basic block.");
7921   ElementCount VFactor = ForEpilogue ? EPI.EpilogueVF : VF;
7922   unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF;
7923   Value *Count = getOrCreateTripCount(L);
7924   // Reuse existing vector loop preheader for TC checks.
7925   // Note that new preheader block is generated for vector loop.
7926   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
7927   IRBuilder<> Builder(TCCheckBlock->getTerminator());
7928 
7929   // Generate code to check if the loop's trip count is less than VF * UF of the
7930   // main vector loop.
7931   auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF) ?
7932       ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
7933 
7934   Value *CheckMinIters = Builder.CreateICmp(
7935       P, Count, createStepForVF(Builder, Count->getType(), VFactor, UFactor),
7936       "min.iters.check");
7937 
7938   if (!ForEpilogue)
7939     TCCheckBlock->setName("vector.main.loop.iter.check");
7940 
7941   // Create new preheader for vector loop.
7942   LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
7943                                    DT, LI, nullptr, "vector.ph");
7944 
7945   if (ForEpilogue) {
7946     assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
7947                                  DT->getNode(Bypass)->getIDom()) &&
7948            "TC check is expected to dominate Bypass");
7949 
7950     // Update dominator for Bypass & LoopExit.
7951     DT->changeImmediateDominator(Bypass, TCCheckBlock);
7952     if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF))
7953       // For loops with multiple exits, there's no edge from the middle block
7954       // to exit blocks (as the epilogue must run) and thus no need to update
7955       // the immediate dominator of the exit blocks.
7956       DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
7957 
7958     LoopBypassBlocks.push_back(TCCheckBlock);
7959 
7960     // Save the trip count so we don't have to regenerate it in the
7961     // vec.epilog.iter.check. This is safe to do because the trip count
7962     // generated here dominates the vector epilog iter check.
7963     EPI.TripCount = Count;
7964   }
7965 
7966   ReplaceInstWithInst(
7967       TCCheckBlock->getTerminator(),
7968       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
7969 
7970   return TCCheckBlock;
7971 }
7972 
7973 //===--------------------------------------------------------------------===//
7974 // EpilogueVectorizerEpilogueLoop
7975 //===--------------------------------------------------------------------===//
7976 
7977 /// This function is partially responsible for generating the control flow
7978 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
7979 std::pair<BasicBlock *, Value *>
7980 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
7981   MDNode *OrigLoopID = OrigLoop->getLoopID();
7982   Loop *Lp = createVectorLoopSkeleton("vec.epilog.");
7983 
  // Now, compare the remaining count and if there aren't enough iterations to
  // execute the vectorized epilogue, skip to the scalar part.
7986   BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader;
7987   VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check");
7988   LoopVectorPreHeader =
7989       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
7990                  LI, nullptr, "vec.epilog.ph");
7991   emitMinimumVectorEpilogueIterCountCheck(LoopScalarPreHeader,
7992                                           VecEpilogueIterationCountCheck);
7993 
7994   // Adjust the control flow taking the state info from the main loop
7995   // vectorization into account.
7996   assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck &&
7997          "expected this to be saved from the previous pass.");
7998   EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith(
7999       VecEpilogueIterationCountCheck, LoopVectorPreHeader);
8000 
8001   DT->changeImmediateDominator(LoopVectorPreHeader,
8002                                EPI.MainLoopIterationCountCheck);
8003 
8004   EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
8005       VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8006 
8007   if (EPI.SCEVSafetyCheck)
8008     EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith(
8009         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8010   if (EPI.MemSafetyCheck)
8011     EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith(
8012         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8013 
8014   DT->changeImmediateDominator(
8015       VecEpilogueIterationCountCheck,
8016       VecEpilogueIterationCountCheck->getSinglePredecessor());
8017 
8018   DT->changeImmediateDominator(LoopScalarPreHeader,
8019                                EPI.EpilogueIterationCountCheck);
8020   if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF))
    // If there is an epilogue which must run, there's no edge from the
    // middle block to exit blocks and thus no need to update the immediate
    // dominator of the exit blocks.
8024     DT->changeImmediateDominator(LoopExitBlock,
8025                                  EPI.EpilogueIterationCountCheck);
8026 
8027   // Keep track of bypass blocks, as they feed start values to the induction
8028   // phis in the scalar loop preheader.
8029   if (EPI.SCEVSafetyCheck)
8030     LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck);
8031   if (EPI.MemSafetyCheck)
8032     LoopBypassBlocks.push_back(EPI.MemSafetyCheck);
8033   LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck);
8034 
8035   // The vec.epilog.iter.check block may contain Phi nodes from reductions which
8036   // merge control-flow from the latch block and the middle block. Update the
8037   // incoming values here and move the Phi into the preheader.
8038   SmallVector<PHINode *, 4> PhisInBlock;
8039   for (PHINode &Phi : VecEpilogueIterationCountCheck->phis())
8040     PhisInBlock.push_back(&Phi);
8041 
8042   for (PHINode *Phi : PhisInBlock) {
8043     Phi->replaceIncomingBlockWith(
8044         VecEpilogueIterationCountCheck->getSinglePredecessor(),
8045         VecEpilogueIterationCountCheck);
8046     Phi->removeIncomingValue(EPI.EpilogueIterationCountCheck);
8047     if (EPI.SCEVSafetyCheck)
8048       Phi->removeIncomingValue(EPI.SCEVSafetyCheck);
8049     if (EPI.MemSafetyCheck)
8050       Phi->removeIncomingValue(EPI.MemSafetyCheck);
8051     Phi->moveBefore(LoopVectorPreHeader->getFirstNonPHI());
8052   }
8053 
  // Generate a resume induction for the vector epilogue and put it in the
  // vector epilogue preheader.
8056   Type *IdxTy = Legal->getWidestInductionType();
8057   PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
8058                                          LoopVectorPreHeader->getFirstNonPHI());
8059   EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
8060   EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
8061                            EPI.MainLoopIterationCountCheck);
8062 
8063   // Generate the induction variable.
8064   createHeaderBranch(Lp);
8065 
8066   // Generate induction resume values. These variables save the new starting
8067   // indexes for the scalar loop. They are used to test if there are any tail
8068   // iterations left once the vector loop has completed.
  // Note that when the vectorized epilogue is skipped due to the iteration
  // count check, the resume value for the induction variable comes from
  // the trip count of the main vector loop, hence the AdditionalBypass
  // argument.
8073   createInductionResumeValues(Lp, {VecEpilogueIterationCountCheck,
8074                                    EPI.VectorTripCount} /* AdditionalBypass */);
8075 
8076   return {completeLoopSkeleton(Lp, OrigLoopID), EPResumeVal};
8077 }
8078 
8079 BasicBlock *
8080 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
8081     BasicBlock *Bypass, BasicBlock *Insert) {
8082 
8083   assert(EPI.TripCount &&
         "Expected trip count to have been saved in the first pass.");
8085   assert(
8086       (!isa<Instruction>(EPI.TripCount) ||
8087        DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
8088       "saved trip count does not dominate insertion point.");
8089   Value *TC = EPI.TripCount;
8090   IRBuilder<> Builder(Insert->getTerminator());
8091   Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");
8092 
8093   // Generate code to check if the loop's trip count is less than VF * UF of the
8094   // vector epilogue loop.
8095   auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ?
8096       ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
8097 
8098   Value *CheckMinIters =
8099       Builder.CreateICmp(P, Count,
8100                          createStepForVF(Builder, Count->getType(),
8101                                          EPI.EpilogueVF, EPI.EpilogueUF),
8102                          "min.epilog.iters.check");
8103 
8104   ReplaceInstWithInst(
8105       Insert->getTerminator(),
8106       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8107 
8108   LoopBypassBlocks.push_back(Insert);
8109   return Insert;
8110 }
8111 
8112 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
8113   LLVM_DEBUG({
8114     dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
8115            << "Epilogue Loop VF:" << EPI.EpilogueVF
8116            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8117   });
8118 }
8119 
8120 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
8121   DEBUG_WITH_TYPE(VerboseDebug, {
8122     dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n";
8123   });
8124 }
8125 
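// Test \p Predicate on VFs in \p Range in powers of two, and clamp Range.End
// to the first VF at which the result differs from the result at Range.Start.
// For example, if the predicate holds for VF = 2 and VF = 4 but not for
// VF = 8, a range [2, 16) is clamped to [2, 8) and true is returned.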
8126 bool LoopVectorizationPlanner::getDecisionAndClampRange(
8127     const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
8128   assert(!Range.isEmpty() && "Trying to test an empty VF range.");
8129   bool PredicateAtRangeStart = Predicate(Range.Start);
8130 
8131   for (ElementCount TmpVF = Range.Start * 2;
8132        ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
8133     if (Predicate(TmpVF) != PredicateAtRangeStart) {
8134       Range.End = TmpVF;
8135       break;
8136     }
8137 
8138   return PredicateAtRangeStart;
8139 }
8140 
8141 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
8142 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
8143 /// of VF's starting at a given VF and extending it as much as possible. Each
8144 /// vectorization decision can potentially shorten this sub-range during
8145 /// buildVPlan().
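/// For example, with MinVF = 2 and MaxVF = 8 this may build one VPlan
/// covering {2, 4} and a second covering {8}, if some vectorization decision
/// changes between VF = 4 and VF = 8.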
8146 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
8147                                            ElementCount MaxVF) {
8148   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8149   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8150     VFRange SubRange = {VF, MaxVFPlusOne};
8151     VPlans.push_back(buildVPlan(SubRange));
8152     VF = SubRange.End;
8153   }
8154 }
8155 
8156 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
8157                                          VPlanPtr &Plan) {
8158   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
8159 
8160   // Look for cached value.
8161   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
8162   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
8163   if (ECEntryIt != EdgeMaskCache.end())
8164     return ECEntryIt->second;
8165 
8166   VPValue *SrcMask = createBlockInMask(Src, Plan);
8167 
8168   // The terminator has to be a branch inst!
8169   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
8170   assert(BI && "Unexpected terminator found");
8171 
8172   if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
8173     return EdgeMaskCache[Edge] = SrcMask;
8174 
8175   // If source is an exiting block, we know the exit edge is dynamically dead
8176   // in the vector loop, and thus we don't need to restrict the mask.  Avoid
8177   // adding uses of an otherwise potentially dead instruction.
8178   if (OrigLoop->isLoopExiting(Src))
8179     return EdgeMaskCache[Edge] = SrcMask;
8180 
8181   VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition());
8182   assert(EdgeMask && "No Edge Mask found for condition");
8183 
8184   if (BI->getSuccessor(0) != Dst)
8185     EdgeMask = Builder.createNot(EdgeMask, BI->getDebugLoc());
8186 
8187   if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND.
8188     // The condition is 'SrcMask && EdgeMask', which is equivalent to
8189     // 'select i1 SrcMask, i1 EdgeMask, i1 false'.
8190     // The select version does not introduce new UB if SrcMask is false and
8191     // EdgeMask is poison. Using 'and' here introduces undefined behavior.
8192     VPValue *False = Plan->getOrAddVPValue(
8193         ConstantInt::getFalse(BI->getCondition()->getType()));
8194     EdgeMask =
8195         Builder.createSelect(SrcMask, EdgeMask, False, BI->getDebugLoc());
8196   }
8197 
8198   return EdgeMaskCache[Edge] = EdgeMask;
8199 }
8200 
8201 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
8202   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
8203 
8204   // Look for cached value.
8205   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
8206   if (BCEntryIt != BlockMaskCache.end())
8207     return BCEntryIt->second;
8208 
8209   // All-one mask is modelled as no-mask following the convention for masked
8210   // load/store/gather/scatter. Initialize BlockMask to no-mask.
8211   VPValue *BlockMask = nullptr;
8212 
8213   if (OrigLoop->getHeader() == BB) {
8214     if (!CM.blockNeedsPredicationForAnyReason(BB))
8215       return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
8216 
8217     // Introduce the early-exit compare IV <= BTC to form header block mask.
8218     // This is used instead of IV < TC because TC may wrap, unlike BTC. Start by
8219     // constructing the desired canonical IV in the header block as its first
8220     // non-phi instructions.
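    // For example, with an i8 induction over 256 iterations the trip count
    // wraps to 0 while the backedge-taken count is still representable as
    // 255.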
8221     assert(CM.foldTailByMasking() && "must fold the tail");
8222     VPBasicBlock *HeaderVPBB = Plan->getEntry()->getEntryBasicBlock();
8223     auto NewInsertionPoint = HeaderVPBB->getFirstNonPhi();
8224     auto *IV = new VPWidenCanonicalIVRecipe(Plan->getCanonicalIV());
8225     HeaderVPBB->insert(IV, HeaderVPBB->getFirstNonPhi());
8226 
8227     VPBuilder::InsertPointGuard Guard(Builder);
8228     Builder.setInsertPoint(HeaderVPBB, NewInsertionPoint);
8229     if (CM.TTI.emitGetActiveLaneMask()) {
8230       VPValue *TC = Plan->getOrCreateTripCount();
8231       BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV, TC});
8232     } else {
8233       VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
8234       BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
8235     }
8236     return BlockMaskCache[BB] = BlockMask;
8237   }
8238 
8239   // This is the block mask. We OR all incoming edges.
8240   for (auto *Predecessor : predecessors(BB)) {
8241     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
8242     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
8243       return BlockMaskCache[BB] = EdgeMask;
8244 
8245     if (!BlockMask) { // BlockMask has its initialized nullptr value.
8246       BlockMask = EdgeMask;
8247       continue;
8248     }
8249 
8250     BlockMask = Builder.createOr(BlockMask, EdgeMask, {});
8251   }
8252 
8253   return BlockMaskCache[BB] = BlockMask;
8254 }
8255 
8256 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I,
8257                                                 ArrayRef<VPValue *> Operands,
8258                                                 VFRange &Range,
8259                                                 VPlanPtr &Plan) {
8260   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
8261          "Must be called with either a load or store");
8262 
8263   auto willWiden = [&](ElementCount VF) -> bool {
8264     if (VF.isScalar())
8265       return false;
8266     LoopVectorizationCostModel::InstWidening Decision =
8267         CM.getWideningDecision(I, VF);
8268     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
8269            "CM decision should be taken at this point.");
8270     if (Decision == LoopVectorizationCostModel::CM_Interleave)
8271       return true;
8272     if (CM.isScalarAfterVectorization(I, VF) ||
8273         CM.isProfitableToScalarize(I, VF))
8274       return false;
8275     return Decision != LoopVectorizationCostModel::CM_Scalarize;
8276   };
8277 
8278   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8279     return nullptr;
8280 
8281   VPValue *Mask = nullptr;
8282   if (Legal->isMaskRequired(I))
8283     Mask = createBlockInMask(I->getParent(), Plan);
8284 
8285   // Determine if the pointer operand of the access is either consecutive or
8286   // reverse consecutive.
8287   LoopVectorizationCostModel::InstWidening Decision =
8288       CM.getWideningDecision(I, Range.Start);
8289   bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse;
8290   bool Consecutive =
8291       Reverse || Decision == LoopVectorizationCostModel::CM_Widen;
8292 
8293   if (LoadInst *Load = dyn_cast<LoadInst>(I))
8294     return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask,
8295                                               Consecutive, Reverse);
8296 
8297   StoreInst *Store = cast<StoreInst>(I);
8298   return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0],
8299                                             Mask, Consecutive, Reverse);
8300 }
8301 
8302 static VPWidenIntOrFpInductionRecipe *
8303 createWidenInductionRecipe(PHINode *Phi, Instruction *PhiOrTrunc,
8304                            VPValue *Start, const InductionDescriptor &IndDesc,
8305                            LoopVectorizationCostModel &CM, ScalarEvolution &SE,
8306                            Loop &OrigLoop, VFRange &Range) {
8307   // Returns true if an instruction \p I should be scalarized instead of
8308   // vectorized for the chosen vectorization factor.
8309   auto ShouldScalarizeInstruction = [&CM](Instruction *I, ElementCount VF) {
8310     return CM.isScalarAfterVectorization(I, VF) ||
8311            CM.isProfitableToScalarize(I, VF);
8312   };
8313 
8314   bool NeedsScalarIV = LoopVectorizationPlanner::getDecisionAndClampRange(
8315       [&](ElementCount VF) {
8316         // Returns true if we should generate a scalar version of \p IV.
8317         if (ShouldScalarizeInstruction(PhiOrTrunc, VF))
8318           return true;
8319         auto isScalarInst = [&](User *U) -> bool {
8320           auto *I = cast<Instruction>(U);
8321           return OrigLoop.contains(I) && ShouldScalarizeInstruction(I, VF);
8322         };
8323         return any_of(PhiOrTrunc->users(), isScalarInst);
8324       },
8325       Range);
8326   bool NeedsScalarIVOnly = LoopVectorizationPlanner::getDecisionAndClampRange(
8327       [&](ElementCount VF) {
8328         return ShouldScalarizeInstruction(PhiOrTrunc, VF);
8329       },
8330       Range);
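  // NeedsScalarIV: the IV or one of its in-loop users is scalarized, so
  // scalar steps must be generated. NeedsScalarIVOnly: the IV itself is
  // scalar after vectorization, so no vector IV is needed at all.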
8331   assert(IndDesc.getStartValue() ==
8332          Phi->getIncomingValueForBlock(OrigLoop.getLoopPreheader()));
8333   assert(SE.isLoopInvariant(IndDesc.getStep(), &OrigLoop) &&
8334          "step must be loop invariant");
8335   if (auto *TruncI = dyn_cast<TruncInst>(PhiOrTrunc)) {
8336     return new VPWidenIntOrFpInductionRecipe(
8337         Phi, Start, IndDesc, TruncI, NeedsScalarIV, !NeedsScalarIVOnly, SE);
8338   }
8339   assert(isa<PHINode>(PhiOrTrunc) && "must be a phi node here");
8340   return new VPWidenIntOrFpInductionRecipe(Phi, Start, IndDesc, NeedsScalarIV,
8341                                            !NeedsScalarIVOnly, SE);
8342 }
8343 
8344 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionPHI(
8345     PHINode *Phi, ArrayRef<VPValue *> Operands, VFRange &Range) const {
8346 
8347   // Check if this is an integer or fp induction. If so, build the recipe that
8348   // produces its scalar and vector values.
8349   if (auto *II = Legal->getIntOrFpInductionDescriptor(Phi))
8350     return createWidenInductionRecipe(Phi, Phi, Operands[0], *II, CM,
8351                                       *PSE.getSE(), *OrigLoop, Range);
8352 
8353   return nullptr;
8354 }
8355 
8356 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate(
8357     TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range,
8358     VPlan &Plan) const {
8359   // Optimize the special case where the source is a constant integer
8360   // induction variable. Notice that we can only optimize the 'trunc' case
8361   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
8362   // (c) other casts depend on pointer size.
8363 
8364   // Determine whether \p K is a truncation based on an induction variable that
8365   // can be optimized.
8366   auto isOptimizableIVTruncate =
8367       [&](Instruction *K) -> std::function<bool(ElementCount)> {
8368     return [=](ElementCount VF) -> bool {
8369       return CM.isOptimizableIVTruncate(K, VF);
8370     };
8371   };
8372 
8373   if (LoopVectorizationPlanner::getDecisionAndClampRange(
8374           isOptimizableIVTruncate(I), Range)) {
8375 
8376     auto *Phi = cast<PHINode>(I->getOperand(0));
8377     const InductionDescriptor &II = *Legal->getIntOrFpInductionDescriptor(Phi);
8378     VPValue *Start = Plan.getOrAddVPValue(II.getStartValue());
8379     return createWidenInductionRecipe(Phi, I, Start, II, CM, *PSE.getSE(),
8380                                       *OrigLoop, Range);
8381   }
8382   return nullptr;
8383 }
8384 
8385 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi,
8386                                                 ArrayRef<VPValue *> Operands,
8387                                                 VPlanPtr &Plan) {
8388   // If all incoming values are equal, the incoming VPValue can be used directly
8389   // instead of creating a new VPBlendRecipe.
8390   VPValue *FirstIncoming = Operands[0];
8391   if (all_of(Operands, [FirstIncoming](const VPValue *Inc) {
8392         return FirstIncoming == Inc;
8393       })) {
8394     return Operands[0];
8395   }
8396 
8397   unsigned NumIncoming = Phi->getNumIncomingValues();
8398   // For in-loop reductions, we do not need to create an additional select.
8399   VPValue *InLoopVal = nullptr;
8400   for (unsigned In = 0; In < NumIncoming; In++) {
8401     PHINode *PhiOp =
8402         dyn_cast_or_null<PHINode>(Operands[In]->getUnderlyingValue());
8403     if (PhiOp && CM.isInLoopReduction(PhiOp)) {
8404       assert(!InLoopVal && "Found more than one in-loop reduction!");
8405       InLoopVal = Operands[In];
8406     }
8407   }
8408 
8409   assert((!InLoopVal || NumIncoming == 2) &&
8410          "Found an in-loop reduction for PHI with unexpected number of "
8411          "incoming values");
8412   if (InLoopVal)
8413     return Operands[Operands[0] == InLoopVal ? 1 : 0];
8414 
8415   // We know that all PHIs in non-header blocks are converted into selects, so
8416   // we don't have to worry about the insertion order and we can just use the
8417   // builder. At this point we generate the predication tree. There may be
8418   // duplications since this is a simple recursive scan, but future
8419   // optimizations will clean it up.
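  // For example (illustrative): a phi
  //   %p = phi [ %a, %bb.a ], [ %b, %bb.b ]
  // becomes a VPBlendRecipe with operands {%a, mask(bb.a), %b, mask(bb.b)},
  // which is later lowered to a chain of selects.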
8420   SmallVector<VPValue *, 2> OperandsWithMask;
8421 
8422   for (unsigned In = 0; In < NumIncoming; In++) {
8423     VPValue *EdgeMask =
8424       createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
8425     assert((EdgeMask || NumIncoming == 1) &&
8426            "Multiple predecessors with one having a full mask");
8427     OperandsWithMask.push_back(Operands[In]);
8428     if (EdgeMask)
8429       OperandsWithMask.push_back(EdgeMask);
8430   }
8431   return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask));
8432 }
8433 
8434 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
8435                                                    ArrayRef<VPValue *> Operands,
8436                                                    VFRange &Range) const {
8437 
8438   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8439       [this, CI](ElementCount VF) {
8440         return CM.isScalarWithPredication(CI, VF);
8441       },
8442       Range);
8443 
8444   if (IsPredicated)
8445     return nullptr;
8446 
8447   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8448   if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
8449              ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
8450              ID == Intrinsic::pseudoprobe ||
8451              ID == Intrinsic::experimental_noalias_scope_decl))
8452     return nullptr;
8453 
8454   auto willWiden = [&](ElementCount VF) -> bool {
8455     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8456     // The following case may be scalarized depending on the VF.
    // The decision below checks whether to use an intrinsic or a regular call
    // for the vectorized version of the instruction, i.e., whether an
    // intrinsic call is cheaper than a library call.
8460     bool NeedToScalarize = false;
8461     InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
8462     InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0;
8463     bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
8464     return UseVectorIntrinsic || !NeedToScalarize;
8465   };
8466 
8467   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8468     return nullptr;
8469 
8470   ArrayRef<VPValue *> Ops = Operands.take_front(CI->arg_size());
8471   return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end()));
8472 }
8473 
8474 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
8475   assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
8476          !isa<StoreInst>(I) && "Instruction should have been handled earlier");
8477   // Instruction should be widened, unless it is scalar after vectorization,
8478   // scalarization is profitable or it is predicated.
8479   auto WillScalarize = [this, I](ElementCount VF) -> bool {
8480     return CM.isScalarAfterVectorization(I, VF) ||
8481            CM.isProfitableToScalarize(I, VF) ||
8482            CM.isScalarWithPredication(I, VF);
8483   };
8484   return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
8485                                                              Range);
8486 }
8487 
8488 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
8489                                            ArrayRef<VPValue *> Operands) const {
8490   auto IsVectorizableOpcode = [](unsigned Opcode) {
8491     switch (Opcode) {
8492     case Instruction::Add:
8493     case Instruction::And:
8494     case Instruction::AShr:
8495     case Instruction::BitCast:
8496     case Instruction::FAdd:
8497     case Instruction::FCmp:
8498     case Instruction::FDiv:
8499     case Instruction::FMul:
8500     case Instruction::FNeg:
8501     case Instruction::FPExt:
8502     case Instruction::FPToSI:
8503     case Instruction::FPToUI:
8504     case Instruction::FPTrunc:
8505     case Instruction::FRem:
8506     case Instruction::FSub:
8507     case Instruction::ICmp:
8508     case Instruction::IntToPtr:
8509     case Instruction::LShr:
8510     case Instruction::Mul:
8511     case Instruction::Or:
8512     case Instruction::PtrToInt:
8513     case Instruction::SDiv:
8514     case Instruction::Select:
8515     case Instruction::SExt:
8516     case Instruction::Shl:
8517     case Instruction::SIToFP:
8518     case Instruction::SRem:
8519     case Instruction::Sub:
8520     case Instruction::Trunc:
8521     case Instruction::UDiv:
8522     case Instruction::UIToFP:
8523     case Instruction::URem:
8524     case Instruction::Xor:
8525     case Instruction::ZExt:
8526       return true;
8527     }
8528     return false;
8529   };
8530 
8531   if (!IsVectorizableOpcode(I->getOpcode()))
8532     return nullptr;
8533 
8534   // Success: widen this instruction.
8535   return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end()));
8536 }
8537 
8538 void VPRecipeBuilder::fixHeaderPhis() {
8539   BasicBlock *OrigLatch = OrigLoop->getLoopLatch();
8540   for (VPHeaderPHIRecipe *R : PhisToFix) {
8541     auto *PN = cast<PHINode>(R->getUnderlyingValue());
8542     VPRecipeBase *IncR =
8543         getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch)));
8544     R->addOperand(IncR->getVPSingleValue());
8545   }
8546 }
8547 
8548 VPBasicBlock *VPRecipeBuilder::handleReplication(
8549     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
8550     VPlanPtr &Plan) {
8551   bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
8552       [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
8553       Range);
8554 
8555   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8556       [&](ElementCount VF) { return CM.isPredicatedInst(I, VF, IsUniform); },
8557       Range);
8558 
8559   // Even if the instruction is not marked as uniform, there are certain
8560   // intrinsic calls that can be effectively treated as such, so we check for
8561   // them here. Conservatively, we only do this for scalable vectors, since
8562   // for fixed-width VFs we can always fall back on full scalarization.
8563   if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) {
8564     switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
8565     case Intrinsic::assume:
8566     case Intrinsic::lifetime_start:
8567     case Intrinsic::lifetime_end:
8568       // For scalable vectors if one of the operands is variant then we still
8569       // want to mark as uniform, which will generate one instruction for just
8570       // the first lane of the vector. We can't scalarize the call in the same
8571       // way as for fixed-width vectors because we don't know how many lanes
8572       // there are.
8573       //
8574       // The reasons for doing it this way for scalable vectors are:
      //   1. For the assume intrinsic, generating the instruction for the
      //      first lane is still better than not generating any at all. For
      //      example, the input may be a splat across all lanes.
8578       //   2. For the lifetime start/end intrinsics the pointer operand only
8579       //      does anything useful when the input comes from a stack object,
8580       //      which suggests it should always be uniform. For non-stack objects
8581       //      the effect is to poison the object, which still allows us to
8582       //      remove the call.
8583       IsUniform = true;
8584       break;
8585     default:
8586       break;
8587     }
8588   }
8589 
8590   auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
8591                                        IsUniform, IsPredicated);
8592   setRecipe(I, Recipe);
8593   Plan->addVPValue(I, Recipe);
8594 
8595   // Find if I uses a predicated instruction. If so, it will use its scalar
8596   // value. Avoid hoisting the insert-element which packs the scalar value into
8597   // a vector value, as that happens iff all users use the vector value.
8598   for (VPValue *Op : Recipe->operands()) {
8599     auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef());
8600     if (!PredR)
8601       continue;
8602     auto *RepR =
8603         cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef());
8604     assert(RepR->isPredicated() &&
8605            "expected Replicate recipe to be predicated");
8606     RepR->setAlsoPack(false);
8607   }
8608 
8609   // Finalize the recipe for Instr, first if it is not predicated.
8610   if (!IsPredicated) {
8611     LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
8612     VPBB->appendRecipe(Recipe);
8613     return VPBB;
8614   }
8615   LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
8616 
8617   VPBlockBase *SingleSucc = VPBB->getSingleSuccessor();
8618   assert(SingleSucc && "VPBB must have a single successor when handling "
8619                        "predicated replication.");
8620   VPBlockUtils::disconnectBlocks(VPBB, SingleSucc);
8621   // Record predicated instructions for above packing optimizations.
8622   VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
8623   VPBlockUtils::insertBlockAfter(Region, VPBB);
8624   auto *RegSucc = new VPBasicBlock();
8625   VPBlockUtils::insertBlockAfter(RegSucc, Region);
8626   VPBlockUtils::connectBlocks(RegSucc, SingleSucc);
8627   return RegSucc;
8628 }
8629 
8630 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
8631                                                       VPRecipeBase *PredRecipe,
8632                                                       VPlanPtr &Plan) {
8633   // Instructions marked for predication are replicated and placed under an
8634   // if-then construct to prevent side-effects.
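  //
  // The resulting region has the following shape (illustrative):
  //
  //   pred.<opcode>.entry:          ; BranchOnMask(BlockInMask)
  //      |     \
  //      |   pred.<opcode>.if:      ; the replicated, predicated recipe
  //      |     /
  //   pred.<opcode>.continue:       ; VPPredInstPHIRecipe merging the result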
8635 
8636   // Generate recipes to compute the block mask for this region.
8637   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
8638 
8639   // Build the triangular if-then region.
8640   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
8641   assert(Instr->getParent() && "Predicated instruction not in any basic block");
8642   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
8643   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
8644   auto *PHIRecipe = Instr->getType()->isVoidTy()
8645                         ? nullptr
8646                         : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr));
8647   if (PHIRecipe) {
8648     Plan->removeVPValueFor(Instr);
8649     Plan->addVPValue(Instr, PHIRecipe);
8650   }
8651   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
8652   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
8653   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
8654 
8655   // Note: first set Entry as region entry and then connect successors starting
8656   // from it in order, to propagate the "parent" of each VPBasicBlock.
8657   VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
8658   VPBlockUtils::connectBlocks(Pred, Exit);
8659 
8660   return Region;
8661 }
8662 
8663 VPRecipeOrVPValueTy
8664 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
8665                                         ArrayRef<VPValue *> Operands,
8666                                         VFRange &Range, VPlanPtr &Plan) {
8667   // First, check for specific widening recipes that deal with calls, memory
8668   // operations, inductions and Phi nodes.
8669   if (auto *CI = dyn_cast<CallInst>(Instr))
8670     return toVPRecipeResult(tryToWidenCall(CI, Operands, Range));
8671 
8672   if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8673     return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan));
8674 
8675   VPRecipeBase *Recipe;
8676   if (auto Phi = dyn_cast<PHINode>(Instr)) {
8677     if (Phi->getParent() != OrigLoop->getHeader())
8678       return tryToBlend(Phi, Operands, Plan);
8679     if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands, Range)))
8680       return toVPRecipeResult(Recipe);
8681 
8682     VPHeaderPHIRecipe *PhiRecipe = nullptr;
8683     if (Legal->isReductionVariable(Phi) || Legal->isFirstOrderRecurrence(Phi)) {
8684       VPValue *StartV = Operands[0];
8685       if (Legal->isReductionVariable(Phi)) {
8686         const RecurrenceDescriptor &RdxDesc =
8687             Legal->getReductionVars().find(Phi)->second;
8688         assert(RdxDesc.getRecurrenceStartValue() ==
8689                Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8690         PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV,
8691                                              CM.isInLoopReduction(Phi),
8692                                              CM.useOrderedReductions(RdxDesc));
8693       } else {
8694         PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV);
8695       }
8696 
8697       // Record the incoming value from the backedge, so we can add the incoming
8698       // value from the backedge after all recipes have been created.
8699       recordRecipeOf(cast<Instruction>(
8700           Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch())));
8701       PhisToFix.push_back(PhiRecipe);
8702     } else {
8703       // TODO: record backedge value for remaining pointer induction phis.
8704       assert(Phi->getType()->isPointerTy() &&
8705              "only pointer phis should be handled here");
8706       assert(Legal->getInductionVars().count(Phi) &&
8707              "Not an induction variable");
8708       InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
8709       VPValue *Start = Plan->getOrAddVPValue(II.getStartValue());
8710       PhiRecipe = new VPWidenPHIRecipe(Phi, Start);
8711     }
8712 
8713     return toVPRecipeResult(PhiRecipe);
8714   }
8715 
8716   if (isa<TruncInst>(Instr) &&
8717       (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands,
8718                                                Range, *Plan)))
8719     return toVPRecipeResult(Recipe);
8720 
8721   if (!shouldWiden(Instr, Range))
8722     return nullptr;
8723 
8724   if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
8725     return toVPRecipeResult(new VPWidenGEPRecipe(
8726         GEP, make_range(Operands.begin(), Operands.end()), OrigLoop));
8727 
8728   if (auto *SI = dyn_cast<SelectInst>(Instr)) {
8729     bool InvariantCond =
8730         PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
8731     return toVPRecipeResult(new VPWidenSelectRecipe(
8732         *SI, make_range(Operands.begin(), Operands.end()), InvariantCond));
8733   }
8734 
8735   return toVPRecipeResult(tryToWiden(Instr, Operands));
8736 }
8737 
8738 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8739                                                         ElementCount MaxVF) {
8740   assert(OrigLoop->isInnermost() && "Inner loop expected.");
8741 
8742   // Collect instructions from the original loop that will become trivially dead
8743   // in the vectorized loop. We don't need to vectorize these instructions. For
8744   // example, original induction update instructions can become dead because we
8745   // separately emit induction "steps" when generating code for the new loop.
8746   // Similarly, we create a new latch condition when setting up the structure
8747   // of the new loop, so the old one can become dead.
8748   SmallPtrSet<Instruction *, 4> DeadInstructions;
8749   collectTriviallyDeadInstructions(DeadInstructions);
8750 
8751   // Add assume instructions we need to drop to DeadInstructions, to prevent
8752   // them from being added to the VPlan.
  // TODO: We only need to drop assumes in blocks that get flattened. If the
8754   // control flow is preserved, we should keep them.
8755   auto &ConditionalAssumes = Legal->getConditionalAssumes();
8756   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
8757 
8758   MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
8759   // Dead instructions do not need sinking. Remove them from SinkAfter.
8760   for (Instruction *I : DeadInstructions)
8761     SinkAfter.erase(I);
8762 
8763   // Cannot sink instructions after dead instructions (there won't be any
8764   // recipes for them). Instead, find the first non-dead previous instruction.
8765   for (auto &P : Legal->getSinkAfter()) {
8766     Instruction *SinkTarget = P.second;
8767     Instruction *FirstInst = &*SinkTarget->getParent()->begin();
8768     (void)FirstInst;
8769     while (DeadInstructions.contains(SinkTarget)) {
8770       assert(
8771           SinkTarget != FirstInst &&
8772           "Must find a live instruction (at least the one feeding the "
8773           "first-order recurrence PHI) before reaching beginning of the block");
8774       SinkTarget = SinkTarget->getPrevNode();
8775       assert(SinkTarget != P.first &&
8776              "sink source equals target, no sinking required");
8777     }
8778     P.second = SinkTarget;
8779   }
8780 
8781   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
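  // Build one VPlan for each sub-range of VFs [VF, SubRange.End). E.g.
  // (illustrative), MinVF = 2 and MaxVF = 16 may yield plans for {2,4} and
  // {8,16}, depending on how getDecisionAndClampRange clamps each sub-range.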
8782   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8783     VFRange SubRange = {VF, MaxVFPlusOne};
8784     VPlans.push_back(
8785         buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
8786     VF = SubRange.End;
8787   }
8788 }
8789 
8790 // Add a VPCanonicalIVPHIRecipe starting at 0 to the header, a
8791 // CanonicalIVIncrement{NUW} VPInstruction to increment it by VF * UF and a
8792 // BranchOnCount VPInstruction to the latch.
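// The result looks roughly like this (illustrative, not the exact printed
// form):
//   vector.body:
//     %iv = CANONICAL-INDUCTION [ 0, ph ], [ %iv.next, vector.latch ]
//     ...
//   vector.latch:
//     %iv.next = %iv + VF * UF
//     branch-on-count %iv.next, vector-trip-count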
8793 static void addCanonicalIVRecipes(VPlan &Plan, Type *IdxTy, DebugLoc DL,
8794                                   bool HasNUW, bool IsVPlanNative) {
8795   Value *StartIdx = ConstantInt::get(IdxTy, 0);
8796   auto *StartV = Plan.getOrAddVPValue(StartIdx);
8797 
8798   auto *CanonicalIVPHI = new VPCanonicalIVPHIRecipe(StartV, DL);
8799   VPRegionBlock *TopRegion = Plan.getVectorLoopRegion();
8800   VPBasicBlock *Header = TopRegion->getEntryBasicBlock();
8801   if (IsVPlanNative)
8802     Header = cast<VPBasicBlock>(Header->getSingleSuccessor());
8803   Header->insert(CanonicalIVPHI, Header->begin());
8804 
8805   auto *CanonicalIVIncrement =
8806       new VPInstruction(HasNUW ? VPInstruction::CanonicalIVIncrementNUW
8807                                : VPInstruction::CanonicalIVIncrement,
8808                         {CanonicalIVPHI}, DL);
8809   CanonicalIVPHI->addOperand(CanonicalIVIncrement);
8810 
8811   VPBasicBlock *EB = TopRegion->getExitBasicBlock();
8812   if (IsVPlanNative) {
8813     EB = cast<VPBasicBlock>(EB->getSinglePredecessor());
8814     EB->setCondBit(nullptr);
8815   }
8816   EB->appendRecipe(CanonicalIVIncrement);
8817 
8818   auto *BranchOnCount =
8819       new VPInstruction(VPInstruction::BranchOnCount,
8820                         {CanonicalIVIncrement, &Plan.getVectorTripCount()}, DL);
8821   EB->appendRecipe(BranchOnCount);
8822 }
8823 
8824 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
8825     VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
8826     const MapVector<Instruction *, Instruction *> &SinkAfter) {
8827 
8828   SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
8829 
8830   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
8831 
8832   // ---------------------------------------------------------------------------
8833   // Pre-construction: record ingredients whose recipes we'll need to further
8834   // process after constructing the initial VPlan.
8835   // ---------------------------------------------------------------------------
8836 
8837   // Mark instructions we'll need to sink later and their targets as
8838   // ingredients whose recipe we'll need to record.
8839   for (auto &Entry : SinkAfter) {
8840     RecipeBuilder.recordRecipeOf(Entry.first);
8841     RecipeBuilder.recordRecipeOf(Entry.second);
8842   }
8843   for (auto &Reduction : CM.getInLoopReductionChains()) {
8844     PHINode *Phi = Reduction.first;
8845     RecurKind Kind =
8846         Legal->getReductionVars().find(Phi)->second.getRecurrenceKind();
8847     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
8848 
8849     RecipeBuilder.recordRecipeOf(Phi);
8850     for (auto &R : ReductionOperations) {
8851       RecipeBuilder.recordRecipeOf(R);
      // For min/max reductions, where we have a pair of icmp/select, we also
8853       // need to record the ICmp recipe, so it can be removed later.
8854       assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
8855              "Only min/max recurrences allowed for inloop reductions");
8856       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
8857         RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
8858     }
8859   }
8860 
8861   // For each interleave group which is relevant for this (possibly trimmed)
8862   // Range, add it to the set of groups to be later applied to the VPlan and add
8863   // placeholders for its members' Recipes which we'll be replacing with a
8864   // single VPInterleaveRecipe.
8865   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
8866     auto applyIG = [IG, this](ElementCount VF) -> bool {
8867       return (VF.isVector() && // Query is illegal for VF == 1
8868               CM.getWideningDecision(IG->getInsertPos(), VF) ==
8869                   LoopVectorizationCostModel::CM_Interleave);
8870     };
8871     if (!getDecisionAndClampRange(applyIG, Range))
8872       continue;
8873     InterleaveGroups.insert(IG);
8874     for (unsigned i = 0; i < IG->getFactor(); i++)
8875       if (Instruction *Member = IG->getMember(i))
8876         RecipeBuilder.recordRecipeOf(Member);
8877   };
8878 
8879   // ---------------------------------------------------------------------------
8880   // Build initial VPlan: Scan the body of the loop in a topological order to
8881   // visit each basic block after having visited its predecessor basic blocks.
8882   // ---------------------------------------------------------------------------
8883 
8884   // Create initial VPlan skeleton, with separate header and latch blocks.
8885   VPBasicBlock *HeaderVPBB = new VPBasicBlock();
8886   VPBasicBlock *LatchVPBB = new VPBasicBlock("vector.latch");
8887   VPBlockUtils::insertBlockAfter(LatchVPBB, HeaderVPBB);
8888   auto *TopRegion = new VPRegionBlock(HeaderVPBB, LatchVPBB, "vector loop");
8889   auto Plan = std::make_unique<VPlan>(TopRegion);
8890 
8891   Instruction *DLInst =
8892       getDebugLocFromInstOrOperands(Legal->getPrimaryInduction());
8893   addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(),
8894                         DLInst ? DLInst->getDebugLoc() : DebugLoc(),
8895                         !CM.foldTailByMasking(), false);
8896 
8897   // Scan the body of the loop in a topological order to visit each basic block
8898   // after having visited its predecessor basic blocks.
8899   LoopBlocksDFS DFS(OrigLoop);
8900   DFS.perform(LI);
8901 
8902   VPBasicBlock *VPBB = HeaderVPBB;
8903   SmallVector<VPWidenIntOrFpInductionRecipe *> InductionsToMove;
8904   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
8905     // Relevant instructions from basic block BB will be grouped into VPRecipe
8906     // ingredients and fill a new VPBasicBlock.
8907     unsigned VPBBsForBB = 0;
8908     VPBB->setName(BB->getName());
8909     Builder.setInsertPoint(VPBB);
8910 
8911     // Introduce each ingredient into VPlan.
    // TODO: Model and preserve debug intrinsics in VPlan.
8913     for (Instruction &I : BB->instructionsWithoutDebug()) {
8914       Instruction *Instr = &I;
8915 
8916       // First filter out irrelevant instructions, to ensure no recipes are
8917       // built for them.
8918       if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
8919         continue;
8920 
8921       SmallVector<VPValue *, 4> Operands;
8922       auto *Phi = dyn_cast<PHINode>(Instr);
8923       if (Phi && Phi->getParent() == OrigLoop->getHeader()) {
8924         Operands.push_back(Plan->getOrAddVPValue(
8925             Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())));
8926       } else {
8927         auto OpRange = Plan->mapToVPValues(Instr->operands());
8928         Operands = {OpRange.begin(), OpRange.end()};
8929       }
8930       if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe(
8931               Instr, Operands, Range, Plan)) {
8932         // If Instr can be simplified to an existing VPValue, use it.
8933         if (RecipeOrValue.is<VPValue *>()) {
8934           auto *VPV = RecipeOrValue.get<VPValue *>();
8935           Plan->addVPValue(Instr, VPV);
8936           // If the re-used value is a recipe, register the recipe for the
8937           // instruction, in case the recipe for Instr needs to be recorded.
8938           if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef()))
8939             RecipeBuilder.setRecipe(Instr, R);
8940           continue;
8941         }
8942         // Otherwise, add the new recipe.
8943         VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>();
8944         for (auto *Def : Recipe->definedValues()) {
8945           auto *UV = Def->getUnderlyingValue();
8946           Plan->addVPValue(UV, Def);
8947         }
8948 
8949         if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) &&
8950             HeaderVPBB->getFirstNonPhi() != VPBB->end()) {
8951           // Keep track of VPWidenIntOrFpInductionRecipes not in the phi section
8952           // of the header block. That can happen for truncates of induction
8953           // variables. Those recipes are moved to the phi section of the header
8954           // block after applying SinkAfter, which relies on the original
8955           // position of the trunc.
8956           assert(isa<TruncInst>(Instr));
8957           InductionsToMove.push_back(
8958               cast<VPWidenIntOrFpInductionRecipe>(Recipe));
8959         }
8960         RecipeBuilder.setRecipe(Instr, Recipe);
8961         VPBB->appendRecipe(Recipe);
8962         continue;
8963       }
8964 
      // Otherwise, if all widening options failed, the instruction is to be
      // replicated. This may create a successor for VPBB.
8967       VPBasicBlock *NextVPBB =
8968           RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan);
8969       if (NextVPBB != VPBB) {
8970         VPBB = NextVPBB;
8971         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
8972                                     : "");
8973       }
8974     }
8975 
8976     VPBlockUtils::insertBlockAfter(new VPBasicBlock(), VPBB);
8977     VPBB = cast<VPBasicBlock>(VPBB->getSingleSuccessor());
8978   }
8979 
8980   // Fold the last, empty block into its predecessor.
8981   VPBB = VPBlockUtils::tryToMergeBlockIntoPredecessor(VPBB);
8982   assert(VPBB && "expected to fold last (empty) block");
8983   // After here, VPBB should not be used.
8984   VPBB = nullptr;
8985 
8986   assert(isa<VPRegionBlock>(Plan->getEntry()) &&
8987          !Plan->getEntry()->getEntryBasicBlock()->empty() &&
8988          "entry block must be set to a VPRegionBlock having a non-empty entry "
8989          "VPBasicBlock");
8990   RecipeBuilder.fixHeaderPhis();
8991 
8992   // ---------------------------------------------------------------------------
8993   // Transform initial VPlan: Apply previously taken decisions, in order, to
8994   // bring the VPlan to its final state.
8995   // ---------------------------------------------------------------------------
8996 
8997   // Apply Sink-After legal constraints.
8998   auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * {
8999     auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent());
9000     if (Region && Region->isReplicator()) {
9001       assert(Region->getNumSuccessors() == 1 &&
9002              Region->getNumPredecessors() == 1 && "Expected SESE region!");
9003       assert(R->getParent()->size() == 1 &&
9004              "A recipe in an original replicator region must be the only "
9005              "recipe in its block");
9006       return Region;
9007     }
9008     return nullptr;
9009   };
9010   for (auto &Entry : SinkAfter) {
9011     VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
9012     VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
9013 
9014     auto *TargetRegion = GetReplicateRegion(Target);
9015     auto *SinkRegion = GetReplicateRegion(Sink);
9016     if (!SinkRegion) {
9017       // If the sink source is not a replicate region, sink the recipe directly.
9018       if (TargetRegion) {
9019         // The target is in a replication region, make sure to move Sink to
9020         // the block after it, not into the replication region itself.
9021         VPBasicBlock *NextBlock =
9022             cast<VPBasicBlock>(TargetRegion->getSuccessors().front());
9023         Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi());
9024       } else
9025         Sink->moveAfter(Target);
9026       continue;
9027     }
9028 
9029     // The sink source is in a replicate region. Unhook the region from the CFG.
9030     auto *SinkPred = SinkRegion->getSinglePredecessor();
9031     auto *SinkSucc = SinkRegion->getSingleSuccessor();
9032     VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion);
9033     VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc);
9034     VPBlockUtils::connectBlocks(SinkPred, SinkSucc);
9035 
9036     if (TargetRegion) {
9037       // The target recipe is also in a replicate region, move the sink region
9038       // after the target region.
9039       auto *TargetSucc = TargetRegion->getSingleSuccessor();
9040       VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc);
9041       VPBlockUtils::connectBlocks(TargetRegion, SinkRegion);
9042       VPBlockUtils::connectBlocks(SinkRegion, TargetSucc);
9043     } else {
      // The sink source is in a replicate region, so we need to move the
      // whole replicate region, which should only contain a single recipe in
      // its main block.
9047       auto *SplitBlock =
9048           Target->getParent()->splitAt(std::next(Target->getIterator()));
9049 
9050       auto *SplitPred = SplitBlock->getSinglePredecessor();
9051 
9052       VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock);
9053       VPBlockUtils::connectBlocks(SplitPred, SinkRegion);
9054       VPBlockUtils::connectBlocks(SinkRegion, SplitBlock);
9055     }
9056   }
9057 
9058   VPlanTransforms::removeRedundantCanonicalIVs(*Plan);
9059   VPlanTransforms::removeRedundantInductionCasts(*Plan);
9060 
9061   // Now that sink-after is done, move induction recipes for optimized truncates
9062   // to the phi section of the header block.
9063   for (VPWidenIntOrFpInductionRecipe *Ind : InductionsToMove)
9064     Ind->moveBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi());
9065 
9066   // Adjust the recipes for any inloop reductions.
9067   adjustRecipesForReductions(cast<VPBasicBlock>(TopRegion->getExit()), Plan,
9068                              RecipeBuilder, Range.Start);
9069 
9070   // Introduce a recipe to combine the incoming and previous values of a
9071   // first-order recurrence.
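  // E.g. (illustrative) for VF = 4, splicing the previous iteration's vector
  // <x0,x1,x2,x3> with the current vector <x4,x5,x6,x7> yields <x3,x4,x5,x6>:
  // the last lane of the previous vector followed by the first VF-1 lanes of
  // the current one.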
9072   for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) {
9073     auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R);
9074     if (!RecurPhi)
9075       continue;
9076 
9077     VPRecipeBase *PrevRecipe = RecurPhi->getBackedgeRecipe();
9078     VPBasicBlock *InsertBlock = PrevRecipe->getParent();
9079     auto *Region = GetReplicateRegion(PrevRecipe);
9080     if (Region)
9081       InsertBlock = cast<VPBasicBlock>(Region->getSingleSuccessor());
9082     if (Region || PrevRecipe->isPhi())
9083       Builder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi());
9084     else
9085       Builder.setInsertPoint(InsertBlock, std::next(PrevRecipe->getIterator()));
9086 
9087     auto *RecurSplice = cast<VPInstruction>(
9088         Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice,
9089                              {RecurPhi, RecurPhi->getBackedgeValue()}));
9090 
9091     RecurPhi->replaceAllUsesWith(RecurSplice);
9092     // Set the first operand of RecurSplice to RecurPhi again, after replacing
9093     // all users.
9094     RecurSplice->setOperand(0, RecurPhi);
9095   }
9096 
9097   // Interleave memory: for each Interleave Group we marked earlier as relevant
9098   // for this VPlan, replace the Recipes widening its memory instructions with a
9099   // single VPInterleaveRecipe at its insertion point.
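  // E.g. (illustrative) for a group of factor 2 loading A[2*i] and A[2*i+1],
  // the two member recipes are replaced by a single wide load covering
  // A[2*i .. 2*i+2*VF-1], whose lanes are then de-interleaved with shuffles.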
9100   for (auto IG : InterleaveGroups) {
9101     auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
9102         RecipeBuilder.getRecipe(IG->getInsertPos()));
9103     SmallVector<VPValue *, 4> StoredValues;
9104     for (unsigned i = 0; i < IG->getFactor(); ++i)
9105       if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) {
9106         auto *StoreR =
9107             cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI));
9108         StoredValues.push_back(StoreR->getStoredValue());
9109       }
9110 
9111     auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues,
9112                                         Recipe->getMask());
9113     VPIG->insertBefore(Recipe);
9114     unsigned J = 0;
9115     for (unsigned i = 0; i < IG->getFactor(); ++i)
9116       if (Instruction *Member = IG->getMember(i)) {
9117         if (!Member->getType()->isVoidTy()) {
9118           VPValue *OriginalV = Plan->getVPValue(Member);
9119           Plan->removeVPValueFor(Member);
9120           Plan->addVPValue(Member, VPIG->getVPValue(J));
9121           OriginalV->replaceAllUsesWith(VPIG->getVPValue(J));
9122           J++;
9123         }
9124         RecipeBuilder.getRecipe(Member)->eraseFromParent();
9125       }
9126   }
9127 
  // From this point onwards, VPlan-to-VPlan transformations may change the
  // plan in ways that make it incorrect to access values through their
  // original IR values.
9130   Plan->disableValue2VPValue();
9131 
9132   VPlanTransforms::optimizeInductions(*Plan, *PSE.getSE());
9133   VPlanTransforms::sinkScalarOperands(*Plan);
9134   VPlanTransforms::mergeReplicateRegions(*Plan);
9135   VPlanTransforms::removeDeadRecipes(*Plan, *OrigLoop);
9136 
9137   std::string PlanName;
9138   raw_string_ostream RSO(PlanName);
9139   ElementCount VF = Range.Start;
9140   Plan->addVF(VF);
9141   RSO << "Initial VPlan for VF={" << VF;
9142   for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) {
9143     Plan->addVF(VF);
9144     RSO << "," << VF;
9145   }
9146   RSO << "},UF>=1";
9147   RSO.flush();
9148   Plan->setName(PlanName);
9149 
9150   // Fold Exit block into its predecessor if possible.
9151   // TODO: Fold block earlier once all VPlan transforms properly maintain a
9152   // VPBasicBlock as exit.
9153   VPBlockUtils::tryToMergeBlockIntoPredecessor(TopRegion->getExit());
9154 
9155   assert(VPlanVerifier::verifyPlanIsValid(*Plan) && "VPlan is invalid");
9156   return Plan;
9157 }
9158 
9159 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
  // Outer loop handling: outer loops may require CFG and instruction-level
  // transformations before we can even evaluate whether vectorization is
  // profitable. Since we cannot modify the incoming IR, we need to build
  // VPlan upfront in the vectorization pipeline.
9164   assert(!OrigLoop->isInnermost());
9165   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
9166 
9167   // Create new empty VPlan
9168   auto Plan = std::make_unique<VPlan>();
9169 
9170   // Build hierarchical CFG
9171   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
9172   HCFGBuilder.buildHierarchicalCFG();
9173 
9174   for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
9175        VF *= 2)
9176     Plan->addVF(VF);
9177 
9178   if (EnableVPlanPredication) {
9179     VPlanPredicator VPP(*Plan);
9180     VPP.predicate();
9181 
9182     // Avoid running transformation to recipes until masked code generation in
9183     // VPlan-native path is in place.
9184     return Plan;
9185   }
9186 
9187   SmallPtrSet<Instruction *, 1> DeadInstructions;
9188   VPlanTransforms::VPInstructionsToVPRecipes(
9189       OrigLoop, Plan,
9190       [this](PHINode *P) { return Legal->getIntOrFpInductionDescriptor(P); },
9191       DeadInstructions, *PSE.getSE());
9192 
9193   addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(), DebugLoc(),
9194                         true, true);
9195   return Plan;
9196 }
9197 
// Adjust the recipes for reductions. For in-loop reductions the chain of
// instructions leading from the loop exit instr to the phi needs to be
// converted to reductions, with one operand being vector and the other being
// the scalar reduction chain. For other reductions, a select is introduced
// between the phi and live-out recipes when folding the tail.
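// E.g. (illustrative) an in-loop integer add reduction
//   %sum.next = add i32 %sum.phi, %val
// becomes a VPReductionRecipe computing
//   %sum.next = %sum.phi + reduce.add(%val.vec)
// so the chain stays scalar while the vector operand is reduced on each
// iteration.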
9203 void LoopVectorizationPlanner::adjustRecipesForReductions(
9204     VPBasicBlock *LatchVPBB, VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder,
9205     ElementCount MinVF) {
9206   for (auto &Reduction : CM.getInLoopReductionChains()) {
9207     PHINode *Phi = Reduction.first;
9208     const RecurrenceDescriptor &RdxDesc =
9209         Legal->getReductionVars().find(Phi)->second;
9210     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9211 
9212     if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc))
9213       continue;
9214 
    // ReductionOperations is ordered top-down from the phi's use to the
    // LoopExitValue. We keep track of the previous item (the Chain) to tell
    // which of the two operands will remain scalar and which will be reduced.
    // For min/max the chain is the select instruction.
9219     Instruction *Chain = Phi;
9220     for (Instruction *R : ReductionOperations) {
9221       VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
9222       RecurKind Kind = RdxDesc.getRecurrenceKind();
9223 
9224       VPValue *ChainOp = Plan->getVPValue(Chain);
9225       unsigned FirstOpId;
9226       assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
9227              "Only min/max recurrences allowed for inloop reductions");
9228       // Recognize a call to the llvm.fmuladd intrinsic.
9229       bool IsFMulAdd = (Kind == RecurKind::FMulAdd);
9230       assert((!IsFMulAdd || RecurrenceDescriptor::isFMulAddIntrinsic(R)) &&
9231              "Expected instruction to be a call to the llvm.fmuladd intrinsic");
9232       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9233         assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
9234                "Expected to replace a VPWidenSelectSC");
9235         FirstOpId = 1;
9236       } else {
9237         assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe) ||
9238                 (IsFMulAdd && isa<VPWidenCallRecipe>(WidenRecipe))) &&
9239                "Expected to replace a VPWidenSC");
9240         FirstOpId = 0;
9241       }
9242       unsigned VecOpId =
9243           R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
9244       VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));
9245 
9246       auto *CondOp = CM.blockNeedsPredicationForAnyReason(R->getParent())
9247                          ? RecipeBuilder.createBlockInMask(R->getParent(), Plan)
9248                          : nullptr;
9249 
9250       if (IsFMulAdd) {
9251         // If the instruction is a call to the llvm.fmuladd intrinsic then we
9252         // need to create an fmul recipe to use as the vector operand for the
9253         // fadd reduction.
9254         VPInstruction *FMulRecipe = new VPInstruction(
9255             Instruction::FMul, {VecOp, Plan->getVPValue(R->getOperand(1))});
9256         FMulRecipe->setFastMathFlags(R->getFastMathFlags());
9257         WidenRecipe->getParent()->insert(FMulRecipe,
9258                                          WidenRecipe->getIterator());
9259         VecOp = FMulRecipe;
9260       }
9261       VPReductionRecipe *RedRecipe =
9262           new VPReductionRecipe(&RdxDesc, R, ChainOp, VecOp, CondOp, TTI);
9263       WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
9264       Plan->removeVPValueFor(R);
9265       Plan->addVPValue(R, RedRecipe);
9266       WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator());
9268       WidenRecipe->eraseFromParent();
9269 
9270       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9271         VPRecipeBase *CompareRecipe =
9272             RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
9273         assert(isa<VPWidenRecipe>(CompareRecipe) &&
9274                "Expected to replace a VPWidenSC");
9275         assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 &&
9276                "Expected no remaining users");
9277         CompareRecipe->eraseFromParent();
9278       }
9279       Chain = R;
9280     }
9281   }
9282 
9283   // If tail is folded by masking, introduce selects between the phi
9284   // and the live-out instruction of each reduction, at the beginning of the
9285   // dedicated latch block.
9286   if (CM.foldTailByMasking()) {
9287     Builder.setInsertPoint(LatchVPBB, LatchVPBB->begin());
9288     for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) {
9289       VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
9290       if (!PhiR || PhiR->isInLoop())
9291         continue;
9292       VPValue *Cond =
9293           RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
9294       VPValue *Red = PhiR->getBackedgeValue();
9295       assert(cast<VPRecipeBase>(Red->getDef())->getParent() != LatchVPBB &&
9296              "reduction recipe must be defined before latch");
9297       Builder.createNaryOp(Instruction::Select, {Cond, Red, PhiR});
9298     }
9299   }
9300 }
9301 
9302 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
9303 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
9304                                VPSlotTracker &SlotTracker) const {
9305   O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
9306   IG->getInsertPos()->printAsOperand(O, false);
9307   O << ", ";
9308   getAddr()->printAsOperand(O, SlotTracker);
9309   VPValue *Mask = getMask();
9310   if (Mask) {
9311     O << ", ";
9312     Mask->printAsOperand(O, SlotTracker);
9313   }
9314 
9315   unsigned OpIdx = 0;
9316   for (unsigned i = 0; i < IG->getFactor(); ++i) {
9317     if (!IG->getMember(i))
9318       continue;
9319     if (getNumStoreOperands() > 0) {
9320       O << "\n" << Indent << "  store ";
9321       getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker);
9322       O << " to index " << i;
9323     } else {
9324       O << "\n" << Indent << "  ";
9325       getVPValue(OpIdx)->printAsOperand(O, SlotTracker);
9326       O << " = load from index " << i;
9327     }
9328     ++OpIdx;
9329   }
9330 }
9331 #endif
9332 
9333 void VPWidenCallRecipe::execute(VPTransformState &State) {
9334   State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this,
9335                                   *this, State);
9336 }
9337 
9338 void VPWidenSelectRecipe::execute(VPTransformState &State) {
9339   auto &I = *cast<SelectInst>(getUnderlyingInstr());
9340   State.ILV->setDebugLocFromInst(&I);
9341 
  // The condition can be loop invariant but still defined inside the
9343   // loop. This means that we can't just use the original 'cond' value.
9344   // We have to take the 'vectorized' value and pick the first lane.
9345   // Instcombine will make this a no-op.
9346   auto *InvarCond =
9347       InvariantCond ? State.get(getOperand(0), VPIteration(0, 0)) : nullptr;
9348 
9349   for (unsigned Part = 0; Part < State.UF; ++Part) {
9350     Value *Cond = InvarCond ? InvarCond : State.get(getOperand(0), Part);
9351     Value *Op0 = State.get(getOperand(1), Part);
9352     Value *Op1 = State.get(getOperand(2), Part);
9353     Value *Sel = State.Builder.CreateSelect(Cond, Op0, Op1);
9354     State.set(this, Sel, Part);
9355     State.ILV->addMetadata(Sel, &I);
9356   }
9357 }
9358 
9359 void VPWidenRecipe::execute(VPTransformState &State) {
9360   auto &I = *cast<Instruction>(getUnderlyingValue());
9361   auto &Builder = State.Builder;
9362   switch (I.getOpcode()) {
9363   case Instruction::Call:
9364   case Instruction::Br:
9365   case Instruction::PHI:
9366   case Instruction::GetElementPtr:
9367   case Instruction::Select:
9368     llvm_unreachable("This instruction is handled by a different recipe.");
9369   case Instruction::UDiv:
9370   case Instruction::SDiv:
9371   case Instruction::SRem:
9372   case Instruction::URem:
9373   case Instruction::Add:
9374   case Instruction::FAdd:
9375   case Instruction::Sub:
9376   case Instruction::FSub:
9377   case Instruction::FNeg:
9378   case Instruction::Mul:
9379   case Instruction::FMul:
9380   case Instruction::FDiv:
9381   case Instruction::FRem:
9382   case Instruction::Shl:
9383   case Instruction::LShr:
9384   case Instruction::AShr:
9385   case Instruction::And:
9386   case Instruction::Or:
9387   case Instruction::Xor: {
9388     // Just widen unops and binops.
9389     State.ILV->setDebugLocFromInst(&I);
9390 
9391     for (unsigned Part = 0; Part < State.UF; ++Part) {
9392       SmallVector<Value *, 2> Ops;
9393       for (VPValue *VPOp : operands())
9394         Ops.push_back(State.get(VPOp, Part));
9395 
9396       Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
9397 
9398       if (auto *VecOp = dyn_cast<Instruction>(V)) {
9399         VecOp->copyIRFlags(&I);
9400 
9401         // If the instruction is vectorized and was in a basic block that needed
9402         // predication, we can't propagate poison-generating flags (nuw/nsw,
9403         // exact, etc.). The control flow has been linearized and the
        // instruction is no longer guarded by the predicate, so the flag
        // properties may no longer hold.
9406         if (State.MayGeneratePoisonRecipes.contains(this))
9407           VecOp->dropPoisonGeneratingFlags();
9408       }
9409 
9410       // Use this vector value for all users of the original instruction.
9411       State.set(this, V, Part);
9412       State.ILV->addMetadata(V, &I);
9413     }
9414 
9415     break;
9416   }
9417   case Instruction::ICmp:
9418   case Instruction::FCmp: {
9419     // Widen compares. Generate vector compares.
9420     bool FCmp = (I.getOpcode() == Instruction::FCmp);
9421     auto *Cmp = cast<CmpInst>(&I);
9422     State.ILV->setDebugLocFromInst(Cmp);
9423     for (unsigned Part = 0; Part < State.UF; ++Part) {
9424       Value *A = State.get(getOperand(0), Part);
9425       Value *B = State.get(getOperand(1), Part);
9426       Value *C = nullptr;
9427       if (FCmp) {
9428         // Propagate fast math flags.
9429         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
9430         Builder.setFastMathFlags(Cmp->getFastMathFlags());
9431         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
9432       } else {
9433         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
9434       }
9435       State.set(this, C, Part);
9436       State.ILV->addMetadata(C, &I);
9437     }
9438 
9439     break;
9440   }
9441 
9442   case Instruction::ZExt:
9443   case Instruction::SExt:
9444   case Instruction::FPToUI:
9445   case Instruction::FPToSI:
9446   case Instruction::FPExt:
9447   case Instruction::PtrToInt:
9448   case Instruction::IntToPtr:
9449   case Instruction::SIToFP:
9450   case Instruction::UIToFP:
9451   case Instruction::Trunc:
9452   case Instruction::FPTrunc:
9453   case Instruction::BitCast: {
9454     auto *CI = cast<CastInst>(&I);
9455     State.ILV->setDebugLocFromInst(CI);
9456 
    // Vectorize casts.
9458     Type *DestTy = (State.VF.isScalar())
9459                        ? CI->getType()
9460                        : VectorType::get(CI->getType(), State.VF);
9461 
9462     for (unsigned Part = 0; Part < State.UF; ++Part) {
9463       Value *A = State.get(getOperand(0), Part);
9464       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
9465       State.set(this, Cast, Part);
9466       State.ILV->addMetadata(Cast, &I);
9467     }
9468     break;
9469   }
9470   default:
9471     // This instruction is not vectorized by simple widening.
9472     LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
9473     llvm_unreachable("Unhandled instruction!");
9474   } // end of switch.
9475 }
9476 
9477 void VPWidenGEPRecipe::execute(VPTransformState &State) {
9478   auto *GEP = cast<GetElementPtrInst>(getUnderlyingInstr());
9479   // Construct a vector GEP by widening the operands of the scalar GEP as
9480   // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
9481   // results in a vector of pointers when at least one operand of the GEP
9482   // is vector-typed. Thus, to keep the representation compact, we only use
9483   // vector-typed operands for loop-varying values.
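  // E.g. (illustrative): with a loop-varying index vector %idx.vec,
  //   %gep = getelementptr double, double* %base, i64 %idx
  // is widened to
  //   %gep.vec = getelementptr double, double* %base, <VF x i64> %idx.vec
  // producing a vector of pointers.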
9484 
9485   if (State.VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
9486     // If we are vectorizing, but the GEP has only loop-invariant operands,
9487     // the GEP we build (by only using vector-typed operands for
9488     // loop-varying values) would be a scalar pointer. Thus, to ensure we
9489     // produce a vector of pointers, we need to either arbitrarily pick an
9490     // operand to broadcast, or broadcast a clone of the original GEP.
9491     // Here, we broadcast a clone of the original.
9492     //
9493     // TODO: If at some point we decide to scalarize instructions having
9494     //       loop-invariant operands, this special case will no longer be
9495     //       required. We would add the scalarization decision to
9496     //       collectLoopScalars() and teach getVectorValue() to broadcast
9497     //       the lane-zero scalar value.
9498     auto *Clone = State.Builder.Insert(GEP->clone());
9499     for (unsigned Part = 0; Part < State.UF; ++Part) {
9500       Value *EntryPart = State.Builder.CreateVectorSplat(State.VF, Clone);
9501       State.set(this, EntryPart, Part);
9502       State.ILV->addMetadata(EntryPart, GEP);
9503     }
9504   } else {
9505     // If the GEP has at least one loop-varying operand, we are sure to
9506     // produce a vector of pointers. But if we are only unrolling, we want
9507     // to produce a scalar GEP for each unroll part. Thus, the GEP we
9508     // produce with the code below will be scalar (if VF == 1) or vector
9509     // (otherwise). Note that for the unroll-only case, we still maintain
9510     // values in the vector mapping with initVector, as we do for other
9511     // instructions.
9512     for (unsigned Part = 0; Part < State.UF; ++Part) {
9513       // The pointer operand of the new GEP. If it's loop-invariant, we
9514       // won't broadcast it.
9515       auto *Ptr = IsPtrLoopInvariant
9516                       ? State.get(getOperand(0), VPIteration(0, 0))
9517                       : State.get(getOperand(0), Part);
9518 
9519       // Collect all the indices for the new GEP. If any index is
9520       // loop-invariant, we won't broadcast it.
9521       SmallVector<Value *, 4> Indices;
9522       for (unsigned I = 1, E = getNumOperands(); I < E; I++) {
9523         VPValue *Operand = getOperand(I);
9524         if (IsIndexLoopInvariant[I - 1])
9525           Indices.push_back(State.get(Operand, VPIteration(0, 0)));
9526         else
9527           Indices.push_back(State.get(Operand, Part));
9528       }
9529 
9530       // If the GEP instruction is vectorized and was in a basic block that
9531       // needed predication, we can't propagate the poison-generating 'inbounds'
9532       // flag. The control flow has been linearized and the GEP is no longer
      // guarded by the predicate, so the 'inbounds' property may no longer
      // hold.
9535       bool IsInBounds =
9536           GEP->isInBounds() && State.MayGeneratePoisonRecipes.count(this) == 0;
9537 
9538       // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
9539       // but it should be a vector, otherwise.
9540       auto *NewGEP = IsInBounds
9541                          ? State.Builder.CreateInBoundsGEP(
9542                                GEP->getSourceElementType(), Ptr, Indices)
9543                          : State.Builder.CreateGEP(GEP->getSourceElementType(),
9544                                                    Ptr, Indices);
9545       assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
9546              "NewGEP is not a pointer vector");
9547       State.set(this, NewGEP, Part);
9548       State.ILV->addMetadata(NewGEP, GEP);
9549     }
9550   }
9551 }
9552 
9553 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
9554   assert(!State.Instance && "Int or FP induction being replicated.");
9555 
9556   Value *Start = getStartValue()->getLiveInIRValue();
9557   const InductionDescriptor &ID = getInductionDescriptor();
9558   TruncInst *Trunc = getTruncInst();
9559   IRBuilderBase &Builder = State.Builder;
9560   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
9561   assert(State.VF.isVector() && "must have vector VF");
9562 
9563   // The value from the original loop to which we are mapping the new induction
9564   // variable.
9565   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
9566 
9567   auto &DL = EntryVal->getModule()->getDataLayout();
9568 
9569   // Generate code for the induction step. Note that induction steps are
  // required to be loop-invariant.
9571   auto CreateStepValue = [&](const SCEV *Step) -> Value * {
9572     if (SE.isSCEVable(IV->getType())) {
9573       SCEVExpander Exp(SE, DL, "induction");
9574       return Exp.expandCodeFor(Step, Step->getType(),
9575                                State.CFG.VectorPreHeader->getTerminator());
9576     }
9577     return cast<SCEVUnknown>(Step)->getValue();
9578   };
9579 
9580   // Fast-math-flags propagate from the original induction instruction.
9581   IRBuilder<>::FastMathFlagGuard FMFG(Builder);
9582   if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp()))
9583     Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags());
9584 
9585   // Now do the actual transformations, and start with creating the step value.
9586   Value *Step = CreateStepValue(ID.getStep());
9587 
9588   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
9589          "Expected either an induction phi-node or a truncate of it!");
9590 
9591   // Construct the initial value of the vector IV in the vector loop preheader
9592   auto CurrIP = Builder.saveIP();
9593   Builder.SetInsertPoint(State.CFG.VectorPreHeader->getTerminator());
9594   if (isa<TruncInst>(EntryVal)) {
9595     assert(Start->getType()->isIntegerTy() &&
9596            "Truncation requires an integer type");
9597     auto *TruncType = cast<IntegerType>(EntryVal->getType());
9598     Step = Builder.CreateTrunc(Step, TruncType);
9599     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
9600   }
9601 
9602   Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0);
9603   Value *SplatStart = Builder.CreateVectorSplat(State.VF, Start);
9604   Value *SteppedStart = getStepVector(
9605       SplatStart, Zero, Step, ID.getInductionOpcode(), State.VF, State.Builder);
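  // SteppedStart is now (illustratively) the vector
  //   <Start, Start + Step, ..., Start + (VF-1) * Step>.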
9606 
9607   // We create vector phi nodes for both integer and floating-point induction
9608   // variables. Here, we determine the kind of arithmetic we will perform.
9609   Instruction::BinaryOps AddOp;
9610   Instruction::BinaryOps MulOp;
9611   if (Step->getType()->isIntegerTy()) {
9612     AddOp = Instruction::Add;
9613     MulOp = Instruction::Mul;
9614   } else {
9615     AddOp = ID.getInductionOpcode();
9616     MulOp = Instruction::FMul;
9617   }
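  // E.g., an fsub-based FP induction uses FSub as AddOp and FMul as MulOp, so
  // each unrolled part below is updated as Prev - (Step * RuntimeVF).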
9618 
9619   // Multiply the vectorization factor by the step using integer or
9620   // floating-point arithmetic as appropriate.
9621   Type *StepType = Step->getType();
9622   Value *RuntimeVF;
9623   if (Step->getType()->isFloatingPointTy())
9624     RuntimeVF = getRuntimeVFAsFloat(Builder, StepType, State.VF);
9625   else
9626     RuntimeVF = getRuntimeVF(Builder, StepType, State.VF);
9627   Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF);
9628 
9629   // Create a vector splat to use in the induction update.
9630   //
9631   // FIXME: If the step is non-constant, we create the vector splat with
9632   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
9633   //        handle a constant vector splat.
9634   Value *SplatVF = isa<Constant>(Mul)
9635                        ? ConstantVector::getSplat(State.VF, cast<Constant>(Mul))
9636                        : Builder.CreateVectorSplat(State.VF, Mul);
9637   Builder.restoreIP(CurrIP);
9638 
9639   // We may need to add the step a number of times, depending on the unroll
9640   // factor. The last of those goes into the PHI.
9641   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
9642                                     &*State.CFG.PrevBB->getFirstInsertionPt());
9643   VecInd->setDebugLoc(EntryVal->getDebugLoc());
9644   Instruction *LastInduction = VecInd;
9645   for (unsigned Part = 0; Part < State.UF; ++Part) {
9646     State.set(this, LastInduction, Part);
9647 
9648     if (isa<TruncInst>(EntryVal))
9649       State.ILV->addMetadata(LastInduction, EntryVal);
9650 
9651     LastInduction = cast<Instruction>(
9652         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"));
9653     LastInduction->setDebugLoc(EntryVal->getDebugLoc());
9654   }
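
  // E.g., with VF = 4 and UF = 2, part 0 holds <i, i+1, i+2, i+3> and part 1
  // holds <i+4, ..., i+7>; the final "step.add" value <i+8, ...> is the one
  // that feeds the phi on the backedge below.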
9655 
9656   // Move the last step to the end of the latch block. This ensures consistent
9657   // placement of all induction updates.
9658   auto *LoopVectorLatch =
9659       State.LI->getLoopFor(State.CFG.PrevBB)->getLoopLatch();
9660   auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
9661   LastInduction->moveBefore(Br);
9662   LastInduction->setName("vec.ind.next");
9663 
9664   VecInd->addIncoming(SteppedStart, State.CFG.VectorPreHeader);
9665   VecInd->addIncoming(LastInduction, LoopVectorLatch);
9666 }
9667 
9668 void VPScalarIVStepsRecipe::execute(VPTransformState &State) {
9669   assert(!State.Instance && "VPScalarIVStepsRecipe being replicated.");
9670 
9671   // Fast-math-flags propagate from the original induction instruction.
9672   IRBuilder<>::FastMathFlagGuard FMFG(State.Builder);
9673   if (IndDesc.getInductionBinOp() &&
9674       isa<FPMathOperator>(IndDesc.getInductionBinOp()))
9675     State.Builder.setFastMathFlags(
9676         IndDesc.getInductionBinOp()->getFastMathFlags());
9677 
9678   Value *Step = State.get(getStepValue(), VPIteration(0, 0));
9679   auto CreateScalarIV = [&](Value *&Step) -> Value * {
9680     Value *ScalarIV = State.get(getCanonicalIV(), VPIteration(0, 0));
9681     auto *CanonicalIV = State.get(getParent()->getPlan()->getCanonicalIV(), 0);
9682     if (!isCanonical() || CanonicalIV->getType() != Ty) {
9683       ScalarIV =
9684           Ty->isIntegerTy()
9685               ? State.Builder.CreateSExtOrTrunc(ScalarIV, Ty)
9686               : State.Builder.CreateCast(Instruction::SIToFP, ScalarIV, Ty);
9687       ScalarIV = emitTransformedIndex(State.Builder, ScalarIV,
9688                                       getStartValue()->getLiveInIRValue(), Step,
9689                                       IndDesc);
9690       ScalarIV->setName("offset.idx");
9691     }
9692     if (TruncToTy) {
9693       assert(Step->getType()->isIntegerTy() &&
9694              "Truncation requires an integer step");
9695       ScalarIV = State.Builder.CreateTrunc(ScalarIV, TruncToTy);
9696       Step = State.Builder.CreateTrunc(Step, TruncToTy);
9697     }
9698     return ScalarIV;
9699   };
9700 
9701   Value *ScalarIV = CreateScalarIV(Step);
9702   if (State.VF.isVector()) {
9703     buildScalarSteps(ScalarIV, Step, IndDesc, this, State);
9704     return;
9705   }
9706 
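  // VF is scalar at this point, so each unroll part gets its own scalar step:
  // part Part receives ScalarIV op (Part * Step), where op is Add for integer
  // IVs and the FP induction opcode otherwise.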
9707   for (unsigned Part = 0; Part < State.UF; ++Part) {
9708     assert(!State.VF.isScalable() && "scalable vectors not yet supported.");
9709     Value *EntryPart;
9710     if (Step->getType()->isFloatingPointTy()) {
9711       Value *StartIdx =
9712           getRuntimeVFAsFloat(State.Builder, Step->getType(), State.VF * Part);
9713       // Floating-point operations inherit FMF via the builder's flags.
9714       Value *MulOp = State.Builder.CreateFMul(StartIdx, Step);
9715       EntryPart = State.Builder.CreateBinOp(IndDesc.getInductionOpcode(),
9716                                             ScalarIV, MulOp);
9717     } else {
9718       Value *StartIdx =
9719           getRuntimeVF(State.Builder, Step->getType(), State.VF * Part);
9720       EntryPart = State.Builder.CreateAdd(
9721           ScalarIV, State.Builder.CreateMul(StartIdx, Step), "induction");
9722     }
9723     State.set(this, EntryPart, Part);
9724   }
9725 }
9726 
9727 void VPWidenPHIRecipe::execute(VPTransformState &State) {
9728   State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), this,
9729                                  State);
9730 }
9731 
9732 void VPBlendRecipe::execute(VPTransformState &State) {
9733   State.ILV->setDebugLocFromInst(Phi, &State.Builder);
9734   // We know that all PHIs in non-header blocks are converted into
9735   // selects, so we don't have to worry about the insertion order and we
9736   // can just use the builder.
9737   // At this point we generate the predication tree. There may be
9738   // duplications since this is a simple recursive scan, but future
9739   // optimizations will clean it up.
9740 
9741   unsigned NumIncoming = getNumIncomingValues();
9742 
9743   // Generate a sequence of selects of the form:
9744   // SELECT(Mask3, In3,
9745   //        SELECT(Mask2, In2,
9746   //               SELECT(Mask1, In1,
9747   //                      In0)))
  // Note that Mask0 is never used: lanes for which no path reaches this phi
  // (and are therefore essentially undef) take their value from In0.
9750   InnerLoopVectorizer::VectorParts Entry(State.UF);
9751   for (unsigned In = 0; In < NumIncoming; ++In) {
9752     for (unsigned Part = 0; Part < State.UF; ++Part) {
9753       // We might have single edge PHIs (blocks) - use an identity
9754       // 'select' for the first PHI operand.
9755       Value *In0 = State.get(getIncomingValue(In), Part);
9756       if (In == 0)
9757         Entry[Part] = In0; // Initialize with the first incoming value.
9758       else {
9759         // Select between the current value and the previous incoming edge
9760         // based on the incoming mask.
9761         Value *Cond = State.get(getMask(In), Part);
9762         Entry[Part] =
9763             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
9764       }
9765     }
9766   }
9767   for (unsigned Part = 0; Part < State.UF; ++Part)
9768     State.set(this, Entry[Part], Part);
9769 }
9770 
9771 void VPInterleaveRecipe::execute(VPTransformState &State) {
9772   assert(!State.Instance && "Interleave group being replicated.");
9773   State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(),
9774                                       getStoredValues(), getMask());
9775 }
9776 
9777 void VPReductionRecipe::execute(VPTransformState &State) {
9778   assert(!State.Instance && "Reduction being replicated.");
9779   Value *PrevInChain = State.get(getChainOp(), 0);
9780   RecurKind Kind = RdxDesc->getRecurrenceKind();
9781   bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc);
9782   // Propagate the fast-math flags carried by the underlying instruction.
9783   IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
9784   State.Builder.setFastMathFlags(RdxDesc->getFastMathFlags());
9785   for (unsigned Part = 0; Part < State.UF; ++Part) {
9786     Value *NewVecOp = State.get(getVecOp(), Part);
9787     if (VPValue *Cond = getCondOp()) {
9788       Value *NewCond = State.get(Cond, Part);
9789       VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
9790       Value *Iden = RdxDesc->getRecurrenceIdentity(
9791           Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags());
9792       Value *IdenVec =
9793           State.Builder.CreateVectorSplat(VecTy->getElementCount(), Iden);
9794       Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
9795       NewVecOp = Select;
9796     }
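    // E.g., for a masked fadd reduction, lanes whose mask bit is false are
    // replaced with -0.0 (the fadd identity) so they do not affect the
    // reduced value.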
9797     Value *NewRed;
9798     Value *NextInChain;
9799     if (IsOrdered) {
9800       if (State.VF.isVector())
9801         NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp,
9802                                         PrevInChain);
9803       else
9804         NewRed = State.Builder.CreateBinOp(
9805             (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), PrevInChain,
9806             NewVecOp);
9807       PrevInChain = NewRed;
9808     } else {
9809       PrevInChain = State.get(getChainOp(), Part);
9810       NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
9811     }
9812     if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9813       NextInChain =
9814           createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
9815                          NewRed, PrevInChain);
9816     } else if (IsOrdered)
9817       NextInChain = NewRed;
9818     else
9819       NextInChain = State.Builder.CreateBinOp(
9820           (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), NewRed,
9821           PrevInChain);
9822     State.set(this, NextInChain, Part);
9823   }
9824 }
9825 
9826 void VPReplicateRecipe::execute(VPTransformState &State) {
9827   if (State.Instance) { // Generate a single instance.
9828     assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
9829     State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *State.Instance,
9830                                     IsPredicated, State);
9831     // Insert scalar instance packing it into a vector.
9832     if (AlsoPack && State.VF.isVector()) {
9833       // If we're constructing lane 0, initialize to start from poison.
9834       if (State.Instance->Lane.isFirstLane()) {
9835         assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
9836         Value *Poison = PoisonValue::get(
9837             VectorType::get(getUnderlyingValue()->getType(), State.VF));
9838         State.set(this, Poison, State.Instance->Part);
9839       }
9840       State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
9841     }
9842     return;
9843   }
9844 
  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case only the first lane of each UF part
  // is generated.
9848   unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
9849   assert((!State.VF.isScalable() || IsUniform) &&
9850          "Can't scalarize a scalable vector");
9851   for (unsigned Part = 0; Part < State.UF; ++Part)
9852     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
9853       State.ILV->scalarizeInstruction(getUnderlyingInstr(), this,
9854                                       VPIteration(Part, Lane), IsPredicated,
9855                                       State);
9856 }
9857 
9858 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
9859   assert(State.Instance && "Branch on Mask works only on single instance.");
9860 
9861   unsigned Part = State.Instance->Part;
9862   unsigned Lane = State.Instance->Lane.getKnownLane();
9863 
9864   Value *ConditionBit = nullptr;
9865   VPValue *BlockInMask = getMask();
9866   if (BlockInMask) {
9867     ConditionBit = State.get(BlockInMask, Part);
9868     if (ConditionBit->getType()->isVectorTy())
9869       ConditionBit = State.Builder.CreateExtractElement(
9870           ConditionBit, State.Builder.getInt32(Lane));
9871   } else // Block in mask is all-one.
9872     ConditionBit = State.Builder.getTrue();
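  // E.g., for a <4 x i1> mask <1, 0, 1, 1> and Lane == 1, ConditionBit is
  // false and the branch created below skips this instance's predicated block.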
9873 
9874   // Replace the temporary unreachable terminator with a new conditional branch,
9875   // whose two destinations will be set later when they are created.
9876   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
9877   assert(isa<UnreachableInst>(CurrentTerminator) &&
9878          "Expected to replace unreachable terminator with conditional branch.");
9879   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
9880   CondBr->setSuccessor(0, nullptr);
9881   ReplaceInstWithInst(CurrentTerminator, CondBr);
9882 }
9883 
9884 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
9885   assert(State.Instance && "Predicated instruction PHI works per instance.");
9886   Instruction *ScalarPredInst =
9887       cast<Instruction>(State.get(getOperand(0), *State.Instance));
9888   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
9889   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
9890   assert(PredicatingBB && "Predicated block has no single predecessor.");
9891   assert(isa<VPReplicateRecipe>(getOperand(0)) &&
9892          "operand must be VPReplicateRecipe");
9893 
9894   // By current pack/unpack logic we need to generate only a single phi node: if
9895   // a vector value for the predicated instruction exists at this point it means
9896   // the instruction has vector users only, and a phi for the vector value is
9897   // needed. In this case the recipe of the predicated instruction is marked to
9898   // also do that packing, thereby "hoisting" the insert-element sequence.
9899   // Otherwise, a phi node for the scalar value is needed.
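  // In the vector case this produces, e.g.:
  //   %vphi = phi <4 x i32> [ %prev, %PredicatingBB ], [ %iei, %PredicatedBB ]
  // where %iei is the insertelement of the scalar result into %prev.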
9900   unsigned Part = State.Instance->Part;
9901   if (State.hasVectorValue(getOperand(0), Part)) {
9902     Value *VectorValue = State.get(getOperand(0), Part);
9903     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
9904     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
9905     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
9906     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
9907     if (State.hasVectorValue(this, Part))
9908       State.reset(this, VPhi, Part);
9909     else
9910       State.set(this, VPhi, Part);
9911     // NOTE: Currently we need to update the value of the operand, so the next
9912     // predicated iteration inserts its generated value in the correct vector.
9913     State.reset(getOperand(0), VPhi, Part);
9914   } else {
9915     Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType();
9916     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
9917     Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
9918                      PredicatingBB);
9919     Phi->addIncoming(ScalarPredInst, PredicatedBB);
9920     if (State.hasScalarValue(this, *State.Instance))
9921       State.reset(this, Phi, *State.Instance);
9922     else
9923       State.set(this, Phi, *State.Instance);
9924     // NOTE: Currently we need to update the value of the operand, so the next
9925     // predicated iteration inserts its generated value in the correct vector.
9926     State.reset(getOperand(0), Phi, *State.Instance);
9927   }
9928 }
9929 
9930 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
9931   VPValue *StoredValue = isStore() ? getStoredValue() : nullptr;
9932 
9933   // Attempt to issue a wide load.
9934   LoadInst *LI = dyn_cast<LoadInst>(&Ingredient);
9935   StoreInst *SI = dyn_cast<StoreInst>(&Ingredient);
9936 
9937   assert((LI || SI) && "Invalid Load/Store instruction");
9938   assert((!SI || StoredValue) && "No stored value provided for widened store");
9939   assert((!LI || !StoredValue) && "Stored value provided for widened load");
9940 
9941   Type *ScalarDataTy = getLoadStoreType(&Ingredient);
9942 
9943   auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
9944   const Align Alignment = getLoadStoreAlignment(&Ingredient);
9945   bool CreateGatherScatter = !Consecutive;
9946 
9947   auto &Builder = State.Builder;
9948   InnerLoopVectorizer::VectorParts BlockInMaskParts(State.UF);
9949   bool isMaskRequired = getMask();
9950   if (isMaskRequired)
9951     for (unsigned Part = 0; Part < State.UF; ++Part)
9952       BlockInMaskParts[Part] = State.get(getMask(), Part);
9953 
9954   const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
9955     // Calculate the pointer for the specific unroll-part.
9956     GetElementPtrInst *PartPtr = nullptr;
9957 
9958     bool InBounds = false;
9959     if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
9960       InBounds = gep->isInBounds();
9961     if (Reverse) {
9962       // If the address is consecutive but reversed, then the
9963       // wide store needs to start at the last vector element.
      // RunTimeVF = VScale * VF.getKnownMinValue()
      // For fixed-width vectors, VScale is 1, so RunTimeVF equals
      // VF.getKnownMinValue().
9966       Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), State.VF);
9967       // NumElt = -Part * RunTimeVF
9968       Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF);
9969       // LastLane = 1 - RunTimeVF
9970       Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF);
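      // E.g., for fixed-width VF = 4 and Part = 1: RunTimeVF = 4, NumElt = -4
      // and LastLane = -3, so the wide access starts at Ptr - 7 and covers
      // the four elements ending at Ptr - 4 before being reversed.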
9971       PartPtr =
9972           cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt));
9973       PartPtr->setIsInBounds(InBounds);
9974       PartPtr = cast<GetElementPtrInst>(
9975           Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane));
9976       PartPtr->setIsInBounds(InBounds);
9977       if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
9978         BlockInMaskParts[Part] =
9979             Builder.CreateVectorReverse(BlockInMaskParts[Part], "reverse");
9980     } else {
9981       Value *Increment =
9982           createStepForVF(Builder, Builder.getInt32Ty(), State.VF, Part);
9983       PartPtr = cast<GetElementPtrInst>(
9984           Builder.CreateGEP(ScalarDataTy, Ptr, Increment));
9985       PartPtr->setIsInBounds(InBounds);
9986     }
9987 
9988     unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
9989     return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
9990   };
9991 
9992   // Handle Stores:
9993   if (SI) {
9994     State.ILV->setDebugLocFromInst(SI);
9995 
9996     for (unsigned Part = 0; Part < State.UF; ++Part) {
9997       Instruction *NewSI = nullptr;
9998       Value *StoredVal = State.get(StoredValue, Part);
9999       if (CreateGatherScatter) {
10000         Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
10001         Value *VectorGep = State.get(getAddr(), Part);
10002         NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
10003                                             MaskPart);
10004       } else {
10005         if (Reverse) {
10006           // If we store to reverse consecutive memory locations, then we need
10007           // to reverse the order of elements in the stored value.
10008           StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse");
10009           // We don't want to update the value in the map as it might be used in
10010           // another expression. So don't call resetVectorValue(StoredVal).
10011         }
10012         auto *VecPtr =
10013             CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0)));
10014         if (isMaskRequired)
10015           NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
10016                                             BlockInMaskParts[Part]);
10017         else
10018           NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
10019       }
10020       State.ILV->addMetadata(NewSI, SI);
10021     }
10022     return;
10023   }
10024 
10025   // Handle loads.
10026   assert(LI && "Must have a load instruction");
10027   State.ILV->setDebugLocFromInst(LI);
10028   for (unsigned Part = 0; Part < State.UF; ++Part) {
10029     Value *NewLI;
10030     if (CreateGatherScatter) {
10031       Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
10032       Value *VectorGep = State.get(getAddr(), Part);
10033       NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart,
10034                                          nullptr, "wide.masked.gather");
10035       State.ILV->addMetadata(NewLI, LI);
10036     } else {
10037       auto *VecPtr =
10038           CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0)));
10039       if (isMaskRequired)
10040         NewLI = Builder.CreateMaskedLoad(
10041             DataTy, VecPtr, Alignment, BlockInMaskParts[Part],
10042             PoisonValue::get(DataTy), "wide.masked.load");
10043       else
10044         NewLI =
10045             Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
10046 
      // Add metadata to the load, but set the mapped vector value to the
      // reverse shuffle (if any).
10048       State.ILV->addMetadata(NewLI, LI);
10049       if (Reverse)
10050         NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
10051     }
10052 
10053     State.set(this, NewLI, Part);
10054   }
10055 }
10056 
10057 // Determine how to lower the scalar epilogue, which depends on 1) optimising
10058 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
10059 // predication, and 4) a TTI hook that analyses whether the loop is suitable
10060 // for predication.
10061 static ScalarEpilogueLowering getScalarEpilogueLowering(
10062     Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
10063     BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
10064     AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
10065     LoopVectorizationLegality &LVL) {
10066   // 1) OptSize takes precedence over all other options, i.e. if this is set,
10067   // don't look at hints or options, and don't request a scalar epilogue.
10068   // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
10069   // LoopAccessInfo (due to code dependency and not being able to reliably get
10070   // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
10071   // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
  // versioning when the vectorization is forced, unlike hasOptSize. So revert
  // to the old way and vectorize with versioning when forced. See D81345.)
10074   if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
10075                                                       PGSOQueryType::IRPass) &&
10076                           Hints.getForce() != LoopVectorizeHints::FK_Enabled))
10077     return CM_ScalarEpilogueNotAllowedOptSize;
10078 
  // 2) If set, obey the directives.
10080   if (PreferPredicateOverEpilogue.getNumOccurrences()) {
10081     switch (PreferPredicateOverEpilogue) {
10082     case PreferPredicateTy::ScalarEpilogue:
10083       return CM_ScalarEpilogueAllowed;
10084     case PreferPredicateTy::PredicateElseScalarEpilogue:
10085       return CM_ScalarEpilogueNotNeededUsePredicate;
10086     case PreferPredicateTy::PredicateOrDontVectorize:
10087       return CM_ScalarEpilogueNotAllowedUsePredicate;
    }
10089   }
10090 
  // 3) If set, obey the hints.
10092   switch (Hints.getPredicate()) {
10093   case LoopVectorizeHints::FK_Enabled:
10094     return CM_ScalarEpilogueNotNeededUsePredicate;
10095   case LoopVectorizeHints::FK_Disabled:
10096     return CM_ScalarEpilogueAllowed;
  }
10098 
  // 4) If the TTI hook indicates this is profitable, request predication.
10100   if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
10101                                        LVL.getLAI()))
10102     return CM_ScalarEpilogueNotNeededUsePredicate;
10103 
10104   return CM_ScalarEpilogueAllowed;
10105 }
10106 
10107 Value *VPTransformState::get(VPValue *Def, unsigned Part) {
  // If values have been set for this Def, return the one relevant for \p Part.
10109   if (hasVectorValue(Def, Part))
10110     return Data.PerPartOutput[Def][Part];
10111 
10112   if (!hasScalarValue(Def, {Part, 0})) {
10113     Value *IRV = Def->getLiveInIRValue();
10114     Value *B = ILV->getBroadcastInstrs(IRV);
10115     set(Def, B, Part);
10116     return B;
10117   }
10118 
10119   Value *ScalarValue = get(Def, {Part, 0});
10120   // If we aren't vectorizing, we can just copy the scalar map values over
10121   // to the vector map.
10122   if (VF.isScalar()) {
10123     set(Def, ScalarValue, Part);
10124     return ScalarValue;
10125   }
10126 
10127   auto *RepR = dyn_cast<VPReplicateRecipe>(Def);
10128   bool IsUniform = RepR && RepR->isUniform();
10129 
10130   unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
10131   // Check if there is a scalar value for the selected lane.
10132   if (!hasScalarValue(Def, {Part, LastLane})) {
10133     // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform.
10134     assert((isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) ||
10135             isa<VPScalarIVStepsRecipe>(Def->getDef())) &&
10136            "unexpected recipe found to be invariant");
10137     IsUniform = true;
10138     LastLane = 0;
10139   }
10140 
10141   auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
10142   // Set the insert point after the last scalarized instruction or after the
10143   // last PHI, if LastInst is a PHI. This ensures the insertelement sequence
10144   // will directly follow the scalar definitions.
10145   auto OldIP = Builder.saveIP();
10146   auto NewIP =
10147       isa<PHINode>(LastInst)
10148           ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI())
10149           : std::next(BasicBlock::iterator(LastInst));
10150   Builder.SetInsertPoint(&*NewIP);
10151 
10152   // However, if we are vectorizing, we need to construct the vector values.
10153   // If the value is known to be uniform after vectorization, we can just
10154   // broadcast the scalar value corresponding to lane zero for each unroll
10155   // iteration. Otherwise, we construct the vector values using
10156   // insertelement instructions. Since the resulting vectors are stored in
10157   // State, we will only generate the insertelements once.
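  // For VF = 4, the non-uniform case builds, e.g.:
  //   %p0 = insertelement <4 x i32> poison, i32 %s0, i32 0
  //   %p1 = insertelement <4 x i32> %p0, i32 %s1, i32 1
  // and so on for lanes 2 and 3.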
10158   Value *VectorValue = nullptr;
10159   if (IsUniform) {
10160     VectorValue = ILV->getBroadcastInstrs(ScalarValue);
10161     set(Def, VectorValue, Part);
10162   } else {
    // Initialize packing with insertelements to start from poison.
    assert(!VF.isScalable() && "VF is assumed to be non scalable.");
    Value *Poison = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
    set(Def, Poison, Part);
10167     for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
10168       ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
10169     VectorValue = get(Def, Part);
10170   }
10171   Builder.restoreIP(OldIP);
10172   return VectorValue;
10173 }
10174 
10175 // Process the loop in the VPlan-native vectorization path. This path builds
// VPlan upfront in the vectorization pipeline, which makes it possible to
// apply VPlan-to-VPlan transformations from the very beginning without
// modifying the input LLVM IR.
10179 static bool processLoopInVPlanNativePath(
10180     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
10181     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
10182     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
10183     OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
10184     ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints,
10185     LoopVectorizationRequirements &Requirements) {
10186 
10187   if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
10188     LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
10189     return false;
10190   }
10191   assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
10192   Function *F = L->getHeader()->getParent();
10193   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
10194 
10195   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
10196       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
10197 
10198   LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
10199                                 &Hints, IAI);
10200   // Use the planner for outer loop vectorization.
10201   // TODO: CM is not used at this point inside the planner. Turn CM into an
10202   // optional argument if we don't need it in the future.
10203   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints,
10204                                Requirements, ORE);
10205 
10206   // Get user vectorization factor.
10207   ElementCount UserVF = Hints.getWidth();
10208 
10209   CM.collectElementTypesForWidening();
10210 
10211   // Plan how to best vectorize, return the best VF and its cost.
10212   const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
10213 
10214   // If we are stress testing VPlan builds, do not attempt to generate vector
10215   // code. Masked vector code generation support will follow soon.
10216   // Also, do not attempt to vectorize if no vector code will be produced.
10217   if (VPlanBuildStressTest || EnableVPlanPredication ||
10218       VectorizationFactor::Disabled() == VF)
10219     return false;
10220 
10221   VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10222 
10223   {
10224     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10225                              F->getParent()->getDataLayout());
10226     InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
10227                            &CM, BFI, PSI, Checks);
10228     LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
10229                       << L->getHeader()->getParent()->getName() << "\"\n");
10230     LVP.executePlan(VF.Width, 1, BestPlan, LB, DT);
10231   }
10232 
10233   // Mark the loop as already vectorized to avoid vectorizing again.
10234   Hints.setAlreadyVectorized();
10235   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10236   return true;
10237 }
10238 
// Emit a remark if there are stores to floats that required a floating point
// extension. If the vectorized loop was generated in double precision, there
// will be a performance penalty from the conversion overhead and the change
// in the vector width.
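// For example, in 'a[i] = (float)(b[i] * 2.0);' with float arrays, b[i] is
// extended to double for the multiply, so the vectorized loop must operate on
// <N x double>, halving the number of elements processed per vector.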
10243 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
10244   SmallVector<Instruction *, 4> Worklist;
10245   for (BasicBlock *BB : L->getBlocks()) {
10246     for (Instruction &Inst : *BB) {
10247       if (auto *S = dyn_cast<StoreInst>(&Inst)) {
10248         if (S->getValueOperand()->getType()->isFloatTy())
10249           Worklist.push_back(S);
10250       }
10251     }
10252   }
10253 
  // Traverse the floating point stores upwards, searching for floating point
  // conversions.
10256   SmallPtrSet<const Instruction *, 4> Visited;
10257   SmallPtrSet<const Instruction *, 4> EmittedRemark;
10258   while (!Worklist.empty()) {
10259     auto *I = Worklist.pop_back_val();
10260     if (!L->contains(I))
10261       continue;
10262     if (!Visited.insert(I).second)
10263       continue;
10264 
10265     // Emit a remark if the floating point store required a floating
10266     // point conversion.
10267     // TODO: More work could be done to identify the root cause such as a
10268     // constant or a function return type and point the user to it.
10269     if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
10270       ORE->emit([&]() {
10271         return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
10272                                           I->getDebugLoc(), L->getHeader())
10273                << "floating point conversion changes vector width. "
10274                << "Mixed floating point precision requires an up/down "
10275                << "cast that will negatively impact performance.";
10276       });
10277 
10278     for (Use &Op : I->operands())
10279       if (auto *OpI = dyn_cast<Instruction>(Op))
10280         Worklist.push_back(OpI);
10281   }
10282 }
10283 
10284 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
10285     : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
10286                                !EnableLoopInterleaving),
10287       VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
10288                               !EnableLoopVectorization) {}
10289 
10290 bool LoopVectorizePass::processLoop(Loop *L) {
10291   assert((EnableVPlanNativePath || L->isInnermost()) &&
10292          "VPlan-native path is not enabled. Only process inner loops.");
10293 
10294 #ifndef NDEBUG
10295   const std::string DebugLocStr = getDebugLocString(L);
10296 #endif /* NDEBUG */
10297 
10298   LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in '"
10299                     << L->getHeader()->getParent()->getName() << "' from "
10300                     << DebugLocStr << "\n");
10301 
10302   LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI);
10303 
10304   LLVM_DEBUG(
10305       dbgs() << "LV: Loop hints:"
10306              << " force="
10307              << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
10308                      ? "disabled"
10309                      : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
10310                             ? "enabled"
10311                             : "?"))
10312              << " width=" << Hints.getWidth()
10313              << " interleave=" << Hints.getInterleave() << "\n");
10314 
  // Function containing the loop.
10316   Function *F = L->getHeader()->getParent();
10317 
10318   // Looking at the diagnostic output is the only way to determine if a loop
10319   // was vectorized (other than looking at the IR or machine code), so it
10320   // is important to generate an optimization remark for each loop. Most of
10321   // these messages are generated as OptimizationRemarkAnalysis. Remarks
  // generated as OptimizationRemark and OptimizationRemarkMissed are less
  // verbose, reporting vectorized loops and unvectorized loops that may
  // benefit from vectorization, respectively.
10325 
10326   if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
10327     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
10328     return false;
10329   }
10330 
10331   PredicatedScalarEvolution PSE(*SE, *L);
10332 
10333   // Check if it is legal to vectorize the loop.
10334   LoopVectorizationRequirements Requirements;
10335   LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
10336                                 &Requirements, &Hints, DB, AC, BFI, PSI);
10337   if (!LVL.canVectorize(EnableVPlanNativePath)) {
10338     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
10339     Hints.emitRemarkWithHints();
10340     return false;
10341   }
10342 
10343   // Check the function attributes and profiles to find out if this function
10344   // should be optimized for size.
10345   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
10346       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);
10347 
10348   // Entrance to the VPlan-native vectorization path. Outer loops are processed
10349   // here. They may require CFG and instruction level transformations before
10350   // even evaluating whether vectorization is profitable. Since we cannot modify
10351   // the incoming IR, we need to build VPlan upfront in the vectorization
10352   // pipeline.
10353   if (!L->isInnermost())
10354     return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
10355                                         ORE, BFI, PSI, Hints, Requirements);
10356 
10357   assert(L->isInnermost() && "Inner loop expected.");
10358 
10359   // Check the loop for a trip count threshold: vectorize loops with a tiny trip
10360   // count by optimizing for size, to minimize overheads.
10361   auto ExpectedTC = getSmallBestKnownTC(*SE, L);
10362   if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
10363     LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
10364                       << "This loop is worth vectorizing only if no scalar "
10365                       << "iteration overheads are incurred.");
10366     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
10367       LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
10368     else {
10369       LLVM_DEBUG(dbgs() << "\n");
10370       SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
10371     }
10372   }
10373 
10374   // Check the function attributes to see if implicit floats are allowed.
10375   // FIXME: This check doesn't seem possibly correct -- what if the loop is
10376   // an integer loop and the vector instructions selected are purely integer
10377   // vector instructions?
10378   if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
10379     reportVectorizationFailure(
10380         "Can't vectorize when the NoImplicitFloat attribute is used",
10381         "loop not vectorized due to NoImplicitFloat attribute",
10382         "NoImplicitFloat", ORE, L);
10383     Hints.emitRemarkWithHints();
10384     return false;
10385   }
10386 
10387   // Check if the target supports potentially unsafe FP vectorization.
10388   // FIXME: Add a check for the type of safety issue (denormal, signaling)
10389   // for the target we're vectorizing for, to make sure none of the
10390   // additional fp-math flags can help.
10391   if (Hints.isPotentiallyUnsafe() &&
10392       TTI->isFPVectorizationPotentiallyUnsafe()) {
10393     reportVectorizationFailure(
10394         "Potentially unsafe FP op prevents vectorization",
10395         "loop not vectorized due to unsafe FP support.",
10396         "UnsafeFP", ORE, L);
10397     Hints.emitRemarkWithHints();
10398     return false;
10399   }
10400 
10401   bool AllowOrderedReductions;
10402   // If the flag is set, use that instead and override the TTI behaviour.
10403   if (ForceOrderedReductions.getNumOccurrences() > 0)
10404     AllowOrderedReductions = ForceOrderedReductions;
10405   else
10406     AllowOrderedReductions = TTI->enableOrderedReductions();
10407   if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) {
10408     ORE->emit([&]() {
10409       auto *ExactFPMathInst = Requirements.getExactFPInst();
10410       return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
10411                                                  ExactFPMathInst->getDebugLoc(),
10412                                                  ExactFPMathInst->getParent())
10413              << "loop not vectorized: cannot prove it is safe to reorder "
10414                 "floating-point operations";
10415     });
10416     LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
10417                          "reorder floating-point operations\n");
10418     Hints.emitRemarkWithHints();
10419     return false;
10420   }
10421 
10422   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
10423   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
10424 
10425   // If an override option has been passed in for interleaved accesses, use it.
10426   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
10427     UseInterleaved = EnableInterleavedMemAccesses;
10428 
10429   // Analyze interleaved memory accesses.
10430   if (UseInterleaved) {
10431     IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
10432   }
10433 
10434   // Use the cost model.
10435   LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
10436                                 F, &Hints, IAI);
10437   CM.collectValuesToIgnore();
10438   CM.collectElementTypesForWidening();
10439 
10440   // Use the planner for vectorization.
10441   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints,
10442                                Requirements, ORE);
10443 
10444   // Get user vectorization factor and interleave count.
10445   ElementCount UserVF = Hints.getWidth();
10446   unsigned UserIC = Hints.getInterleave();
10447 
10448   // Plan how to best vectorize, return the best VF and its cost.
10449   Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
10450 
10451   VectorizationFactor VF = VectorizationFactor::Disabled();
10452   unsigned IC = 1;
10453 
10454   if (MaybeVF) {
10455     VF = *MaybeVF;
10456     // Select the interleave count.
10457     IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue());
10458   }
10459 
10460   // Identify the diagnostic messages that should be produced.
10461   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
10462   bool VectorizeLoop = true, InterleaveLoop = true;
10463   if (VF.Width.isScalar()) {
10464     LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
10465     VecDiagMsg = std::make_pair(
10466         "VectorizationNotBeneficial",
10467         "the cost-model indicates that vectorization is not beneficial");
10468     VectorizeLoop = false;
10469   }
10470 
10471   if (!MaybeVF && UserIC > 1) {
10472     // Tell the user interleaving was avoided up-front, despite being explicitly
10473     // requested.
10474     LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
10475                          "interleaving should be avoided up front\n");
10476     IntDiagMsg = std::make_pair(
10477         "InterleavingAvoided",
10478         "Ignoring UserIC, because interleaving was avoided up front");
10479     InterleaveLoop = false;
10480   } else if (IC == 1 && UserIC <= 1) {
10481     // Tell the user interleaving is not beneficial.
10482     LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
10483     IntDiagMsg = std::make_pair(
10484         "InterleavingNotBeneficial",
10485         "the cost-model indicates that interleaving is not beneficial");
10486     InterleaveLoop = false;
10487     if (UserIC == 1) {
10488       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
10489       IntDiagMsg.second +=
10490           " and is explicitly disabled or interleave count is set to 1";
10491     }
10492   } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
10494     LLVM_DEBUG(
10495         dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
10496     IntDiagMsg = std::make_pair(
10497         "InterleavingBeneficialButDisabled",
10498         "the cost-model indicates that interleaving is beneficial "
10499         "but is explicitly disabled or interleave count is set to 1");
10500     InterleaveLoop = false;
10501   }
10502 
10503   // Override IC if user provided an interleave count.
10504   IC = UserIC > 0 ? UserIC : IC;
10505 
10506   // Emit diagnostic messages, if any.
10507   const char *VAPassName = Hints.vectorizeAnalysisPassName();
10508   if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
10510     ORE->emit([&]() {
10511       return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
10512                                       L->getStartLoc(), L->getHeader())
10513              << VecDiagMsg.second;
10514     });
10515     ORE->emit([&]() {
10516       return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
10517                                       L->getStartLoc(), L->getHeader())
10518              << IntDiagMsg.second;
10519     });
10520     return false;
10521   } else if (!VectorizeLoop && InterleaveLoop) {
10522     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10523     ORE->emit([&]() {
10524       return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
10525                                         L->getStartLoc(), L->getHeader())
10526              << VecDiagMsg.second;
10527     });
10528   } else if (VectorizeLoop && !InterleaveLoop) {
10529     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10530                       << ") in " << DebugLocStr << '\n');
10531     ORE->emit([&]() {
10532       return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
10533                                         L->getStartLoc(), L->getHeader())
10534              << IntDiagMsg.second;
10535     });
10536   } else if (VectorizeLoop && InterleaveLoop) {
10537     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10538                       << ") in " << DebugLocStr << '\n');
10539     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10540   }
10541 
10542   bool DisableRuntimeUnroll = false;
10543   MDNode *OrigLoopID = L->getLoopID();
10544   {
    // Optimistically generate runtime checks. Drop them if they turn out not
    // to be profitable. Limit the scope of Checks, so the cleanup happens
    // immediately after vector code generation is done.
10548     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10549                              F->getParent()->getDataLayout());
10550     if (!VF.Width.isScalar() || IC > 1)
10551       Checks.Create(L, *LVL.getLAI(), PSE.getPredicate());
10552 
10553     using namespace ore;
10554     if (!VectorizeLoop) {
10555       assert(IC > 1 && "interleave count should not be 1 or 0");
      // If we decided that it is not worthwhile to vectorize the loop (the
      // cost model chose VF = 1), then interleave it.
10558       InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
10559                                  &CM, BFI, PSI, Checks);
10560 
10561       VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10562       LVP.executePlan(VF.Width, IC, BestPlan, Unroller, DT);
10563 
10564       ORE->emit([&]() {
10565         return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
10566                                   L->getHeader())
10567                << "interleaved loop (interleaved count: "
10568                << NV("InterleaveCount", IC) << ")";
10569       });
10570     } else {
      // If we decided that it is *worthwhile* to vectorize the loop, then do it.
10572 
10573       // Consider vectorizing the epilogue too if it's profitable.
10574       VectorizationFactor EpilogueVF =
10575           CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
10576       if (EpilogueVF.Width.isVector()) {
10577 
10578         // The first pass vectorizes the main loop and creates a scalar epilogue
10579         // to be vectorized by executing the plan (potentially with a different
10580         // factor) again shortly afterwards.
10581         EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1);
10582         EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE,
10583                                            EPI, &LVL, &CM, BFI, PSI, Checks);
10584 
10585         VPlan &BestMainPlan = LVP.getBestPlanFor(EPI.MainLoopVF);
10586         LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF, BestMainPlan, MainILV,
10587                         DT);
10588         ++LoopsVectorized;
10589 
10590         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10591         formLCSSARecursively(*L, *DT, LI, SE);
10592 
10593         // Second pass vectorizes the epilogue and adjusts the control flow
10594         // edges from the first pass.
10595         EPI.MainLoopVF = EPI.EpilogueVF;
10596         EPI.MainLoopUF = EPI.EpilogueUF;
10597         EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
10598                                                  ORE, EPI, &LVL, &CM, BFI, PSI,
10599                                                  Checks);
10600 
10601         VPlan &BestEpiPlan = LVP.getBestPlanFor(EPI.EpilogueVF);
10602 
10603         // Ensure that the start values for any VPReductionPHIRecipes are
10604         // updated before vectorising the epilogue loop.
10605         VPBasicBlock *Header = BestEpiPlan.getEntry()->getEntryBasicBlock();
10606         for (VPRecipeBase &R : Header->phis()) {
10607           if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) {
10608             if (auto *Resume = MainILV.getReductionResumeValue(
10609                     ReductionPhi->getRecurrenceDescriptor())) {
10610               VPValue *StartVal = new VPValue(Resume);
10611               BestEpiPlan.addExternalDef(StartVal);
10612               ReductionPhi->setOperand(0, StartVal);
10613             }
10614           }
10615         }
10616 
10617         LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV,
10618                         DT);
10619         ++LoopsEpilogueVectorized;
10620 
10621         if (!MainILV.areSafetyChecksAdded())
10622           DisableRuntimeUnroll = true;
10623       } else {
10624         InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
10625                                &LVL, &CM, BFI, PSI, Checks);
10626 
10627         VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10628         LVP.executePlan(VF.Width, IC, BestPlan, LB, DT);
10629         ++LoopsVectorized;
10630 
10631         // Add metadata to disable runtime unrolling a scalar loop when there
10632         // are no runtime checks about strides and memory. A scalar loop that is
10633         // rarely used is not worth unrolling.
10634         if (!LB.areSafetyChecksAdded())
10635           DisableRuntimeUnroll = true;
10636       }
10637       // Report the vectorization decision.
10638       ORE->emit([&]() {
10639         return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
10640                                   L->getHeader())
10641                << "vectorized loop (vectorization width: "
10642                << NV("VectorizationFactor", VF.Width)
10643                << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
10644       });
10645     }
10646 
10647     if (ORE->allowExtraAnalysis(LV_NAME))
10648       checkMixedPrecision(L, ORE);
10649   }
10650 
10651   Optional<MDNode *> RemainderLoopID =
10652       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
10653                                       LLVMLoopVectorizeFollowupEpilogue});
10654   if (RemainderLoopID.hasValue()) {
10655     L->setLoopID(RemainderLoopID.getValue());
10656   } else {
10657     if (DisableRuntimeUnroll)
10658       AddRuntimeUnrollDisableMetaData(L);
10659 
10660     // Mark the loop as already vectorized to avoid vectorizing again.
10661     Hints.setAlreadyVectorized();
10662   }
10663 
10664   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10665   return true;
10666 }
10667 
10668 LoopVectorizeResult LoopVectorizePass::runImpl(
10669     Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
10670     DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
10671     DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
10672     std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
10673     OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
10674   SE = &SE_;
10675   LI = &LI_;
10676   TTI = &TTI_;
10677   DT = &DT_;
10678   BFI = &BFI_;
10679   TLI = TLI_;
10680   AA = &AA_;
10681   AC = &AC_;
10682   GetLAA = &GetLAA_;
10683   DB = &DB_;
10684   ORE = &ORE_;
10685   PSI = PSI_;
10686 
10687   // Don't attempt if
10688   // 1. the target claims to have no vector registers, and
10689   // 2. interleaving won't help ILP.
10690   //
10691   // The second condition is necessary because, even if the target has no
10692   // vector registers, loop vectorization may still enable scalar
10693   // interleaving.
10694   if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
10695       TTI->getMaxInterleaveFactor(1) < 2)
10696     return LoopVectorizeResult(false, false);
10697 
10698   bool Changed = false, CFGChanged = false;
10699 
10700   // The vectorizer requires loops to be in simplified form.
10701   // Since simplification may add new inner loops, it has to run before the
10702   // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
10704   // vectorized.
10705   for (auto &L : *LI)
10706     Changed |= CFGChanged |=
10707         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10708 
10709   // Build up a worklist of inner-loops to vectorize. This is necessary as
10710   // the act of vectorizing or partially unrolling a loop creates new loops
10711   // and can invalidate iterators across the loops.
10712   SmallVector<Loop *, 8> Worklist;
10713 
10714   for (Loop *L : *LI)
10715     collectSupportedLoops(*L, LI, ORE, Worklist);
10716 
10717   LoopsAnalyzed += Worklist.size();
10718 
10719   // Now walk the identified inner loops.
10720   while (!Worklist.empty()) {
10721     Loop *L = Worklist.pop_back_val();
10722 
10723     // For the inner loops we actually process, form LCSSA to simplify the
10724     // transform.
10725     Changed |= formLCSSARecursively(*L, *DT, LI, SE);
10726 
10727     Changed |= CFGChanged |= processLoop(L);
10728   }
10729 
10731   return LoopVectorizeResult(Changed, CFGChanged);
10732 }
10733 
10734 PreservedAnalyses LoopVectorizePass::run(Function &F,
10735                                          FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,      SE,
                                      TLI, TTI, nullptr, nullptr, nullptr};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve LoopInfo/DominatorTree analyses with outer
  // loop vectorization. Until this is addressed, mark these analyses as
  // preserved only for the non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }

  if (Result.MadeCFGChange) {
    // Making CFG changes likely means a loop got vectorized. Indicate that
    // extra simplification passes should be run.
    // TODO: MadeCFGChange is not a perfect proxy. Extra passes should only
    // be run if runtime checks have been added.
    AM.getResult<ShouldRunExtraVectorPasses>(F);
    PA.preserve<ShouldRunExtraVectorPasses>();
  } else {
    PA.preserveSet<CFGAnalyses>();
  }
  return PA;
10783 }
10784 
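// Prints the pass options in pipeline syntax, e.g.
// "loop-vectorize<no-interleave-forced-only;no-vectorize-forced-only;>" when
// both options are disabled.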
10785 void LoopVectorizePass::printPipeline(
10786     raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
10787   static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
10788       OS, MapClassName2PassName);
10789 
10790   OS << "<";
10791   OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
10792   OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
10793   OS << ">";
10794 }
10795