1 //===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
10 // and generates target-independent LLVM-IR.
11 // The vectorizer uses the TargetTransformInfo analysis to estimate the costs
12 // of instructions in order to estimate the profitability of vectorization.
13 //
14 // The loop vectorizer combines consecutive loop iterations into a single
15 // 'wide' iteration. After this transformation the index is incremented
16 // by the SIMD vector width, and not by one.
17 //
18 // This pass has three parts:
19 // 1. The main loop pass that drives the different parts.
20 // 2. LoopVectorizationLegality - A unit that checks for the legality
21 //    of the vectorization.
22 // 3. InnerLoopVectorizer - A unit that performs the actual
23 //    widening of instructions.
24 // 4. LoopVectorizationCostModel - A unit that checks for the profitability
25 //    of vectorization. It decides on the optimal vector width, which
26 //    can be one, if vectorization is not profitable.
27 //
28 // There is a development effort going on to migrate loop vectorizer to the
29 // VPlan infrastructure and to introduce outer loop vectorization support (see
30 // docs/Proposal/VectorizationPlan.rst and
31 // http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
32 // purpose, we temporarily introduced the VPlan-native vectorization path: an
33 // alternative vectorization path that is natively implemented on top of the
34 // VPlan infrastructure. See EnableVPlanNativePath for enabling.
35 //
36 //===----------------------------------------------------------------------===//
37 //
38 // The reduction-variable vectorization is based on the paper:
39 //  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
40 //
41 // Variable uniformity checks are inspired by:
42 //  Karrenberg, R. and Hack, S. Whole Function Vectorization.
43 //
44 // The interleaved access vectorization is based on the paper:
45 //  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
46 //  Data for SIMD
47 //
48 // Other ideas/concepts are from:
49 //  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
50 //
51 //  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
52 //  Vectorizing Compilers.
53 //
54 //===----------------------------------------------------------------------===//
55 
56 #include "llvm/Transforms/Vectorize/LoopVectorize.h"
57 #include "LoopVectorizationPlanner.h"
58 #include "VPRecipeBuilder.h"
59 #include "VPlan.h"
60 #include "VPlanHCFGBuilder.h"
61 #include "VPlanPredicator.h"
62 #include "VPlanTransforms.h"
63 #include "llvm/ADT/APInt.h"
64 #include "llvm/ADT/ArrayRef.h"
65 #include "llvm/ADT/DenseMap.h"
66 #include "llvm/ADT/DenseMapInfo.h"
67 #include "llvm/ADT/Hashing.h"
68 #include "llvm/ADT/MapVector.h"
69 #include "llvm/ADT/None.h"
70 #include "llvm/ADT/Optional.h"
71 #include "llvm/ADT/STLExtras.h"
72 #include "llvm/ADT/SmallPtrSet.h"
73 #include "llvm/ADT/SmallSet.h"
74 #include "llvm/ADT/SmallVector.h"
75 #include "llvm/ADT/Statistic.h"
76 #include "llvm/ADT/StringRef.h"
77 #include "llvm/ADT/Twine.h"
78 #include "llvm/ADT/iterator_range.h"
79 #include "llvm/Analysis/AssumptionCache.h"
80 #include "llvm/Analysis/BasicAliasAnalysis.h"
81 #include "llvm/Analysis/BlockFrequencyInfo.h"
82 #include "llvm/Analysis/CFG.h"
83 #include "llvm/Analysis/CodeMetrics.h"
84 #include "llvm/Analysis/DemandedBits.h"
85 #include "llvm/Analysis/GlobalsModRef.h"
86 #include "llvm/Analysis/LoopAccessAnalysis.h"
87 #include "llvm/Analysis/LoopAnalysisManager.h"
88 #include "llvm/Analysis/LoopInfo.h"
89 #include "llvm/Analysis/LoopIterator.h"
90 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
91 #include "llvm/Analysis/ProfileSummaryInfo.h"
92 #include "llvm/Analysis/ScalarEvolution.h"
93 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
94 #include "llvm/Analysis/TargetLibraryInfo.h"
95 #include "llvm/Analysis/TargetTransformInfo.h"
96 #include "llvm/Analysis/VectorUtils.h"
97 #include "llvm/IR/Attributes.h"
98 #include "llvm/IR/BasicBlock.h"
99 #include "llvm/IR/CFG.h"
100 #include "llvm/IR/Constant.h"
101 #include "llvm/IR/Constants.h"
102 #include "llvm/IR/DataLayout.h"
103 #include "llvm/IR/DebugInfoMetadata.h"
104 #include "llvm/IR/DebugLoc.h"
105 #include "llvm/IR/DerivedTypes.h"
106 #include "llvm/IR/DiagnosticInfo.h"
107 #include "llvm/IR/Dominators.h"
108 #include "llvm/IR/Function.h"
109 #include "llvm/IR/IRBuilder.h"
110 #include "llvm/IR/InstrTypes.h"
111 #include "llvm/IR/Instruction.h"
112 #include "llvm/IR/Instructions.h"
113 #include "llvm/IR/IntrinsicInst.h"
114 #include "llvm/IR/Intrinsics.h"
115 #include "llvm/IR/Metadata.h"
116 #include "llvm/IR/Module.h"
117 #include "llvm/IR/Operator.h"
118 #include "llvm/IR/PatternMatch.h"
119 #include "llvm/IR/Type.h"
120 #include "llvm/IR/Use.h"
121 #include "llvm/IR/User.h"
122 #include "llvm/IR/Value.h"
123 #include "llvm/IR/ValueHandle.h"
124 #include "llvm/IR/Verifier.h"
125 #include "llvm/InitializePasses.h"
126 #include "llvm/Pass.h"
127 #include "llvm/Support/Casting.h"
128 #include "llvm/Support/CommandLine.h"
129 #include "llvm/Support/Compiler.h"
130 #include "llvm/Support/Debug.h"
131 #include "llvm/Support/ErrorHandling.h"
132 #include "llvm/Support/InstructionCost.h"
133 #include "llvm/Support/MathExtras.h"
134 #include "llvm/Support/raw_ostream.h"
135 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
136 #include "llvm/Transforms/Utils/InjectTLIMappings.h"
137 #include "llvm/Transforms/Utils/LoopSimplify.h"
138 #include "llvm/Transforms/Utils/LoopUtils.h"
139 #include "llvm/Transforms/Utils/LoopVersioning.h"
140 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
141 #include "llvm/Transforms/Utils/SizeOpts.h"
142 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
143 #include <algorithm>
144 #include <cassert>
145 #include <cstdint>
146 #include <functional>
147 #include <iterator>
148 #include <limits>
149 #include <map>
150 #include <memory>
151 #include <string>
152 #include <tuple>
153 #include <utility>
154 
155 using namespace llvm;
156 
157 #define LV_NAME "loop-vectorize"
158 #define DEBUG_TYPE LV_NAME
159 
160 #ifndef NDEBUG
161 const char VerboseDebug[] = DEBUG_TYPE "-verbose";
162 #endif
163 
164 /// @{
165 /// Metadata attribute names
166 const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
167 const char LLVMLoopVectorizeFollowupVectorized[] =
168     "llvm.loop.vectorize.followup_vectorized";
169 const char LLVMLoopVectorizeFollowupEpilogue[] =
170     "llvm.loop.vectorize.followup_epilogue";
171 /// @}
172 
173 STATISTIC(LoopsVectorized, "Number of loops vectorized");
174 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
175 STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");
176 
177 static cl::opt<bool> EnableEpilogueVectorization(
178     "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
179     cl::desc("Enable vectorization of epilogue loops."));
180 
181 static cl::opt<unsigned> EpilogueVectorizationForceVF(
182     "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
183     cl::desc("When epilogue vectorization is enabled, and a value greater than "
184              "1 is specified, forces the given VF for all applicable epilogue "
185              "loops."));
186 
187 static cl::opt<unsigned> EpilogueVectorizationMinVF(
188     "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
189     cl::desc("Only loops with vectorization factor equal to or larger than "
190              "the specified value are considered for epilogue vectorization."));
191 
192 /// Loops with a known constant trip count below this number are vectorized only
193 /// if no scalar iteration overheads are incurred.
194 static cl::opt<unsigned> TinyTripCountVectorThreshold(
195     "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
196     cl::desc("Loops with a constant trip count that is smaller than this "
197              "value are vectorized only if no scalar iteration overheads "
198              "are incurred."));
199 
200 static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
201     "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
202     cl::desc("The maximum allowed number of runtime memory checks with a "
203              "vectorize(enable) pragma."));
204 
205 // Option prefer-predicate-over-epilogue indicates that an epilogue is undesired,
206 // that predication is preferred, and this lists all options. I.e., the
207 // vectorizer will try to fold the tail-loop (epilogue) into the vector body
208 // and predicate the instructions accordingly. If tail-folding fails, there are
209 // different fallback strategies depending on these values:
210 namespace PreferPredicateTy {
211   enum Option {
212     ScalarEpilogue = 0,
213     PredicateElseScalarEpilogue,
214     PredicateOrDontVectorize
215   };
216 } // namespace PreferPredicateTy
217 
218 static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
219     "prefer-predicate-over-epilogue",
220     cl::init(PreferPredicateTy::ScalarEpilogue),
221     cl::Hidden,
222     cl::desc("Tail-folding and predication preferences over creating a scalar "
223              "epilogue loop."),
224     cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
225                          "scalar-epilogue",
226                          "Don't tail-predicate loops, create scalar epilogue"),
227               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
228                          "predicate-else-scalar-epilogue",
229                          "prefer tail-folding, create scalar epilogue if tail "
230                          "folding fails."),
231               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
232                          "predicate-dont-vectorize",
233                          "prefers tail-folding, don't attempt vectorization if "
234                          "tail-folding fails.")));
235 
236 static cl::opt<bool> MaximizeBandwidth(
237     "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
238     cl::desc("Maximize bandwidth when selecting vectorization factor which "
239              "will be determined by the smallest type in loop."));
240 
241 static cl::opt<bool> EnableInterleavedMemAccesses(
242     "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
243     cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
244 
245 /// An interleave-group may need masking if it resides in a block that needs
246 /// predication, or in order to mask away gaps.
247 static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
248     "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
249     cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));
250 
251 static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
252     "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
253     cl::desc("We don't interleave loops with a estimated constant trip count "
254              "below this number"));
255 
256 static cl::opt<unsigned> ForceTargetNumScalarRegs(
257     "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
258     cl::desc("A flag that overrides the target's number of scalar registers."));
259 
260 static cl::opt<unsigned> ForceTargetNumVectorRegs(
261     "force-target-num-vector-regs", cl::init(0), cl::Hidden,
262     cl::desc("A flag that overrides the target's number of vector registers."));
263 
264 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
265     "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
266     cl::desc("A flag that overrides the target's max interleave factor for "
267              "scalar loops."));
268 
269 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
270     "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
271     cl::desc("A flag that overrides the target's max interleave factor for "
272              "vectorized loops."));
273 
274 static cl::opt<unsigned> ForceTargetInstructionCost(
275     "force-target-instruction-cost", cl::init(0), cl::Hidden,
276     cl::desc("A flag that overrides the target's expected cost for "
277              "an instruction to a single constant value. Mostly "
278              "useful for getting consistent testing."));
279 
280 static cl::opt<bool> ForceTargetSupportsScalableVectors(
281     "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
282     cl::desc(
283         "Pretend that scalable vectors are supported, even if the target does "
284         "not support them. This flag should only be used for testing."));
285 
286 static cl::opt<unsigned> SmallLoopCost(
287     "small-loop-cost", cl::init(20), cl::Hidden,
288     cl::desc(
289         "The cost of a loop that is considered 'small' by the interleaver."));
290 
291 static cl::opt<bool> LoopVectorizeWithBlockFrequency(
292     "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
293     cl::desc("Enable the use of the block frequency analysis to access PGO "
294              "heuristics minimizing code growth in cold regions and being more "
295              "aggressive in hot regions."));
296 
297 // Runtime interleave loops for load/store throughput.
298 static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
299     "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
300     cl::desc(
301         "Enable runtime interleaving until load/store ports are saturated"));
302 
303 /// Interleave small loops with scalar reductions.
304 static cl::opt<bool> InterleaveSmallLoopScalarReduction(
305     "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
306     cl::desc("Enable interleaving for loops with small iteration counts that "
307              "contain scalar reductions to expose ILP."));
308 
309 /// The number of stores in a loop that are allowed to need predication.
310 static cl::opt<unsigned> NumberOfStoresToPredicate(
311     "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
312     cl::desc("Max number of stores to be predicated behind an if."));
313 
314 static cl::opt<bool> EnableIndVarRegisterHeur(
315     "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
316     cl::desc("Count the induction variable only once when interleaving"));
317 
318 static cl::opt<bool> EnableCondStoresVectorization(
319     "enable-cond-stores-vec", cl::init(true), cl::Hidden,
320     cl::desc("Enable if predication of stores during vectorization."));
321 
322 static cl::opt<unsigned> MaxNestedScalarReductionIC(
323     "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
324     cl::desc("The maximum interleave count to use when interleaving a scalar "
325              "reduction in a nested loop."));
326 
327 static cl::opt<bool>
328     PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
329                            cl::Hidden,
330                            cl::desc("Prefer in-loop vector reductions, "
331                                     "overriding the targets preference."));
332 
333 static cl::opt<bool> ForceOrderedReductions(
334     "force-ordered-reductions", cl::init(false), cl::Hidden,
335     cl::desc("Enable the vectorisation of loops with in-order (strict) "
336              "FP reductions"));
337 
338 static cl::opt<bool> PreferPredicatedReductionSelect(
339     "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
340     cl::desc(
341         "Prefer predicating a reduction operation over an after loop select."));
342 
343 cl::opt<bool> EnableVPlanNativePath(
344     "enable-vplan-native-path", cl::init(false), cl::Hidden,
345     cl::desc("Enable VPlan-native vectorization path with "
346              "support for outer loop vectorization."));
347 
348 // FIXME: Remove this switch once we have divergence analysis. Currently we
349 // assume divergent non-backedge branches when this switch is true.
350 cl::opt<bool> EnableVPlanPredication(
351     "enable-vplan-predication", cl::init(false), cl::Hidden,
352     cl::desc("Enable VPlan-native vectorization path predicator with "
353              "support for outer loop vectorization."));
354 
355 // This flag enables the stress testing of the VPlan H-CFG construction in the
356 // VPlan-native vectorization path. It must be used in conjuction with
357 // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
358 // verification of the H-CFGs built.
359 static cl::opt<bool> VPlanBuildStressTest(
360     "vplan-build-stress-test", cl::init(false), cl::Hidden,
361     cl::desc(
362         "Build VPlan for every supported loop nest in the function and bail "
363         "out right after the build (stress test the VPlan H-CFG construction "
364         "in the VPlan-native vectorization path)."));
365 
366 cl::opt<bool> llvm::EnableLoopInterleaving(
367     "interleave-loops", cl::init(true), cl::Hidden,
368     cl::desc("Enable loop interleaving in Loop vectorization passes"));
369 cl::opt<bool> llvm::EnableLoopVectorization(
370     "vectorize-loops", cl::init(true), cl::Hidden,
371     cl::desc("Run the Loop vectorization passes"));
372 
373 cl::opt<bool> PrintVPlansInDotFormat(
374     "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
375     cl::desc("Use dot format instead of plain text when dumping VPlans"));
376 
377 /// A helper function that returns true if the given type is irregular. The
378 /// type is irregular if its allocated size doesn't equal the store size of an
379 /// element of the corresponding vector type.
380 static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
381   // Determine if an array of N elements of type Ty is "bitcast compatible"
382   // with a <N x Ty> vector.
383   // This is only true if there is no padding between the array elements.
384   return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
385 }
386 
387 /// A helper function that returns the reciprocal of the block probability of
388 /// predicated blocks. If we return X, we are assuming the predicated block
389 /// will execute once for every X iterations of the loop header.
390 ///
391 /// TODO: We should use actual block probability here, if available. Currently,
392 ///       we always assume predicated blocks have a 50% chance of executing.
393 static unsigned getReciprocalPredBlockProb() { return 2; }
394 
395 /// A helper function that returns an integer or floating-point constant with
396 /// value C.
397 static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
398   return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
399                            : ConstantFP::get(Ty, C);
400 }
401 
402 /// Returns "best known" trip count for the specified loop \p L as defined by
403 /// the following procedure:
404 ///   1) Returns exact trip count if it is known.
405 ///   2) Returns expected trip count according to profile data if any.
406 ///   3) Returns upper bound estimate if it is known.
407 ///   4) Returns None if all of the above failed.
408 static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
409   // Check if exact trip count is known.
410   if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
411     return ExpectedTC;
412 
413   // Check if there is an expected trip count available from profile data.
414   if (LoopVectorizeWithBlockFrequency)
415     if (auto EstimatedTC = getLoopEstimatedTripCount(L))
416       return EstimatedTC;
417 
418   // Check if upper bound estimate is known.
419   if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
420     return ExpectedTC;
421 
422   return None;
423 }
424 
425 // Forward declare GeneratedRTChecks.
426 class GeneratedRTChecks;
427 
428 namespace llvm {
429 
430 AnalysisKey ShouldRunExtraVectorPasses::Key;
431 
432 /// InnerLoopVectorizer vectorizes loops which contain only one basic
433 /// block to a specified vectorization factor (VF).
434 /// This class performs the widening of scalars into vectors, or multiple
435 /// scalars. This class also implements the following features:
436 /// * It inserts an epilogue loop for handling loops that don't have iteration
437 ///   counts that are known to be a multiple of the vectorization factor.
438 /// * It handles the code generation for reduction variables.
439 /// * Scalarization (implementation using scalars) of un-vectorizable
440 ///   instructions.
441 /// InnerLoopVectorizer does not perform any vectorization-legality
442 /// checks, and relies on the caller to check for the different legality
443 /// aspects. The InnerLoopVectorizer relies on the
444 /// LoopVectorizationLegality class to provide information about the induction
445 /// and reduction variables that were found to a given vectorization factor.
446 class InnerLoopVectorizer {
447 public:
448   InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
449                       LoopInfo *LI, DominatorTree *DT,
450                       const TargetLibraryInfo *TLI,
451                       const TargetTransformInfo *TTI, AssumptionCache *AC,
452                       OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
453                       unsigned UnrollFactor, LoopVectorizationLegality *LVL,
454                       LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
455                       ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
456       : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
457         AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
458         Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
459         PSI(PSI), RTChecks(RTChecks) {
460     // Query this against the original loop and save it here because the profile
461     // of the original loop header may change as the transformation happens.
462     OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
463         OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
464   }
465 
466   virtual ~InnerLoopVectorizer() = default;
467 
468   /// Create a new empty loop that will contain vectorized instructions later
469   /// on, while the old loop will be used as the scalar remainder. Control flow
470   /// is generated around the vectorized (and scalar epilogue) loops consisting
471   /// of various checks and bypasses. Return the pre-header block of the new
472   /// loop and the start value for the canonical induction, if it is != 0. The
473   /// latter is the case when vectorizing the epilogue loop. In the case of
474   /// epilogue vectorization, this function is overriden to handle the more
475   /// complex control flow around the loops.
476   virtual std::pair<BasicBlock *, Value *> createVectorizedLoopSkeleton();
477 
478   /// Widen a single call instruction within the innermost loop.
479   void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
480                             VPTransformState &State);
481 
482   /// Fix the vectorized code, taking care of header phi's, live-outs, and more.
483   void fixVectorizedLoop(VPTransformState &State);
484 
485   // Return true if any runtime check is added.
486   bool areSafetyChecksAdded() { return AddedSafetyChecks; }
487 
488   /// A type for vectorized values in the new loop. Each value from the
489   /// original loop, when vectorized, is represented by UF vector values in the
490   /// new unrolled loop, where UF is the unroll factor.
491   using VectorParts = SmallVector<Value *, 2>;
492 
493   /// Vectorize a single vector PHINode in a block in the VPlan-native path
494   /// only.
495   void widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR,
496                            VPTransformState &State);
497 
498   /// A helper function to scalarize a single Instruction in the innermost loop.
499   /// Generates a sequence of scalar instances for each lane between \p MinLane
500   /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
501   /// inclusive. Uses the VPValue operands from \p RepRecipe instead of \p
502   /// Instr's operands.
503   void scalarizeInstruction(Instruction *Instr, VPReplicateRecipe *RepRecipe,
504                             const VPIteration &Instance, bool IfPredicateInstr,
505                             VPTransformState &State);
506 
507   /// Construct the vector value of a scalarized value \p V one lane at a time.
508   void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
509                                  VPTransformState &State);
510 
511   /// Try to vectorize interleaved access group \p Group with the base address
512   /// given in \p Addr, optionally masking the vector operations if \p
513   /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
514   /// values in the vectorized loop.
515   void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
516                                 ArrayRef<VPValue *> VPDefs,
517                                 VPTransformState &State, VPValue *Addr,
518                                 ArrayRef<VPValue *> StoredValues,
519                                 VPValue *BlockInMask = nullptr);
520 
521   /// Set the debug location in the builder \p Ptr using the debug location in
522   /// \p V. If \p Ptr is None then it uses the class member's Builder.
523   void setDebugLocFromInst(const Value *V,
524                            Optional<IRBuilderBase *> CustomBuilder = None);
525 
526   /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
527   void fixNonInductionPHIs(VPTransformState &State);
528 
529   /// Returns true if the reordering of FP operations is not allowed, but we are
530   /// able to vectorize with strict in-order reductions for the given RdxDesc.
531   bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc);
532 
533   /// Create a broadcast instruction. This method generates a broadcast
534   /// instruction (shuffle) for loop invariant values and for the induction
535   /// value. If this is the induction variable then we extend it to N, N+1, ...
536   /// this is needed because each iteration in the loop corresponds to a SIMD
537   /// element.
538   virtual Value *getBroadcastInstrs(Value *V);
539 
540   /// Add metadata from one instruction to another.
541   ///
542   /// This includes both the original MDs from \p From and additional ones (\see
543   /// addNewMetadata).  Use this for *newly created* instructions in the vector
544   /// loop.
545   void addMetadata(Instruction *To, Instruction *From);
546 
547   /// Similar to the previous function but it adds the metadata to a
548   /// vector of instructions.
549   void addMetadata(ArrayRef<Value *> To, Instruction *From);
550 
551   // Returns the resume value (bc.merge.rdx) for a reduction as
552   // generated by fixReduction.
553   PHINode *getReductionResumeValue(const RecurrenceDescriptor &RdxDesc);
554 
555 protected:
556   friend class LoopVectorizationPlanner;
557 
558   /// A small list of PHINodes.
559   using PhiVector = SmallVector<PHINode *, 4>;
560 
561   /// A type for scalarized values in the new loop. Each value from the
562   /// original loop, when scalarized, is represented by UF x VF scalar values
563   /// in the new unrolled loop, where UF is the unroll factor and VF is the
564   /// vectorization factor.
565   using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;
566 
567   /// Set up the values of the IVs correctly when exiting the vector loop.
568   void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
569                     Value *CountRoundDown, Value *EndValue,
570                     BasicBlock *MiddleBlock, BasicBlock *VectorHeader);
571 
572   /// Handle all cross-iteration phis in the header.
573   void fixCrossIterationPHIs(VPTransformState &State);
574 
575   /// Create the exit value of first order recurrences in the middle block and
576   /// update their users.
577   void fixFirstOrderRecurrence(VPFirstOrderRecurrencePHIRecipe *PhiR,
578                                VPTransformState &State);
579 
580   /// Create code for the loop exit value of the reduction.
581   void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);
582 
583   /// Clear NSW/NUW flags from reduction instructions if necessary.
584   void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
585                                VPTransformState &State);
586 
587   /// Fixup the LCSSA phi nodes in the unique exit block.  This simply
588   /// means we need to add the appropriate incoming value from the middle
589   /// block as exiting edges from the scalar epilogue loop (if present) are
590   /// already in place, and we exit the vector loop exclusively to the middle
591   /// block.
592   void fixLCSSAPHIs(VPTransformState &State);
593 
594   /// Iteratively sink the scalarized operands of a predicated instruction into
595   /// the block that was created for it.
596   void sinkScalarOperands(Instruction *PredInst);
597 
598   /// Shrinks vector element sizes to the smallest bitwidth they can be legally
599   /// represented as.
600   void truncateToMinimalBitwidths(VPTransformState &State);
601 
602   /// Returns (and creates if needed) the original loop trip count.
603   Value *getOrCreateTripCount(BasicBlock *InsertBlock);
604 
605   /// Returns (and creates if needed) the trip count of the widened loop.
606   Value *getOrCreateVectorTripCount(BasicBlock *InsertBlock);
607 
608   /// Returns a bitcasted value to the requested vector type.
609   /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
610   Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
611                                 const DataLayout &DL);
612 
613   /// Emit a bypass check to see if the vector trip count is zero, including if
614   /// it overflows.
615   void emitMinimumIterationCountCheck(BasicBlock *Bypass);
616 
617   /// Emit a bypass check to see if all of the SCEV assumptions we've
618   /// had to make are correct. Returns the block containing the checks or
619   /// nullptr if no checks have been added.
620   BasicBlock *emitSCEVChecks(BasicBlock *Bypass);
621 
622   /// Emit bypass checks to check any memory assumptions we may have made.
623   /// Returns the block containing the checks or nullptr if no checks have been
624   /// added.
625   BasicBlock *emitMemRuntimeChecks(BasicBlock *Bypass);
626 
627   /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
628   /// vector loop preheader, middle block and scalar preheader.
629   void createVectorLoopSkeleton(StringRef Prefix);
630 
631   /// Create new phi nodes for the induction variables to resume iteration count
632   /// in the scalar epilogue, from where the vectorized loop left off.
633   /// In cases where the loop skeleton is more complicated (eg. epilogue
634   /// vectorization) and the resume values can come from an additional bypass
635   /// block, the \p AdditionalBypass pair provides information about the bypass
636   /// block and the end value on the edge from bypass to this loop.
637   void createInductionResumeValues(
638       std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});
639 
640   /// Complete the loop skeleton by adding debug MDs, creating appropriate
641   /// conditional branches in the middle block, preparing the builder and
642   /// running the verifier. Return the preheader of the completed vector loop.
643   BasicBlock *completeLoopSkeleton(MDNode *OrigLoopID);
644 
645   /// Add additional metadata to \p To that was not present on \p Orig.
646   ///
647   /// Currently this is used to add the noalias annotations based on the
648   /// inserted memchecks.  Use this for instructions that are *cloned* into the
649   /// vector loop.
650   void addNewMetadata(Instruction *To, const Instruction *Orig);
651 
652   /// Collect poison-generating recipes that may generate a poison value that is
653   /// used after vectorization, even when their operands are not poison. Those
654   /// recipes meet the following conditions:
655   ///  * Contribute to the address computation of a recipe generating a widen
656   ///    memory load/store (VPWidenMemoryInstructionRecipe or
657   ///    VPInterleaveRecipe).
658   ///  * Such a widen memory load/store has at least one underlying Instruction
659   ///    that is in a basic block that needs predication and after vectorization
660   ///    the generated instruction won't be predicated.
661   void collectPoisonGeneratingRecipes(VPTransformState &State);
662 
663   /// Allow subclasses to override and print debug traces before/after vplan
664   /// execution, when trace information is requested.
665   virtual void printDebugTracesAtStart(){};
666   virtual void printDebugTracesAtEnd(){};
667 
668   /// The original loop.
669   Loop *OrigLoop;
670 
671   /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
672   /// dynamic knowledge to simplify SCEV expressions and converts them to a
673   /// more usable form.
674   PredicatedScalarEvolution &PSE;
675 
676   /// Loop Info.
677   LoopInfo *LI;
678 
679   /// Dominator Tree.
680   DominatorTree *DT;
681 
682   /// Alias Analysis.
683   AAResults *AA;
684 
685   /// Target Library Info.
686   const TargetLibraryInfo *TLI;
687 
688   /// Target Transform Info.
689   const TargetTransformInfo *TTI;
690 
691   /// Assumption Cache.
692   AssumptionCache *AC;
693 
694   /// Interface to emit optimization remarks.
695   OptimizationRemarkEmitter *ORE;
696 
697   /// LoopVersioning.  It's only set up (non-null) if memchecks were
698   /// used.
699   ///
700   /// This is currently only used to add no-alias metadata based on the
701   /// memchecks.  The actually versioning is performed manually.
702   std::unique_ptr<LoopVersioning> LVer;
703 
704   /// The vectorization SIMD factor to use. Each vector will have this many
705   /// vector elements.
706   ElementCount VF;
707 
708   /// The vectorization unroll factor to use. Each scalar is vectorized to this
709   /// many different vector instructions.
710   unsigned UF;
711 
712   /// The builder that we use
713   IRBuilder<> Builder;
714 
715   // --- Vectorization state ---
716 
717   /// The vector-loop preheader.
718   BasicBlock *LoopVectorPreHeader;
719 
720   /// The scalar-loop preheader.
721   BasicBlock *LoopScalarPreHeader;
722 
723   /// Middle Block between the vector and the scalar.
724   BasicBlock *LoopMiddleBlock;
725 
726   /// The unique ExitBlock of the scalar loop if one exists.  Note that
727   /// there can be multiple exiting edges reaching this block.
728   BasicBlock *LoopExitBlock;
729 
730   /// The scalar loop body.
731   BasicBlock *LoopScalarBody;
732 
733   /// A list of all bypass blocks. The first block is the entry of the loop.
734   SmallVector<BasicBlock *, 4> LoopBypassBlocks;
735 
736   /// Store instructions that were predicated.
737   SmallVector<Instruction *, 4> PredicatedInstructions;
738 
739   /// Trip count of the original loop.
740   Value *TripCount = nullptr;
741 
742   /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
743   Value *VectorTripCount = nullptr;
744 
745   /// The legality analysis.
746   LoopVectorizationLegality *Legal;
747 
748   /// The profitablity analysis.
749   LoopVectorizationCostModel *Cost;
750 
751   // Record whether runtime checks are added.
752   bool AddedSafetyChecks = false;
753 
754   // Holds the end values for each induction variable. We save the end values
755   // so we can later fix-up the external users of the induction variables.
756   DenseMap<PHINode *, Value *> IVEndValues;
757 
758   // Vector of original scalar PHIs whose corresponding widened PHIs need to be
759   // fixed up at the end of vector code generation.
760   SmallVector<PHINode *, 8> OrigPHIsToFix;
761 
762   /// BFI and PSI are used to check for profile guided size optimizations.
763   BlockFrequencyInfo *BFI;
764   ProfileSummaryInfo *PSI;
765 
766   // Whether this loop should be optimized for size based on profile guided size
767   // optimizatios.
768   bool OptForSizeBasedOnProfile;
769 
770   /// Structure to hold information about generated runtime checks, responsible
771   /// for cleaning the checks, if vectorization turns out unprofitable.
772   GeneratedRTChecks &RTChecks;
773 
774   // Holds the resume values for reductions in the loops, used to set the
775   // correct start value of reduction PHIs when vectorizing the epilogue.
776   SmallMapVector<const RecurrenceDescriptor *, PHINode *, 4>
777       ReductionResumeValues;
778 };
779 
780 class InnerLoopUnroller : public InnerLoopVectorizer {
781 public:
782   InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
783                     LoopInfo *LI, DominatorTree *DT,
784                     const TargetLibraryInfo *TLI,
785                     const TargetTransformInfo *TTI, AssumptionCache *AC,
786                     OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
787                     LoopVectorizationLegality *LVL,
788                     LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
789                     ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
790       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
791                             ElementCount::getFixed(1), UnrollFactor, LVL, CM,
792                             BFI, PSI, Check) {}
793 
794 private:
795   Value *getBroadcastInstrs(Value *V) override;
796 };
797 
798 /// Encapsulate information regarding vectorization of a loop and its epilogue.
799 /// This information is meant to be updated and used across two stages of
800 /// epilogue vectorization.
801 struct EpilogueLoopVectorizationInfo {
802   ElementCount MainLoopVF = ElementCount::getFixed(0);
803   unsigned MainLoopUF = 0;
804   ElementCount EpilogueVF = ElementCount::getFixed(0);
805   unsigned EpilogueUF = 0;
806   BasicBlock *MainLoopIterationCountCheck = nullptr;
807   BasicBlock *EpilogueIterationCountCheck = nullptr;
808   BasicBlock *SCEVSafetyCheck = nullptr;
809   BasicBlock *MemSafetyCheck = nullptr;
810   Value *TripCount = nullptr;
811   Value *VectorTripCount = nullptr;
812 
813   EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF,
814                                 ElementCount EVF, unsigned EUF)
815       : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF) {
816     assert(EUF == 1 &&
817            "A high UF for the epilogue loop is likely not beneficial.");
818   }
819 };
820 
821 /// An extension of the inner loop vectorizer that creates a skeleton for a
822 /// vectorized loop that has its epilogue (residual) also vectorized.
823 /// The idea is to run the vplan on a given loop twice, firstly to setup the
824 /// skeleton and vectorize the main loop, and secondly to complete the skeleton
825 /// from the first step and vectorize the epilogue.  This is achieved by
826 /// deriving two concrete strategy classes from this base class and invoking
827 /// them in succession from the loop vectorizer planner.
828 class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
829 public:
830   InnerLoopAndEpilogueVectorizer(
831       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
832       DominatorTree *DT, const TargetLibraryInfo *TLI,
833       const TargetTransformInfo *TTI, AssumptionCache *AC,
834       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
835       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
836       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
837       GeneratedRTChecks &Checks)
838       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
839                             EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
840                             Checks),
841         EPI(EPI) {}
842 
843   // Override this function to handle the more complex control flow around the
844   // three loops.
845   std::pair<BasicBlock *, Value *>
846   createVectorizedLoopSkeleton() final override {
847     return createEpilogueVectorizedLoopSkeleton();
848   }
849 
850   /// The interface for creating a vectorized skeleton using one of two
851   /// different strategies, each corresponding to one execution of the vplan
852   /// as described above.
853   virtual std::pair<BasicBlock *, Value *>
854   createEpilogueVectorizedLoopSkeleton() = 0;
855 
856   /// Holds and updates state information required to vectorize the main loop
857   /// and its epilogue in two separate passes. This setup helps us avoid
858   /// regenerating and recomputing runtime safety checks. It also helps us to
859   /// shorten the iteration-count-check path length for the cases where the
860   /// iteration count of the loop is so small that the main vector loop is
861   /// completely skipped.
862   EpilogueLoopVectorizationInfo &EPI;
863 };
864 
865 /// A specialized derived class of inner loop vectorizer that performs
866 /// vectorization of *main* loops in the process of vectorizing loops and their
867 /// epilogues.
868 class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
869 public:
870   EpilogueVectorizerMainLoop(
871       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
872       DominatorTree *DT, const TargetLibraryInfo *TLI,
873       const TargetTransformInfo *TTI, AssumptionCache *AC,
874       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
875       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
876       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
877       GeneratedRTChecks &Check)
878       : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
879                                        EPI, LVL, CM, BFI, PSI, Check) {}
880   /// Implements the interface for creating a vectorized skeleton using the
881   /// *main loop* strategy (ie the first pass of vplan execution).
882   std::pair<BasicBlock *, Value *>
883   createEpilogueVectorizedLoopSkeleton() final override;
884 
885 protected:
886   /// Emits an iteration count bypass check once for the main loop (when \p
887   /// ForEpilogue is false) and once for the epilogue loop (when \p
888   /// ForEpilogue is true).
889   BasicBlock *emitMinimumIterationCountCheck(BasicBlock *Bypass,
890                                              bool ForEpilogue);
891   void printDebugTracesAtStart() override;
892   void printDebugTracesAtEnd() override;
893 };
894 
895 // A specialized derived class of inner loop vectorizer that performs
896 // vectorization of *epilogue* loops in the process of vectorizing loops and
897 // their epilogues.
898 class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
899 public:
900   EpilogueVectorizerEpilogueLoop(
901       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
902       DominatorTree *DT, const TargetLibraryInfo *TLI,
903       const TargetTransformInfo *TTI, AssumptionCache *AC,
904       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
905       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
906       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
907       GeneratedRTChecks &Checks)
908       : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
909                                        EPI, LVL, CM, BFI, PSI, Checks) {
910     TripCount = EPI.TripCount;
911   }
912   /// Implements the interface for creating a vectorized skeleton using the
913   /// *epilogue loop* strategy (ie the second pass of vplan execution).
914   std::pair<BasicBlock *, Value *>
915   createEpilogueVectorizedLoopSkeleton() final override;
916 
917 protected:
918   /// Emits an iteration count bypass check after the main vector loop has
919   /// finished to see if there are any iterations left to execute by either
920   /// the vector epilogue or the scalar epilogue.
921   BasicBlock *emitMinimumVectorEpilogueIterCountCheck(
922                                                       BasicBlock *Bypass,
923                                                       BasicBlock *Insert);
924   void printDebugTracesAtStart() override;
925   void printDebugTracesAtEnd() override;
926 };
927 } // end namespace llvm
928 
929 /// Look for a meaningful debug location on the instruction or it's
930 /// operands.
931 static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
932   if (!I)
933     return I;
934 
935   DebugLoc Empty;
936   if (I->getDebugLoc() != Empty)
937     return I;
938 
939   for (Use &Op : I->operands()) {
940     if (Instruction *OpInst = dyn_cast<Instruction>(Op))
941       if (OpInst->getDebugLoc() != Empty)
942         return OpInst;
943   }
944 
945   return I;
946 }
947 
948 void InnerLoopVectorizer::setDebugLocFromInst(
949     const Value *V, Optional<IRBuilderBase *> CustomBuilder) {
950   IRBuilderBase *B = (CustomBuilder == None) ? &Builder : *CustomBuilder;
951   if (const Instruction *Inst = dyn_cast_or_null<Instruction>(V)) {
952     const DILocation *DIL = Inst->getDebugLoc();
953 
954     // When a FSDiscriminator is enabled, we don't need to add the multiply
955     // factors to the discriminators.
956     if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
957         !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
958       // FIXME: For scalable vectors, assume vscale=1.
959       auto NewDIL =
960           DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
961       if (NewDIL)
962         B->SetCurrentDebugLocation(NewDIL.getValue());
963       else
964         LLVM_DEBUG(dbgs()
965                    << "Failed to create new discriminator: "
966                    << DIL->getFilename() << " Line: " << DIL->getLine());
967     } else
968       B->SetCurrentDebugLocation(DIL);
969   } else
970     B->SetCurrentDebugLocation(DebugLoc());
971 }
972 
973 /// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
974 /// is passed, the message relates to that particular instruction.
975 #ifndef NDEBUG
976 static void debugVectorizationMessage(const StringRef Prefix,
977                                       const StringRef DebugMsg,
978                                       Instruction *I) {
979   dbgs() << "LV: " << Prefix << DebugMsg;
980   if (I != nullptr)
981     dbgs() << " " << *I;
982   else
983     dbgs() << '.';
984   dbgs() << '\n';
985 }
986 #endif
987 
988 /// Create an analysis remark that explains why vectorization failed
989 ///
990 /// \p PassName is the name of the pass (e.g. can be AlwaysPrint).  \p
991 /// RemarkName is the identifier for the remark.  If \p I is passed it is an
992 /// instruction that prevents vectorization.  Otherwise \p TheLoop is used for
993 /// the location of the remark.  \return the remark object that can be
994 /// streamed to.
995 static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
996     StringRef RemarkName, Loop *TheLoop, Instruction *I) {
997   Value *CodeRegion = TheLoop->getHeader();
998   DebugLoc DL = TheLoop->getStartLoc();
999 
1000   if (I) {
1001     CodeRegion = I->getParent();
1002     // If there is no debug location attached to the instruction, revert back to
1003     // using the loop's.
1004     if (I->getDebugLoc())
1005       DL = I->getDebugLoc();
1006   }
1007 
1008   return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
1009 }
1010 
1011 namespace llvm {
1012 
1013 /// Return a value for Step multiplied by VF.
1014 Value *createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF,
1015                        int64_t Step) {
1016   assert(Ty->isIntegerTy() && "Expected an integer step");
1017   Constant *StepVal = ConstantInt::get(Ty, Step * VF.getKnownMinValue());
1018   return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
1019 }
1020 
1021 /// Return the runtime value for VF.
1022 Value *getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF) {
1023   Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
1024   return VF.isScalable() ? B.CreateVScale(EC) : EC;
1025 }
1026 
1027 static Value *getRuntimeVFAsFloat(IRBuilderBase &B, Type *FTy,
1028                                   ElementCount VF) {
1029   assert(FTy->isFloatingPointTy() && "Expected floating point type!");
1030   Type *IntTy = IntegerType::get(FTy->getContext(), FTy->getScalarSizeInBits());
1031   Value *RuntimeVF = getRuntimeVF(B, IntTy, VF);
1032   return B.CreateUIToFP(RuntimeVF, FTy);
1033 }
1034 
1035 void reportVectorizationFailure(const StringRef DebugMsg,
1036                                 const StringRef OREMsg, const StringRef ORETag,
1037                                 OptimizationRemarkEmitter *ORE, Loop *TheLoop,
1038                                 Instruction *I) {
1039   LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
1040   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1041   ORE->emit(
1042       createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
1043       << "loop not vectorized: " << OREMsg);
1044 }
1045 
1046 void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
1047                              OptimizationRemarkEmitter *ORE, Loop *TheLoop,
1048                              Instruction *I) {
1049   LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
1050   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1051   ORE->emit(
1052       createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
1053       << Msg);
1054 }
1055 
1056 } // end namespace llvm
1057 
1058 #ifndef NDEBUG
1059 /// \return string containing a file name and a line # for the given loop.
1060 static std::string getDebugLocString(const Loop *L) {
1061   std::string Result;
1062   if (L) {
1063     raw_string_ostream OS(Result);
1064     if (const DebugLoc LoopDbgLoc = L->getStartLoc())
1065       LoopDbgLoc.print(OS);
1066     else
1067       // Just print the module name.
1068       OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
1069     OS.flush();
1070   }
1071   return Result;
1072 }
1073 #endif
1074 
1075 void InnerLoopVectorizer::addNewMetadata(Instruction *To,
1076                                          const Instruction *Orig) {
1077   // If the loop was versioned with memchecks, add the corresponding no-alias
1078   // metadata.
1079   if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
1080     LVer->annotateInstWithNoAlias(To, Orig);
1081 }
1082 
1083 void InnerLoopVectorizer::collectPoisonGeneratingRecipes(
1084     VPTransformState &State) {
1085 
1086   // Collect recipes in the backward slice of `Root` that may generate a poison
1087   // value that is used after vectorization.
1088   SmallPtrSet<VPRecipeBase *, 16> Visited;
1089   auto collectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
1090     SmallVector<VPRecipeBase *, 16> Worklist;
1091     Worklist.push_back(Root);
1092 
1093     // Traverse the backward slice of Root through its use-def chain.
1094     while (!Worklist.empty()) {
1095       VPRecipeBase *CurRec = Worklist.back();
1096       Worklist.pop_back();
1097 
1098       if (!Visited.insert(CurRec).second)
1099         continue;
1100 
1101       // Prune search if we find another recipe generating a widen memory
1102       // instruction. Widen memory instructions involved in address computation
1103       // will lead to gather/scatter instructions, which don't need to be
1104       // handled.
1105       if (isa<VPWidenMemoryInstructionRecipe>(CurRec) ||
1106           isa<VPInterleaveRecipe>(CurRec) ||
1107           isa<VPScalarIVStepsRecipe>(CurRec) ||
1108           isa<VPCanonicalIVPHIRecipe>(CurRec))
1109         continue;
1110 
1111       // This recipe contributes to the address computation of a widen
1112       // load/store. Collect recipe if its underlying instruction has
1113       // poison-generating flags.
1114       Instruction *Instr = CurRec->getUnderlyingInstr();
1115       if (Instr && Instr->hasPoisonGeneratingFlags())
1116         State.MayGeneratePoisonRecipes.insert(CurRec);
1117 
1118       // Add new definitions to the worklist.
1119       for (VPValue *operand : CurRec->operands())
1120         if (VPDef *OpDef = operand->getDef())
1121           Worklist.push_back(cast<VPRecipeBase>(OpDef));
1122     }
1123   });
1124 
1125   // Traverse all the recipes in the VPlan and collect the poison-generating
1126   // recipes in the backward slice starting at the address of a VPWidenRecipe or
1127   // VPInterleaveRecipe.
1128   auto Iter = depth_first(
1129       VPBlockRecursiveTraversalWrapper<VPBlockBase *>(State.Plan->getEntry()));
1130   for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
1131     for (VPRecipeBase &Recipe : *VPBB) {
1132       if (auto *WidenRec = dyn_cast<VPWidenMemoryInstructionRecipe>(&Recipe)) {
1133         Instruction *UnderlyingInstr = WidenRec->getUnderlyingInstr();
1134         VPDef *AddrDef = WidenRec->getAddr()->getDef();
1135         if (AddrDef && WidenRec->isConsecutive() && UnderlyingInstr &&
1136             Legal->blockNeedsPredication(UnderlyingInstr->getParent()))
1137           collectPoisonGeneratingInstrsInBackwardSlice(
1138               cast<VPRecipeBase>(AddrDef));
1139       } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
1140         VPDef *AddrDef = InterleaveRec->getAddr()->getDef();
1141         if (AddrDef) {
1142           // Check if any member of the interleave group needs predication.
1143           const InterleaveGroup<Instruction> *InterGroup =
1144               InterleaveRec->getInterleaveGroup();
1145           bool NeedPredication = false;
1146           for (int I = 0, NumMembers = InterGroup->getNumMembers();
1147                I < NumMembers; ++I) {
1148             Instruction *Member = InterGroup->getMember(I);
1149             if (Member)
1150               NeedPredication |=
1151                   Legal->blockNeedsPredication(Member->getParent());
1152           }
1153 
1154           if (NeedPredication)
1155             collectPoisonGeneratingInstrsInBackwardSlice(
1156                 cast<VPRecipeBase>(AddrDef));
1157         }
1158       }
1159     }
1160   }
1161 }
1162 
1163 void InnerLoopVectorizer::addMetadata(Instruction *To,
1164                                       Instruction *From) {
1165   propagateMetadata(To, From);
1166   addNewMetadata(To, From);
1167 }
1168 
1169 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
1170                                       Instruction *From) {
1171   for (Value *V : To) {
1172     if (Instruction *I = dyn_cast<Instruction>(V))
1173       addMetadata(I, From);
1174   }
1175 }
1176 
1177 PHINode *InnerLoopVectorizer::getReductionResumeValue(
1178     const RecurrenceDescriptor &RdxDesc) {
1179   auto It = ReductionResumeValues.find(&RdxDesc);
1180   assert(It != ReductionResumeValues.end() &&
1181          "Expected to find a resume value for the reduction.");
1182   return It->second;
1183 }
1184 
1185 namespace llvm {
1186 
1187 // Loop vectorization cost-model hints how the scalar epilogue loop should be
1188 // lowered.
1189 enum ScalarEpilogueLowering {
1190 
1191   // The default: allowing scalar epilogues.
1192   CM_ScalarEpilogueAllowed,
1193 
1194   // Vectorization with OptForSize: don't allow epilogues.
1195   CM_ScalarEpilogueNotAllowedOptSize,
1196 
1197   // A special case of vectorisation with OptForSize: loops with a very small
1198   // trip count are considered for vectorization under OptForSize, thereby
1199   // making sure the cost of their loop body is dominant, free of runtime
1200   // guards and scalar iteration overheads.
1201   CM_ScalarEpilogueNotAllowedLowTripLoop,
1202 
1203   // Loop hint predicate indicating an epilogue is undesired.
1204   CM_ScalarEpilogueNotNeededUsePredicate,
1205 
1206   // Directive indicating we must either tail fold or not vectorize
1207   CM_ScalarEpilogueNotAllowedUsePredicate
1208 };
1209 
1210 /// ElementCountComparator creates a total ordering for ElementCount
1211 /// for the purposes of using it in a set structure.
1212 struct ElementCountComparator {
1213   bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
1214     return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
1215            std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
1216   }
1217 };
1218 using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;
1219 
1220 /// LoopVectorizationCostModel - estimates the expected speedups due to
1221 /// vectorization.
1222 /// In many cases vectorization is not profitable. This can happen because of
1223 /// a number of reasons. In this class we mainly attempt to predict the
1224 /// expected speedup/slowdowns due to the supported instruction set. We use the
1225 /// TargetTransformInfo to query the different backends for the cost of
1226 /// different operations.
1227 class LoopVectorizationCostModel {
1228 public:
1229   LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
1230                              PredicatedScalarEvolution &PSE, LoopInfo *LI,
1231                              LoopVectorizationLegality *Legal,
1232                              const TargetTransformInfo &TTI,
1233                              const TargetLibraryInfo *TLI, DemandedBits *DB,
1234                              AssumptionCache *AC,
1235                              OptimizationRemarkEmitter *ORE, const Function *F,
1236                              const LoopVectorizeHints *Hints,
1237                              InterleavedAccessInfo &IAI)
1238       : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
1239         TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
1240         Hints(Hints), InterleaveInfo(IAI) {}
1241 
1242   /// \return An upper bound for the vectorization factors (both fixed and
1243   /// scalable). If the factors are 0, vectorization and interleaving should be
1244   /// avoided up front.
1245   FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);
1246 
1247   /// \return True if runtime checks are required for vectorization, and false
1248   /// otherwise.
1249   bool runtimeChecksRequired();
1250 
1251   /// \return The most profitable vectorization factor and the cost of that VF.
1252   /// This method checks every VF in \p CandidateVFs. If UserVF is not ZERO
1253   /// then this vectorization factor will be selected if vectorization is
1254   /// possible.
1255   VectorizationFactor
1256   selectVectorizationFactor(const ElementCountSet &CandidateVFs);
1257 
1258   VectorizationFactor
1259   selectEpilogueVectorizationFactor(const ElementCount MaxVF,
1260                                     const LoopVectorizationPlanner &LVP);
1261 
1262   /// Setup cost-based decisions for user vectorization factor.
1263   /// \return true if the UserVF is a feasible VF to be chosen.
1264   bool selectUserVectorizationFactor(ElementCount UserVF) {
1265     collectUniformsAndScalars(UserVF);
1266     collectInstsToScalarize(UserVF);
1267     return expectedCost(UserVF).first.isValid();
1268   }
1269 
1270   /// \return The size (in bits) of the smallest and widest types in the code
1271   /// that needs to be vectorized. We ignore values that remain scalar such as
1272   /// 64 bit loop indices.
1273   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1274 
1275   /// \return The desired interleave count.
1276   /// If interleave count has been specified by metadata it will be returned.
1277   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1278   /// are the selected vectorization factor and the cost of the selected VF.
1279   unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);
1280 
1281   /// Memory access instruction may be vectorized in more than one way.
1282   /// Form of instruction after vectorization depends on cost.
1283   /// This function takes cost-based decisions for Load/Store instructions
1284   /// and collects them in a map. This decisions map is used for building
1285   /// the lists of loop-uniform and loop-scalar instructions.
1286   /// The calculated cost is saved with widening decision in order to
1287   /// avoid redundant calculations.
1288   void setCostBasedWideningDecision(ElementCount VF);
1289 
1290   /// A struct that represents some properties of the register usage
1291   /// of a loop.
1292   struct RegisterUsage {
1293     /// Holds the number of loop invariant values that are used in the loop.
1294     /// The key is ClassID of target-provided register class.
1295     SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
1296     /// Holds the maximum number of concurrent live intervals in the loop.
1297     /// The key is ClassID of target-provided register class.
1298     SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
1299   };
1300 
1301   /// \return Returns information about the register usages of the loop for the
1302   /// given vectorization factors.
1303   SmallVector<RegisterUsage, 8>
1304   calculateRegisterUsage(ArrayRef<ElementCount> VFs);
1305 
1306   /// Collect values we want to ignore in the cost model.
1307   void collectValuesToIgnore();
1308 
1309   /// Collect all element types in the loop for which widening is needed.
1310   void collectElementTypesForWidening();
1311 
1312   /// Split reductions into those that happen in the loop, and those that happen
1313   /// outside. In loop reductions are collected into InLoopReductionChains.
1314   void collectInLoopReductions();
1315 
1316   /// Returns true if we should use strict in-order reductions for the given
1317   /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
1318   /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
1319   /// of FP operations.
1320   bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) {
1321     return !Hints->allowReordering() && RdxDesc.isOrdered();
1322   }
1323 
1324   /// \returns The smallest bitwidth each instruction can be represented with.
1325   /// The vector equivalents of these instructions should be truncated to this
1326   /// type.
1327   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1328     return MinBWs;
1329   }
1330 
1331   /// \returns True if it is more profitable to scalarize instruction \p I for
1332   /// vectorization factor \p VF.
1333   bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
1334     assert(VF.isVector() &&
1335            "Profitable to scalarize relevant only for VF > 1.");
1336 
1337     // Cost model is not run in the VPlan-native path - return conservative
1338     // result until this changes.
1339     if (EnableVPlanNativePath)
1340       return false;
1341 
1342     auto Scalars = InstsToScalarize.find(VF);
1343     assert(Scalars != InstsToScalarize.end() &&
1344            "VF not yet analyzed for scalarization profitability");
1345     return Scalars->second.find(I) != Scalars->second.end();
1346   }
1347 
1348   /// Returns true if \p I is known to be uniform after vectorization.
1349   bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
1350     if (VF.isScalar())
1351       return true;
1352 
1353     // Cost model is not run in the VPlan-native path - return conservative
1354     // result until this changes.
1355     if (EnableVPlanNativePath)
1356       return false;
1357 
1358     auto UniformsPerVF = Uniforms.find(VF);
1359     assert(UniformsPerVF != Uniforms.end() &&
1360            "VF not yet analyzed for uniformity");
1361     return UniformsPerVF->second.count(I);
1362   }
1363 
1364   /// Returns true if \p I is known to be scalar after vectorization.
1365   bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
1366     if (VF.isScalar())
1367       return true;
1368 
1369     // Cost model is not run in the VPlan-native path - return conservative
1370     // result until this changes.
1371     if (EnableVPlanNativePath)
1372       return false;
1373 
1374     auto ScalarsPerVF = Scalars.find(VF);
1375     assert(ScalarsPerVF != Scalars.end() &&
1376            "Scalar values are not calculated for VF");
1377     return ScalarsPerVF->second.count(I);
1378   }
1379 
1380   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1381   /// for vectorization factor \p VF.
1382   bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
1383     return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
1384            !isProfitableToScalarize(I, VF) &&
1385            !isScalarAfterVectorization(I, VF);
1386   }
1387 
1388   /// Decision that was taken during cost calculation for memory instruction.
1389   enum InstWidening {
1390     CM_Unknown,
1391     CM_Widen,         // For consecutive accesses with stride +1.
1392     CM_Widen_Reverse, // For consecutive accesses with stride -1.
1393     CM_Interleave,
1394     CM_GatherScatter,
1395     CM_Scalarize
1396   };
1397 
1398   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1399   /// instruction \p I and vector width \p VF.
1400   void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
1401                            InstructionCost Cost) {
1402     assert(VF.isVector() && "Expected VF >=2");
1403     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1404   }
1405 
1406   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1407   /// interleaving group \p Grp and vector width \p VF.
1408   void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
1409                            ElementCount VF, InstWidening W,
1410                            InstructionCost Cost) {
1411     assert(VF.isVector() && "Expected VF >=2");
1412     /// Broadcast this decicion to all instructions inside the group.
1413     /// But the cost will be assigned to one instruction only.
1414     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1415       if (auto *I = Grp->getMember(i)) {
1416         if (Grp->getInsertPos() == I)
1417           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1418         else
1419           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1420       }
1421     }
1422   }
1423 
1424   /// Return the cost model decision for the given instruction \p I and vector
1425   /// width \p VF. Return CM_Unknown if this instruction did not pass
1426   /// through the cost modeling.
1427   InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
1428     assert(VF.isVector() && "Expected VF to be a vector VF");
1429     // Cost model is not run in the VPlan-native path - return conservative
1430     // result until this changes.
1431     if (EnableVPlanNativePath)
1432       return CM_GatherScatter;
1433 
1434     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1435     auto Itr = WideningDecisions.find(InstOnVF);
1436     if (Itr == WideningDecisions.end())
1437       return CM_Unknown;
1438     return Itr->second.first;
1439   }
1440 
1441   /// Return the vectorization cost for the given instruction \p I and vector
1442   /// width \p VF.
1443   InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
1444     assert(VF.isVector() && "Expected VF >=2");
1445     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1446     assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1447            "The cost is not calculated");
1448     return WideningDecisions[InstOnVF].second;
1449   }
1450 
1451   /// Return True if instruction \p I is an optimizable truncate whose operand
1452   /// is an induction variable. Such a truncate will be removed by adding a new
1453   /// induction variable with the destination type.
1454   bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
1455     // If the instruction is not a truncate, return false.
1456     auto *Trunc = dyn_cast<TruncInst>(I);
1457     if (!Trunc)
1458       return false;
1459 
1460     // Get the source and destination types of the truncate.
1461     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1462     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1463 
1464     // If the truncate is free for the given types, return false. Replacing a
1465     // free truncate with an induction variable would add an induction variable
1466     // update instruction to each iteration of the loop. We exclude from this
1467     // check the primary induction variable since it will need an update
1468     // instruction regardless.
1469     Value *Op = Trunc->getOperand(0);
1470     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1471       return false;
1472 
1473     // If the truncated value is not an induction variable, return false.
1474     return Legal->isInductionPhi(Op);
1475   }
1476 
1477   /// Collects the instructions to scalarize for each predicated instruction in
1478   /// the loop.
1479   void collectInstsToScalarize(ElementCount VF);
1480 
1481   /// Collect Uniform and Scalar values for the given \p VF.
1482   /// The sets depend on CM decision for Load/Store instructions
1483   /// that may be vectorized as interleave, gather-scatter or scalarized.
1484   void collectUniformsAndScalars(ElementCount VF) {
1485     // Do the analysis once.
1486     if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
1487       return;
1488     setCostBasedWideningDecision(VF);
1489     collectLoopUniforms(VF);
1490     collectLoopScalars(VF);
1491   }
1492 
1493   /// Returns true if the target machine supports masked store operation
1494   /// for the given \p DataType and kind of access to \p Ptr.
1495   bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const {
1496     return Legal->isConsecutivePtr(DataType, Ptr) &&
1497            TTI.isLegalMaskedStore(DataType, Alignment);
1498   }
1499 
1500   /// Returns true if the target machine supports masked load operation
1501   /// for the given \p DataType and kind of access to \p Ptr.
1502   bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const {
1503     return Legal->isConsecutivePtr(DataType, Ptr) &&
1504            TTI.isLegalMaskedLoad(DataType, Alignment);
1505   }
1506 
1507   /// Returns true if the target machine can represent \p V as a masked gather
1508   /// or scatter operation.
1509   bool isLegalGatherOrScatter(Value *V,
1510                               ElementCount VF = ElementCount::getFixed(1)) {
1511     bool LI = isa<LoadInst>(V);
1512     bool SI = isa<StoreInst>(V);
1513     if (!LI && !SI)
1514       return false;
1515     auto *Ty = getLoadStoreType(V);
1516     Align Align = getLoadStoreAlignment(V);
1517     if (VF.isVector())
1518       Ty = VectorType::get(Ty, VF);
1519     return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
1520            (SI && TTI.isLegalMaskedScatter(Ty, Align));
1521   }
1522 
1523   /// Returns true if the target machine supports all of the reduction
1524   /// variables found for the given VF.
1525   bool canVectorizeReductions(ElementCount VF) const {
1526     return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
1527       const RecurrenceDescriptor &RdxDesc = Reduction.second;
1528       return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
1529     }));
1530   }
1531 
1532   /// Returns true if \p I is an instruction that will be scalarized with
1533   /// predication when vectorizing \p I with vectorization factor \p VF. Such
1534   /// instructions include conditional stores and instructions that may divide
1535   /// by zero.
1536   bool isScalarWithPredication(Instruction *I, ElementCount VF) const;
1537 
1538   // Returns true if \p I is an instruction that will be predicated either
1539   // through scalar predication or masked load/store or masked gather/scatter.
1540   // \p VF is the vectorization factor that will be used to vectorize \p I.
1541   // Superset of instructions that return true for isScalarWithPredication.
1542   bool isPredicatedInst(Instruction *I, ElementCount VF,
1543                         bool IsKnownUniform = false) {
1544     // When we know the load is uniform and the original scalar loop was not
1545     // predicated we don't need to mark it as a predicated instruction. Any
1546     // vectorised blocks created when tail-folding are something artificial we
1547     // have introduced and we know there is always at least one active lane.
1548     // That's why we call Legal->blockNeedsPredication here because it doesn't
1549     // query tail-folding.
1550     if (IsKnownUniform && isa<LoadInst>(I) &&
1551         !Legal->blockNeedsPredication(I->getParent()))
1552       return false;
1553     if (!blockNeedsPredicationForAnyReason(I->getParent()))
1554       return false;
1555     // Loads and stores that need some form of masked operation are predicated
1556     // instructions.
1557     if (isa<LoadInst>(I) || isa<StoreInst>(I))
1558       return Legal->isMaskRequired(I);
1559     return isScalarWithPredication(I, VF);
1560   }
1561 
1562   /// Returns true if \p I is a memory instruction with consecutive memory
1563   /// access that can be widened.
1564   bool
1565   memoryInstructionCanBeWidened(Instruction *I,
1566                                 ElementCount VF = ElementCount::getFixed(1));
1567 
1568   /// Returns true if \p I is a memory instruction in an interleaved-group
1569   /// of memory accesses that can be vectorized with wide vector loads/stores
1570   /// and shuffles.
1571   bool
1572   interleavedAccessCanBeWidened(Instruction *I,
1573                                 ElementCount VF = ElementCount::getFixed(1));
1574 
1575   /// Check if \p Instr belongs to any interleaved access group.
1576   bool isAccessInterleaved(Instruction *Instr) {
1577     return InterleaveInfo.isInterleaved(Instr);
1578   }
1579 
1580   /// Get the interleaved access group that \p Instr belongs to.
1581   const InterleaveGroup<Instruction> *
1582   getInterleavedAccessGroup(Instruction *Instr) {
1583     return InterleaveInfo.getInterleaveGroup(Instr);
1584   }
1585 
1586   /// Returns true if we're required to use a scalar epilogue for at least
1587   /// the final iteration of the original loop.
1588   bool requiresScalarEpilogue(ElementCount VF) const {
1589     if (!isScalarEpilogueAllowed())
1590       return false;
1591     // If we might exit from anywhere but the latch, must run the exiting
1592     // iteration in scalar form.
1593     if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
1594       return true;
1595     return VF.isVector() && InterleaveInfo.requiresScalarEpilogue();
1596   }
1597 
1598   /// Returns true if a scalar epilogue is not allowed due to optsize or a
1599   /// loop hint annotation.
1600   bool isScalarEpilogueAllowed() const {
1601     return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1602   }
1603 
1604   /// Returns true if all loop blocks should be masked to fold tail loop.
1605   bool foldTailByMasking() const { return FoldTailByMasking; }
1606 
1607   /// Returns true if the instructions in this block requires predication
1608   /// for any reason, e.g. because tail folding now requires a predicate
1609   /// or because the block in the original loop was predicated.
1610   bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const {
1611     return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1612   }
1613 
1614   /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
1615   /// nodes to the chain of instructions representing the reductions. Uses a
1616   /// MapVector to ensure deterministic iteration order.
1617   using ReductionChainMap =
1618       SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;
1619 
1620   /// Return the chain of instructions representing an inloop reduction.
1621   const ReductionChainMap &getInLoopReductionChains() const {
1622     return InLoopReductionChains;
1623   }
1624 
1625   /// Returns true if the Phi is part of an inloop reduction.
1626   bool isInLoopReduction(PHINode *Phi) const {
1627     return InLoopReductionChains.count(Phi);
1628   }
1629 
1630   /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1631   /// with factor VF.  Return the cost of the instruction, including
1632   /// scalarization overhead if it's needed.
1633   InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;
1634 
1635   /// Estimate cost of a call instruction CI if it were vectorized with factor
1636   /// VF. Return the cost of the instruction, including scalarization overhead
1637   /// if it's needed. The flag NeedToScalarize shows if the call needs to be
1638   /// scalarized -
1639   /// i.e. either vector version isn't available, or is too expensive.
1640   InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
1641                                     bool &NeedToScalarize) const;
1642 
1643   /// Returns true if the per-lane cost of VectorizationFactor A is lower than
1644   /// that of B.
1645   bool isMoreProfitable(const VectorizationFactor &A,
1646                         const VectorizationFactor &B) const;
1647 
1648   /// Invalidates decisions already taken by the cost model.
1649   void invalidateCostModelingDecisions() {
1650     WideningDecisions.clear();
1651     Uniforms.clear();
1652     Scalars.clear();
1653   }
1654 
1655 private:
1656   unsigned NumPredStores = 0;
1657 
1658   /// Convenience function that returns the value of vscale_range iff
1659   /// vscale_range.min == vscale_range.max or otherwise returns the value
1660   /// returned by the corresponding TLI method.
1661   Optional<unsigned> getVScaleForTuning() const;
1662 
1663   /// \return An upper bound for the vectorization factors for both
1664   /// fixed and scalable vectorization, where the minimum-known number of
1665   /// elements is a power-of-2 larger than zero. If scalable vectorization is
1666   /// disabled or unsupported, then the scalable part will be equal to
1667   /// ElementCount::getScalable(0).
1668   FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
1669                                            ElementCount UserVF,
1670                                            bool FoldTailByMasking);
1671 
1672   /// \return the maximized element count based on the targets vector
1673   /// registers and the loop trip-count, but limited to a maximum safe VF.
1674   /// This is a helper function of computeFeasibleMaxVF.
1675   /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
1676   /// issue that occurred on one of the buildbots which cannot be reproduced
1677   /// without having access to the properietary compiler (see comments on
1678   /// D98509). The issue is currently under investigation and this workaround
1679   /// will be removed as soon as possible.
1680   ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
1681                                        unsigned SmallestType,
1682                                        unsigned WidestType,
1683                                        const ElementCount &MaxSafeVF,
1684                                        bool FoldTailByMasking);
1685 
1686   /// \return the maximum legal scalable VF, based on the safe max number
1687   /// of elements.
1688   ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1689 
1690   /// The vectorization cost is a combination of the cost itself and a boolean
1691   /// indicating whether any of the contributing operations will actually
1692   /// operate on vector values after type legalization in the backend. If this
1693   /// latter value is false, then all operations will be scalarized (i.e. no
1694   /// vectorization has actually taken place).
1695   using VectorizationCostTy = std::pair<InstructionCost, bool>;
1696 
1697   /// Returns the expected execution cost. The unit of the cost does
1698   /// not matter because we use the 'cost' units to compare different
1699   /// vector widths. The cost that is returned is *not* normalized by
1700   /// the factor width. If \p Invalid is not nullptr, this function
1701   /// will add a pair(Instruction*, ElementCount) to \p Invalid for
1702   /// each instruction that has an Invalid cost for the given VF.
1703   using InstructionVFPair = std::pair<Instruction *, ElementCount>;
1704   VectorizationCostTy
1705   expectedCost(ElementCount VF,
1706                SmallVectorImpl<InstructionVFPair> *Invalid = nullptr);
1707 
1708   /// Returns the execution time cost of an instruction for a given vector
1709   /// width. Vector width of one means scalar.
1710   VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1711 
1712   /// The cost-computation logic from getInstructionCost which provides
1713   /// the vector type as an output parameter.
1714   InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
1715                                      Type *&VectorTy);
1716 
1717   /// Return the cost of instructions in an inloop reduction pattern, if I is
1718   /// part of that pattern.
1719   Optional<InstructionCost>
1720   getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy,
1721                           TTI::TargetCostKind CostKind);
1722 
1723   /// Calculate vectorization cost of memory instruction \p I.
1724   InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1725 
1726   /// The cost computation for scalarized memory instruction.
1727   InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1728 
1729   /// The cost computation for interleaving group of memory instructions.
1730   InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1731 
1732   /// The cost computation for Gather/Scatter instruction.
1733   InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1734 
1735   /// The cost computation for widening instruction \p I with consecutive
1736   /// memory access.
1737   InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1738 
1739   /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1740   /// Load: scalar load + broadcast.
1741   /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1742   /// element)
1743   InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1744 
1745   /// Estimate the overhead of scalarizing an instruction. This is a
1746   /// convenience wrapper for the type-based getScalarizationOverhead API.
1747   InstructionCost getScalarizationOverhead(Instruction *I,
1748                                            ElementCount VF) const;
1749 
1750   /// Returns whether the instruction is a load or store and will be a emitted
1751   /// as a vector operation.
1752   bool isConsecutiveLoadOrStore(Instruction *I);
1753 
1754   /// Returns true if an artificially high cost for emulated masked memrefs
1755   /// should be used.
1756   bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF);
1757 
1758   /// Map of scalar integer values to the smallest bitwidth they can be legally
1759   /// represented as. The vector equivalents of these values should be truncated
1760   /// to this type.
1761   MapVector<Instruction *, uint64_t> MinBWs;
1762 
1763   /// A type representing the costs for instructions if they were to be
1764   /// scalarized rather than vectorized. The entries are Instruction-Cost
1765   /// pairs.
1766   using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1767 
1768   /// A set containing all BasicBlocks that are known to present after
1769   /// vectorization as a predicated block.
1770   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1771 
1772   /// Records whether it is allowed to have the original scalar loop execute at
1773   /// least once. This may be needed as a fallback loop in case runtime
1774   /// aliasing/dependence checks fail, or to handle the tail/remainder
1775   /// iterations when the trip count is unknown or doesn't divide by the VF,
1776   /// or as a peel-loop to handle gaps in interleave-groups.
1777   /// Under optsize and when the trip count is very small we don't allow any
1778   /// iterations to execute in the scalar loop.
1779   ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1780 
1781   /// All blocks of loop are to be masked to fold tail of scalar iterations.
1782   bool FoldTailByMasking = false;
1783 
1784   /// A map holding scalar costs for different vectorization factors. The
1785   /// presence of a cost for an instruction in the mapping indicates that the
1786   /// instruction will be scalarized when vectorizing with the associated
1787   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1788   DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1789 
1790   /// Holds the instructions known to be uniform after vectorization.
1791   /// The data is collected per VF.
1792   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1793 
1794   /// Holds the instructions known to be scalar after vectorization.
1795   /// The data is collected per VF.
1796   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1797 
1798   /// Holds the instructions (address computations) that are forced to be
1799   /// scalarized.
1800   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1801 
1802   /// PHINodes of the reductions that should be expanded in-loop along with
1803   /// their associated chains of reduction operations, in program order from top
1804   /// (PHI) to bottom
1805   ReductionChainMap InLoopReductionChains;
1806 
1807   /// A Map of inloop reduction operations and their immediate chain operand.
1808   /// FIXME: This can be removed once reductions can be costed correctly in
1809   /// vplan. This was added to allow quick lookup to the inloop operations,
1810   /// without having to loop through InLoopReductionChains.
1811   DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1812 
1813   /// Returns the expected difference in cost from scalarizing the expression
1814   /// feeding a predicated instruction \p PredInst. The instructions to
1815   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1816   /// non-negative return value implies the expression will be scalarized.
1817   /// Currently, only single-use chains are considered for scalarization.
1818   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1819                               ElementCount VF);
1820 
1821   /// Collect the instructions that are uniform after vectorization. An
1822   /// instruction is uniform if we represent it with a single scalar value in
1823   /// the vectorized loop corresponding to each vector iteration. Examples of
1824   /// uniform instructions include pointer operands of consecutive or
1825   /// interleaved memory accesses. Note that although uniformity implies an
1826   /// instruction will be scalar, the reverse is not true. In general, a
1827   /// scalarized instruction will be represented by VF scalar values in the
1828   /// vectorized loop, each corresponding to an iteration of the original
1829   /// scalar loop.
1830   void collectLoopUniforms(ElementCount VF);
1831 
1832   /// Collect the instructions that are scalar after vectorization. An
1833   /// instruction is scalar if it is known to be uniform or will be scalarized
1834   /// during vectorization. collectLoopScalars should only add non-uniform nodes
1835   /// to the list if they are used by a load/store instruction that is marked as
1836   /// CM_Scalarize. Non-uniform scalarized instructions will be represented by
1837   /// VF values in the vectorized loop, each corresponding to an iteration of
1838   /// the original scalar loop.
1839   void collectLoopScalars(ElementCount VF);
1840 
1841   /// Keeps cost model vectorization decision and cost for instructions.
1842   /// Right now it is used for memory instructions only.
1843   using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1844                                 std::pair<InstWidening, InstructionCost>>;
1845 
1846   DecisionList WideningDecisions;
1847 
1848   /// Returns true if \p V is expected to be vectorized and it needs to be
1849   /// extracted.
1850   bool needsExtract(Value *V, ElementCount VF) const {
1851     Instruction *I = dyn_cast<Instruction>(V);
1852     if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1853         TheLoop->isLoopInvariant(I))
1854       return false;
1855 
1856     // Assume we can vectorize V (and hence we need extraction) if the
1857     // scalars are not computed yet. This can happen, because it is called
1858     // via getScalarizationOverhead from setCostBasedWideningDecision, before
1859     // the scalars are collected. That should be a safe assumption in most
1860     // cases, because we check if the operands have vectorizable types
1861     // beforehand in LoopVectorizationLegality.
1862     return Scalars.find(VF) == Scalars.end() ||
1863            !isScalarAfterVectorization(I, VF);
1864   };
1865 
1866   /// Returns a range containing only operands needing to be extracted.
1867   SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1868                                                    ElementCount VF) const {
1869     return SmallVector<Value *, 4>(make_filter_range(
1870         Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
1871   }
1872 
1873   /// Determines if we have the infrastructure to vectorize loop \p L and its
1874   /// epilogue, assuming the main loop is vectorized by \p VF.
1875   bool isCandidateForEpilogueVectorization(const Loop &L,
1876                                            const ElementCount VF) const;
1877 
1878   /// Returns true if epilogue vectorization is considered profitable, and
1879   /// false otherwise.
1880   /// \p VF is the vectorization factor chosen for the original loop.
1881   bool isEpilogueVectorizationProfitable(const ElementCount VF) const;
1882 
1883 public:
1884   /// The loop that we evaluate.
1885   Loop *TheLoop;
1886 
1887   /// Predicated scalar evolution analysis.
1888   PredicatedScalarEvolution &PSE;
1889 
1890   /// Loop Info analysis.
1891   LoopInfo *LI;
1892 
1893   /// Vectorization legality.
1894   LoopVectorizationLegality *Legal;
1895 
1896   /// Vector target information.
1897   const TargetTransformInfo &TTI;
1898 
1899   /// Target Library Info.
1900   const TargetLibraryInfo *TLI;
1901 
1902   /// Demanded bits analysis.
1903   DemandedBits *DB;
1904 
1905   /// Assumption cache.
1906   AssumptionCache *AC;
1907 
1908   /// Interface to emit optimization remarks.
1909   OptimizationRemarkEmitter *ORE;
1910 
1911   const Function *TheFunction;
1912 
1913   /// Loop Vectorize Hint.
1914   const LoopVectorizeHints *Hints;
1915 
1916   /// The interleave access information contains groups of interleaved accesses
1917   /// with the same stride and close to each other.
1918   InterleavedAccessInfo &InterleaveInfo;
1919 
1920   /// Values to ignore in the cost model.
1921   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1922 
1923   /// Values to ignore in the cost model when VF > 1.
1924   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1925 
1926   /// All element types found in the loop.
1927   SmallPtrSet<Type *, 16> ElementTypesInLoop;
1928 
1929   /// Profitable vector factors.
1930   SmallVector<VectorizationFactor, 8> ProfitableVFs;
1931 };
1932 } // end namespace llvm
1933 
1934 /// Helper struct to manage generating runtime checks for vectorization.
1935 ///
1936 /// The runtime checks are created up-front in temporary blocks to allow better
1937 /// estimating the cost and un-linked from the existing IR. After deciding to
1938 /// vectorize, the checks are moved back. If deciding not to vectorize, the
1939 /// temporary blocks are completely removed.
1940 class GeneratedRTChecks {
1941   /// Basic block which contains the generated SCEV checks, if any.
1942   BasicBlock *SCEVCheckBlock = nullptr;
1943 
1944   /// The value representing the result of the generated SCEV checks. If it is
1945   /// nullptr, either no SCEV checks have been generated or they have been used.
1946   Value *SCEVCheckCond = nullptr;
1947 
1948   /// Basic block which contains the generated memory runtime checks, if any.
1949   BasicBlock *MemCheckBlock = nullptr;
1950 
1951   /// The value representing the result of the generated memory runtime checks.
1952   /// If it is nullptr, either no memory runtime checks have been generated or
1953   /// they have been used.
1954   Value *MemRuntimeCheckCond = nullptr;
1955 
1956   DominatorTree *DT;
1957   LoopInfo *LI;
1958 
1959   SCEVExpander SCEVExp;
1960   SCEVExpander MemCheckExp;
1961 
1962 public:
1963   GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI,
1964                     const DataLayout &DL)
1965       : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"),
1966         MemCheckExp(SE, DL, "scev.check") {}
1967 
1968   /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
1969   /// accurately estimate the cost of the runtime checks. The blocks are
1970   /// un-linked from the IR and is added back during vector code generation. If
1971   /// there is no vector code generation, the check blocks are removed
1972   /// completely.
1973   void Create(Loop *L, const LoopAccessInfo &LAI,
1974               const SCEVPredicate &Pred) {
1975 
1976     BasicBlock *LoopHeader = L->getHeader();
1977     BasicBlock *Preheader = L->getLoopPreheader();
1978 
1979     // Use SplitBlock to create blocks for SCEV & memory runtime checks to
1980     // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
1981     // may be used by SCEVExpander. The blocks will be un-linked from their
1982     // predecessors and removed from LI & DT at the end of the function.
1983     if (!Pred.isAlwaysTrue()) {
1984       SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
1985                                   nullptr, "vector.scevcheck");
1986 
1987       SCEVCheckCond = SCEVExp.expandCodeForPredicate(
1988           &Pred, SCEVCheckBlock->getTerminator());
1989     }
1990 
1991     const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
1992     if (RtPtrChecking.Need) {
1993       auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
1994       MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
1995                                  "vector.memcheck");
1996 
1997       MemRuntimeCheckCond =
1998           addRuntimeChecks(MemCheckBlock->getTerminator(), L,
1999                            RtPtrChecking.getChecks(), MemCheckExp);
2000       assert(MemRuntimeCheckCond &&
2001              "no RT checks generated although RtPtrChecking "
2002              "claimed checks are required");
2003     }
2004 
2005     if (!MemCheckBlock && !SCEVCheckBlock)
2006       return;
2007 
2008     // Unhook the temporary block with the checks, update various places
2009     // accordingly.
2010     if (SCEVCheckBlock)
2011       SCEVCheckBlock->replaceAllUsesWith(Preheader);
2012     if (MemCheckBlock)
2013       MemCheckBlock->replaceAllUsesWith(Preheader);
2014 
2015     if (SCEVCheckBlock) {
2016       SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
2017       new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
2018       Preheader->getTerminator()->eraseFromParent();
2019     }
2020     if (MemCheckBlock) {
2021       MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
2022       new UnreachableInst(Preheader->getContext(), MemCheckBlock);
2023       Preheader->getTerminator()->eraseFromParent();
2024     }
2025 
2026     DT->changeImmediateDominator(LoopHeader, Preheader);
2027     if (MemCheckBlock) {
2028       DT->eraseNode(MemCheckBlock);
2029       LI->removeBlock(MemCheckBlock);
2030     }
2031     if (SCEVCheckBlock) {
2032       DT->eraseNode(SCEVCheckBlock);
2033       LI->removeBlock(SCEVCheckBlock);
2034     }
2035   }
2036 
2037   /// Remove the created SCEV & memory runtime check blocks & instructions, if
2038   /// unused.
2039   ~GeneratedRTChecks() {
2040     SCEVExpanderCleaner SCEVCleaner(SCEVExp);
2041     SCEVExpanderCleaner MemCheckCleaner(MemCheckExp);
2042     if (!SCEVCheckCond)
2043       SCEVCleaner.markResultUsed();
2044 
2045     if (!MemRuntimeCheckCond)
2046       MemCheckCleaner.markResultUsed();
2047 
2048     if (MemRuntimeCheckCond) {
2049       auto &SE = *MemCheckExp.getSE();
2050       // Memory runtime check generation creates compares that use expanded
2051       // values. Remove them before running the SCEVExpanderCleaners.
2052       for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
2053         if (MemCheckExp.isInsertedInstruction(&I))
2054           continue;
2055         SE.forgetValue(&I);
2056         I.eraseFromParent();
2057       }
2058     }
2059     MemCheckCleaner.cleanup();
2060     SCEVCleaner.cleanup();
2061 
2062     if (SCEVCheckCond)
2063       SCEVCheckBlock->eraseFromParent();
2064     if (MemRuntimeCheckCond)
2065       MemCheckBlock->eraseFromParent();
2066   }
2067 
2068   /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and
2069   /// adjusts the branches to branch to the vector preheader or \p Bypass,
2070   /// depending on the generated condition.
2071   BasicBlock *emitSCEVChecks(BasicBlock *Bypass,
2072                              BasicBlock *LoopVectorPreHeader,
2073                              BasicBlock *LoopExitBlock) {
2074     if (!SCEVCheckCond)
2075       return nullptr;
2076     if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond))
2077       if (C->isZero())
2078         return nullptr;
2079 
2080     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2081 
2082     BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock);
2083     // Create new preheader for vector loop.
2084     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2085       PL->addBasicBlockToLoop(SCEVCheckBlock, *LI);
2086 
2087     SCEVCheckBlock->getTerminator()->eraseFromParent();
2088     SCEVCheckBlock->moveBefore(LoopVectorPreHeader);
2089     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2090                                                 SCEVCheckBlock);
2091 
2092     DT->addNewBlock(SCEVCheckBlock, Pred);
2093     DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock);
2094 
2095     ReplaceInstWithInst(
2096         SCEVCheckBlock->getTerminator(),
2097         BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond));
2098     // Mark the check as used, to prevent it from being removed during cleanup.
2099     SCEVCheckCond = nullptr;
2100     return SCEVCheckBlock;
2101   }
2102 
2103   /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts
2104   /// the branches to branch to the vector preheader or \p Bypass, depending on
2105   /// the generated condition.
2106   BasicBlock *emitMemRuntimeChecks(BasicBlock *Bypass,
2107                                    BasicBlock *LoopVectorPreHeader) {
2108     // Check if we generated code that checks in runtime if arrays overlap.
2109     if (!MemRuntimeCheckCond)
2110       return nullptr;
2111 
2112     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2113     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2114                                                 MemCheckBlock);
2115 
2116     DT->addNewBlock(MemCheckBlock, Pred);
2117     DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock);
2118     MemCheckBlock->moveBefore(LoopVectorPreHeader);
2119 
2120     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2121       PL->addBasicBlockToLoop(MemCheckBlock, *LI);
2122 
2123     ReplaceInstWithInst(
2124         MemCheckBlock->getTerminator(),
2125         BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond));
2126     MemCheckBlock->getTerminator()->setDebugLoc(
2127         Pred->getTerminator()->getDebugLoc());
2128 
2129     // Mark the check as used, to prevent it from being removed during cleanup.
2130     MemRuntimeCheckCond = nullptr;
2131     return MemCheckBlock;
2132   }
2133 };
2134 
2135 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
2136 // vectorization. The loop needs to be annotated with #pragma omp simd
2137 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the
2138 // vector length information is not provided, vectorization is not considered
2139 // explicit. Interleave hints are not allowed either. These limitations will be
2140 // relaxed in the future.
2141 // Please, note that we are currently forced to abuse the pragma 'clang
2142 // vectorize' semantics. This pragma provides *auto-vectorization hints*
2143 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
2144 // provides *explicit vectorization hints* (LV can bypass legal checks and
2145 // assume that vectorization is legal). However, both hints are implemented
2146 // using the same metadata (llvm.loop.vectorize, processed by
2147 // LoopVectorizeHints). This will be fixed in the future when the native IR
2148 // representation for pragma 'omp simd' is introduced.
2149 static bool isExplicitVecOuterLoop(Loop *OuterLp,
2150                                    OptimizationRemarkEmitter *ORE) {
2151   assert(!OuterLp->isInnermost() && "This is not an outer loop");
2152   LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
2153 
2154   // Only outer loops with an explicit vectorization hint are supported.
2155   // Unannotated outer loops are ignored.
2156   if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
2157     return false;
2158 
2159   Function *Fn = OuterLp->getHeader()->getParent();
2160   if (!Hints.allowVectorization(Fn, OuterLp,
2161                                 true /*VectorizeOnlyWhenForced*/)) {
2162     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
2163     return false;
2164   }
2165 
2166   if (Hints.getInterleave() > 1) {
2167     // TODO: Interleave support is future work.
2168     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
2169                          "outer loops.\n");
2170     Hints.emitRemarkWithHints();
2171     return false;
2172   }
2173 
2174   return true;
2175 }
2176 
2177 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
2178                                   OptimizationRemarkEmitter *ORE,
2179                                   SmallVectorImpl<Loop *> &V) {
2180   // Collect inner loops and outer loops without irreducible control flow. For
2181   // now, only collect outer loops that have explicit vectorization hints. If we
2182   // are stress testing the VPlan H-CFG construction, we collect the outermost
2183   // loop of every loop nest.
2184   if (L.isInnermost() || VPlanBuildStressTest ||
2185       (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
2186     LoopBlocksRPO RPOT(&L);
2187     RPOT.perform(LI);
2188     if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
2189       V.push_back(&L);
2190       // TODO: Collect inner loops inside marked outer loops in case
2191       // vectorization fails for the outer loop. Do not invoke
2192       // 'containsIrreducibleCFG' again for inner loops when the outer loop is
2193       // already known to be reducible. We can use an inherited attribute for
2194       // that.
2195       return;
2196     }
2197   }
2198   for (Loop *InnerL : L)
2199     collectSupportedLoops(*InnerL, LI, ORE, V);
2200 }
2201 
2202 namespace {
2203 
2204 /// The LoopVectorize Pass.
2205 struct LoopVectorize : public FunctionPass {
2206   /// Pass identification, replacement for typeid
2207   static char ID;
2208 
2209   LoopVectorizePass Impl;
2210 
2211   explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
2212                          bool VectorizeOnlyWhenForced = false)
2213       : FunctionPass(ID),
2214         Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
2215     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
2216   }
2217 
2218   bool runOnFunction(Function &F) override {
2219     if (skipFunction(F))
2220       return false;
2221 
2222     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2223     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2224     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
2225     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2226     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
2227     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
2228     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
2229     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2230     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2231     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
2232     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
2233     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2234     auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
2235 
2236     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
2237         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
2238 
2239     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
2240                         GetLAA, *ORE, PSI).MadeAnyChange;
2241   }
2242 
2243   void getAnalysisUsage(AnalysisUsage &AU) const override {
2244     AU.addRequired<AssumptionCacheTracker>();
2245     AU.addRequired<BlockFrequencyInfoWrapperPass>();
2246     AU.addRequired<DominatorTreeWrapperPass>();
2247     AU.addRequired<LoopInfoWrapperPass>();
2248     AU.addRequired<ScalarEvolutionWrapperPass>();
2249     AU.addRequired<TargetTransformInfoWrapperPass>();
2250     AU.addRequired<AAResultsWrapperPass>();
2251     AU.addRequired<LoopAccessLegacyAnalysis>();
2252     AU.addRequired<DemandedBitsWrapperPass>();
2253     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2254     AU.addRequired<InjectTLIMappingsLegacy>();
2255 
2256     // We currently do not preserve loopinfo/dominator analyses with outer loop
2257     // vectorization. Until this is addressed, mark these analyses as preserved
2258     // only for non-VPlan-native path.
2259     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
2260     if (!EnableVPlanNativePath) {
2261       AU.addPreserved<LoopInfoWrapperPass>();
2262       AU.addPreserved<DominatorTreeWrapperPass>();
2263     }
2264 
2265     AU.addPreserved<BasicAAWrapperPass>();
2266     AU.addPreserved<GlobalsAAWrapperPass>();
2267     AU.addRequired<ProfileSummaryInfoWrapperPass>();
2268   }
2269 };
2270 
2271 } // end anonymous namespace
2272 
2273 //===----------------------------------------------------------------------===//
2274 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
2275 // LoopVectorizationCostModel and LoopVectorizationPlanner.
2276 //===----------------------------------------------------------------------===//
2277 
2278 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
2279   // We need to place the broadcast of invariant variables outside the loop,
2280   // but only if it's proven safe to do so. Else, broadcast will be inside
2281   // vector loop body.
2282   Instruction *Instr = dyn_cast<Instruction>(V);
2283   bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
2284                      (!Instr ||
2285                       DT->dominates(Instr->getParent(), LoopVectorPreHeader));
2286   // Place the code for broadcasting invariant variables in the new preheader.
2287   IRBuilder<>::InsertPointGuard Guard(Builder);
2288   if (SafeToHoist)
2289     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2290 
2291   // Broadcast the scalar into all locations in the vector.
2292   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
2293 
2294   return Shuf;
2295 }
2296 
2297 /// This function adds
2298 /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
2299 /// to each vector element of Val. The sequence starts at StartIndex.
2300 /// \p Opcode is relevant for FP induction variable.
2301 static Value *getStepVector(Value *Val, Value *StartIdx, Value *Step,
2302                             Instruction::BinaryOps BinOp, ElementCount VF,
2303                             IRBuilderBase &Builder) {
2304   assert(VF.isVector() && "only vector VFs are supported");
2305 
2306   // Create and check the types.
2307   auto *ValVTy = cast<VectorType>(Val->getType());
2308   ElementCount VLen = ValVTy->getElementCount();
2309 
2310   Type *STy = Val->getType()->getScalarType();
2311   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
2312          "Induction Step must be an integer or FP");
2313   assert(Step->getType() == STy && "Step has wrong type");
2314 
2315   SmallVector<Constant *, 8> Indices;
2316 
2317   // Create a vector of consecutive numbers from zero to VF.
2318   VectorType *InitVecValVTy = ValVTy;
2319   if (STy->isFloatingPointTy()) {
2320     Type *InitVecValSTy =
2321         IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
2322     InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
2323   }
2324   Value *InitVec = Builder.CreateStepVector(InitVecValVTy);
2325 
2326   // Splat the StartIdx
2327   Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx);
2328 
2329   if (STy->isIntegerTy()) {
2330     InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);
2331     Step = Builder.CreateVectorSplat(VLen, Step);
2332     assert(Step->getType() == Val->getType() && "Invalid step vec");
2333     // FIXME: The newly created binary instructions should contain nsw/nuw
2334     // flags, which can be found from the original scalar operations.
2335     Step = Builder.CreateMul(InitVec, Step);
2336     return Builder.CreateAdd(Val, Step, "induction");
2337   }
2338 
2339   // Floating point induction.
2340   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
2341          "Binary Opcode should be specified for FP induction");
2342   InitVec = Builder.CreateUIToFP(InitVec, ValVTy);
2343   InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat);
2344 
2345   Step = Builder.CreateVectorSplat(VLen, Step);
2346   Value *MulOp = Builder.CreateFMul(InitVec, Step);
2347   return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
2348 }
2349 
2350 /// Compute scalar induction steps. \p ScalarIV is the scalar induction
2351 /// variable on which to base the steps, \p Step is the size of the step.
2352 static void buildScalarSteps(Value *ScalarIV, Value *Step,
2353                              const InductionDescriptor &ID, VPValue *Def,
2354                              VPTransformState &State) {
2355   IRBuilderBase &Builder = State.Builder;
2356   // We shouldn't have to build scalar steps if we aren't vectorizing.
2357   assert(State.VF.isVector() && "VF should be greater than one");
2358   // Get the value type and ensure it and the step have the same integer type.
2359   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
2360   assert(ScalarIVTy == Step->getType() &&
2361          "Val and Step should have the same type");
2362 
2363   // We build scalar steps for both integer and floating-point induction
2364   // variables. Here, we determine the kind of arithmetic we will perform.
2365   Instruction::BinaryOps AddOp;
2366   Instruction::BinaryOps MulOp;
2367   if (ScalarIVTy->isIntegerTy()) {
2368     AddOp = Instruction::Add;
2369     MulOp = Instruction::Mul;
2370   } else {
2371     AddOp = ID.getInductionOpcode();
2372     MulOp = Instruction::FMul;
2373   }
2374 
2375   // Determine the number of scalars we need to generate for each unroll
2376   // iteration.
2377   bool FirstLaneOnly = vputils::onlyFirstLaneUsed(Def);
2378   unsigned Lanes = FirstLaneOnly ? 1 : State.VF.getKnownMinValue();
2379   // Compute the scalar steps and save the results in State.
2380   Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
2381                                      ScalarIVTy->getScalarSizeInBits());
2382   Type *VecIVTy = nullptr;
2383   Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr;
2384   if (!FirstLaneOnly && State.VF.isScalable()) {
2385     VecIVTy = VectorType::get(ScalarIVTy, State.VF);
2386     UnitStepVec =
2387         Builder.CreateStepVector(VectorType::get(IntStepTy, State.VF));
2388     SplatStep = Builder.CreateVectorSplat(State.VF, Step);
2389     SplatIV = Builder.CreateVectorSplat(State.VF, ScalarIV);
2390   }
2391 
2392   for (unsigned Part = 0; Part < State.UF; ++Part) {
2393     Value *StartIdx0 = createStepForVF(Builder, IntStepTy, State.VF, Part);
2394 
2395     if (!FirstLaneOnly && State.VF.isScalable()) {
2396       auto *SplatStartIdx = Builder.CreateVectorSplat(State.VF, StartIdx0);
2397       auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec);
2398       if (ScalarIVTy->isFloatingPointTy())
2399         InitVec = Builder.CreateSIToFP(InitVec, VecIVTy);
2400       auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep);
2401       auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul);
2402       State.set(Def, Add, Part);
2403       // It's useful to record the lane values too for the known minimum number
2404       // of elements so we do those below. This improves the code quality when
2405       // trying to extract the first element, for example.
2406     }
2407 
2408     if (ScalarIVTy->isFloatingPointTy())
2409       StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy);
2410 
2411     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2412       Value *StartIdx = Builder.CreateBinOp(
2413           AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane));
2414       // The step returned by `createStepForVF` is a runtime-evaluated value
2415       // when VF is scalable. Otherwise, it should be folded into a Constant.
2416       assert((State.VF.isScalable() || isa<Constant>(StartIdx)) &&
2417              "Expected StartIdx to be folded to a constant when VF is not "
2418              "scalable");
2419       auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
2420       auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul);
2421       State.set(Def, Add, VPIteration(Part, Lane));
2422     }
2423   }
2424 }
2425 
2426 // Generate code for the induction step. Note that induction steps are
2427 // required to be loop-invariant
2428 static Value *CreateStepValue(const SCEV *Step, ScalarEvolution &SE,
2429                               Instruction *InsertBefore,
2430                               Loop *OrigLoop = nullptr) {
2431   const DataLayout &DL = SE.getDataLayout();
2432   assert((!OrigLoop || SE.isLoopInvariant(Step, OrigLoop)) &&
2433          "Induction step should be loop invariant");
2434   if (auto *E = dyn_cast<SCEVUnknown>(Step))
2435     return E->getValue();
2436 
2437   SCEVExpander Exp(SE, DL, "induction");
2438   return Exp.expandCodeFor(Step, Step->getType(), InsertBefore);
2439 }
2440 
2441 /// Compute the transformed value of Index at offset StartValue using step
2442 /// StepValue.
2443 /// For integer induction, returns StartValue + Index * StepValue.
2444 /// For pointer induction, returns StartValue[Index * StepValue].
2445 /// FIXME: The newly created binary instructions should contain nsw/nuw
2446 /// flags, which can be found from the original scalar operations.
2447 static Value *emitTransformedIndex(IRBuilderBase &B, Value *Index,
2448                                    Value *StartValue, Value *Step,
2449                                    const InductionDescriptor &ID) {
2450   assert(Index->getType()->getScalarType() == Step->getType() &&
2451          "Index scalar type does not match StepValue type");
2452 
2453   // Note: the IR at this point is broken. We cannot use SE to create any new
2454   // SCEV and then expand it, hoping that SCEV's simplification will give us
2455   // a more optimal code. Unfortunately, attempt of doing so on invalid IR may
2456   // lead to various SCEV crashes. So all we can do is to use builder and rely
2457   // on InstCombine for future simplifications. Here we handle some trivial
2458   // cases only.
2459   auto CreateAdd = [&B](Value *X, Value *Y) {
2460     assert(X->getType() == Y->getType() && "Types don't match!");
2461     if (auto *CX = dyn_cast<ConstantInt>(X))
2462       if (CX->isZero())
2463         return Y;
2464     if (auto *CY = dyn_cast<ConstantInt>(Y))
2465       if (CY->isZero())
2466         return X;
2467     return B.CreateAdd(X, Y);
2468   };
2469 
2470   // We allow X to be a vector type, in which case Y will potentially be
2471   // splatted into a vector with the same element count.
2472   auto CreateMul = [&B](Value *X, Value *Y) {
2473     assert(X->getType()->getScalarType() == Y->getType() &&
2474            "Types don't match!");
2475     if (auto *CX = dyn_cast<ConstantInt>(X))
2476       if (CX->isOne())
2477         return Y;
2478     if (auto *CY = dyn_cast<ConstantInt>(Y))
2479       if (CY->isOne())
2480         return X;
2481     VectorType *XVTy = dyn_cast<VectorType>(X->getType());
2482     if (XVTy && !isa<VectorType>(Y->getType()))
2483       Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
2484     return B.CreateMul(X, Y);
2485   };
2486 
2487   switch (ID.getKind()) {
2488   case InductionDescriptor::IK_IntInduction: {
2489     assert(!isa<VectorType>(Index->getType()) &&
2490            "Vector indices not supported for integer inductions yet");
2491     assert(Index->getType() == StartValue->getType() &&
2492            "Index type does not match StartValue type");
2493     if (isa<ConstantInt>(Step) && cast<ConstantInt>(Step)->isMinusOne())
2494       return B.CreateSub(StartValue, Index);
2495     auto *Offset = CreateMul(Index, Step);
2496     return CreateAdd(StartValue, Offset);
2497   }
2498   case InductionDescriptor::IK_PtrInduction: {
2499     assert(isa<Constant>(Step) &&
2500            "Expected constant step for pointer induction");
2501     return B.CreateGEP(ID.getElementType(), StartValue, CreateMul(Index, Step));
2502   }
2503   case InductionDescriptor::IK_FpInduction: {
2504     assert(!isa<VectorType>(Index->getType()) &&
2505            "Vector indices not supported for FP inductions yet");
2506     assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
2507     auto InductionBinOp = ID.getInductionBinOp();
2508     assert(InductionBinOp &&
2509            (InductionBinOp->getOpcode() == Instruction::FAdd ||
2510             InductionBinOp->getOpcode() == Instruction::FSub) &&
2511            "Original bin op should be defined for FP induction");
2512 
2513     Value *MulExp = B.CreateFMul(Step, Index);
2514     return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
2515                          "induction");
2516   }
2517   case InductionDescriptor::IK_NoInduction:
2518     return nullptr;
2519   }
2520   llvm_unreachable("invalid enum");
2521 }
2522 
2523 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def,
2524                                                     const VPIteration &Instance,
2525                                                     VPTransformState &State) {
2526   Value *ScalarInst = State.get(Def, Instance);
2527   Value *VectorValue = State.get(Def, Instance.Part);
2528   VectorValue = Builder.CreateInsertElement(
2529       VectorValue, ScalarInst,
2530       Instance.Lane.getAsRuntimeExpr(State.Builder, VF));
2531   State.set(Def, VectorValue, Instance.Part);
2532 }
2533 
2534 // Return whether we allow using masked interleave-groups (for dealing with
2535 // strided loads/stores that reside in predicated blocks, or for dealing
2536 // with gaps).
2537 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2538   // If an override option has been passed in for interleaved accesses, use it.
2539   if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2540     return EnableMaskedInterleavedMemAccesses;
2541 
2542   return TTI.enableMaskedInterleavedAccessVectorization();
2543 }
2544 
2545 // Try to vectorize the interleave group that \p Instr belongs to.
2546 //
2547 // E.g. Translate following interleaved load group (factor = 3):
2548 //   for (i = 0; i < N; i+=3) {
2549 //     R = Pic[i];             // Member of index 0
2550 //     G = Pic[i+1];           // Member of index 1
2551 //     B = Pic[i+2];           // Member of index 2
2552 //     ... // do something to R, G, B
2553 //   }
2554 // To:
2555 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2556 //   %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9>   ; R elements
2557 //   %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10>  ; G elements
2558 //   %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11>  ; B elements
2559 //
2560 // Or translate following interleaved store group (factor = 3):
2561 //   for (i = 0; i < N; i+=3) {
2562 //     ... do something to R, G, B
2563 //     Pic[i]   = R;           // Member of index 0
2564 //     Pic[i+1] = G;           // Member of index 1
2565 //     Pic[i+2] = B;           // Member of index 2
2566 //   }
2567 // To:
2568 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2569 //   %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
2570 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2571 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2572 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2573 void InnerLoopVectorizer::vectorizeInterleaveGroup(
2574     const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
2575     VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
2576     VPValue *BlockInMask) {
2577   Instruction *Instr = Group->getInsertPos();
2578   const DataLayout &DL = Instr->getModule()->getDataLayout();
2579 
2580   // Prepare for the vector type of the interleaved load/store.
2581   Type *ScalarTy = getLoadStoreType(Instr);
2582   unsigned InterleaveFactor = Group->getFactor();
2583   assert(!VF.isScalable() && "scalable vectors not yet supported.");
2584   auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);
2585 
2586   // Prepare for the new pointers.
2587   SmallVector<Value *, 2> AddrParts;
2588   unsigned Index = Group->getIndex(Instr);
2589 
2590   // TODO: extend the masked interleaved-group support to reversed access.
2591   assert((!BlockInMask || !Group->isReverse()) &&
2592          "Reversed masked interleave-group not supported.");
2593 
2594   // If the group is reverse, adjust the index to refer to the last vector lane
2595   // instead of the first. We adjust the index from the first vector lane,
2596   // rather than directly getting the pointer for lane VF - 1, because the
2597   // pointer operand of the interleaved access is supposed to be uniform. For
2598   // uniform instructions, we're only required to generate a value for the
2599   // first vector lane in each unroll iteration.
2600   if (Group->isReverse())
2601     Index += (VF.getKnownMinValue() - 1) * Group->getFactor();
2602 
2603   for (unsigned Part = 0; Part < UF; Part++) {
2604     Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
2605     setDebugLocFromInst(AddrPart);
2606 
2607     // Notice current instruction could be any index. Need to adjust the address
2608     // to the member of index 0.
2609     //
2610     // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
2611     //       b = A[i];       // Member of index 0
2612     // Current pointer is pointed to A[i+1], adjust it to A[i].
2613     //
2614     // E.g.  A[i+1] = a;     // Member of index 1
2615     //       A[i]   = b;     // Member of index 0
2616     //       A[i+2] = c;     // Member of index 2 (Current instruction)
2617     // Current pointer is pointed to A[i+2], adjust it to A[i].
2618 
2619     bool InBounds = false;
2620     if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
2621       InBounds = gep->isInBounds();
2622     AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2623     cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2624 
2625     // Cast to the vector pointer type.
2626     unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
2627     Type *PtrTy = VecTy->getPointerTo(AddressSpace);
2628     AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
2629   }
2630 
2631   setDebugLocFromInst(Instr);
2632   Value *PoisonVec = PoisonValue::get(VecTy);
2633 
2634   Value *MaskForGaps = nullptr;
2635   if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
2636     MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2637     assert(MaskForGaps && "Mask for Gaps is required but it is null");
2638   }
2639 
2640   // Vectorize the interleaved load group.
2641   if (isa<LoadInst>(Instr)) {
2642     // For each unroll part, create a wide load for the group.
2643     SmallVector<Value *, 2> NewLoads;
2644     for (unsigned Part = 0; Part < UF; Part++) {
2645       Instruction *NewLoad;
2646       if (BlockInMask || MaskForGaps) {
2647         assert(useMaskedInterleavedAccesses(*TTI) &&
2648                "masked interleaved groups are not allowed.");
2649         Value *GroupMask = MaskForGaps;
2650         if (BlockInMask) {
2651           Value *BlockInMaskPart = State.get(BlockInMask, Part);
2652           Value *ShuffledMask = Builder.CreateShuffleVector(
2653               BlockInMaskPart,
2654               createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2655               "interleaved.mask");
2656           GroupMask = MaskForGaps
2657                           ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2658                                                 MaskForGaps)
2659                           : ShuffledMask;
2660         }
2661         NewLoad =
2662             Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(),
2663                                      GroupMask, PoisonVec, "wide.masked.vec");
2664       }
2665       else
2666         NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
2667                                             Group->getAlign(), "wide.vec");
2668       Group->addMetadata(NewLoad);
2669       NewLoads.push_back(NewLoad);
2670     }
2671 
2672     // For each member in the group, shuffle out the appropriate data from the
2673     // wide loads.
2674     unsigned J = 0;
2675     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2676       Instruction *Member = Group->getMember(I);
2677 
2678       // Skip the gaps in the group.
2679       if (!Member)
2680         continue;
2681 
2682       auto StrideMask =
2683           createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
2684       for (unsigned Part = 0; Part < UF; Part++) {
2685         Value *StridedVec = Builder.CreateShuffleVector(
2686             NewLoads[Part], StrideMask, "strided.vec");
2687 
2688         // If this member has different type, cast the result type.
2689         if (Member->getType() != ScalarTy) {
2690           assert(!VF.isScalable() && "VF is assumed to be non scalable.");
2691           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2692           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2693         }
2694 
2695         if (Group->isReverse())
2696           StridedVec = Builder.CreateVectorReverse(StridedVec, "reverse");
2697 
2698         State.set(VPDefs[J], StridedVec, Part);
2699       }
2700       ++J;
2701     }
2702     return;
2703   }
2704 
2705   // The sub vector type for current instruction.
2706   auto *SubVT = VectorType::get(ScalarTy, VF);
2707 
2708   // Vectorize the interleaved store group.
2709   MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2710   assert((!MaskForGaps || useMaskedInterleavedAccesses(*TTI)) &&
2711          "masked interleaved groups are not allowed.");
2712   assert((!MaskForGaps || !VF.isScalable()) &&
2713          "masking gaps for scalable vectors is not yet supported.");
2714   for (unsigned Part = 0; Part < UF; Part++) {
2715     // Collect the stored vector from each member.
2716     SmallVector<Value *, 4> StoredVecs;
2717     for (unsigned i = 0; i < InterleaveFactor; i++) {
2718       assert((Group->getMember(i) || MaskForGaps) &&
2719              "Fail to get a member from an interleaved store group");
2720       Instruction *Member = Group->getMember(i);
2721 
2722       // Skip the gaps in the group.
2723       if (!Member) {
2724         Value *Undef = PoisonValue::get(SubVT);
2725         StoredVecs.push_back(Undef);
2726         continue;
2727       }
2728 
2729       Value *StoredVec = State.get(StoredValues[i], Part);
2730 
2731       if (Group->isReverse())
2732         StoredVec = Builder.CreateVectorReverse(StoredVec, "reverse");
2733 
2734       // If this member has different type, cast it to a unified type.
2735 
2736       if (StoredVec->getType() != SubVT)
2737         StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2738 
2739       StoredVecs.push_back(StoredVec);
2740     }
2741 
2742     // Concatenate all vectors into a wide vector.
2743     Value *WideVec = concatenateVectors(Builder, StoredVecs);
2744 
2745     // Interleave the elements in the wide vector.
2746     Value *IVec = Builder.CreateShuffleVector(
2747         WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor),
2748         "interleaved.vec");
2749 
2750     Instruction *NewStoreInstr;
2751     if (BlockInMask || MaskForGaps) {
2752       Value *GroupMask = MaskForGaps;
2753       if (BlockInMask) {
2754         Value *BlockInMaskPart = State.get(BlockInMask, Part);
2755         Value *ShuffledMask = Builder.CreateShuffleVector(
2756             BlockInMaskPart,
2757             createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2758             "interleaved.mask");
2759         GroupMask = MaskForGaps ? Builder.CreateBinOp(Instruction::And,
2760                                                       ShuffledMask, MaskForGaps)
2761                                 : ShuffledMask;
2762       }
2763       NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part],
2764                                                 Group->getAlign(), GroupMask);
2765     } else
2766       NewStoreInstr =
2767           Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());
2768 
2769     Group->addMetadata(NewStoreInstr);
2770   }
2771 }
2772 
2773 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
2774                                                VPReplicateRecipe *RepRecipe,
2775                                                const VPIteration &Instance,
2776                                                bool IfPredicateInstr,
2777                                                VPTransformState &State) {
2778   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
2779 
2780   // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for
2781   // the first lane and part.
2782   if (isa<NoAliasScopeDeclInst>(Instr))
2783     if (!Instance.isFirstIteration())
2784       return;
2785 
2786   setDebugLocFromInst(Instr);
2787 
2788   // Does this instruction return a value ?
2789   bool IsVoidRetTy = Instr->getType()->isVoidTy();
2790 
2791   Instruction *Cloned = Instr->clone();
2792   if (!IsVoidRetTy)
2793     Cloned->setName(Instr->getName() + ".cloned");
2794 
2795   // If the scalarized instruction contributes to the address computation of a
2796   // widen masked load/store which was in a basic block that needed predication
2797   // and is not predicated after vectorization, we can't propagate
2798   // poison-generating flags (nuw/nsw, exact, inbounds, etc.). The scalarized
2799   // instruction could feed a poison value to the base address of the widen
2800   // load/store.
2801   if (State.MayGeneratePoisonRecipes.contains(RepRecipe))
2802     Cloned->dropPoisonGeneratingFlags();
2803 
2804   State.Builder.SetInsertPoint(Builder.GetInsertBlock(),
2805                                Builder.GetInsertPoint());
2806   // Replace the operands of the cloned instructions with their scalar
2807   // equivalents in the new loop.
2808   for (auto &I : enumerate(RepRecipe->operands())) {
2809     auto InputInstance = Instance;
2810     VPValue *Operand = I.value();
2811     VPReplicateRecipe *OperandR = dyn_cast<VPReplicateRecipe>(Operand);
2812     if (OperandR && OperandR->isUniform())
2813       InputInstance.Lane = VPLane::getFirstLane();
2814     Cloned->setOperand(I.index(), State.get(Operand, InputInstance));
2815   }
2816   addNewMetadata(Cloned, Instr);
2817 
2818   // Place the cloned scalar in the new loop.
2819   Builder.Insert(Cloned);
2820 
2821   State.set(RepRecipe, Cloned, Instance);
2822 
2823   // If we just cloned a new assumption, add it the assumption cache.
2824   if (auto *II = dyn_cast<AssumeInst>(Cloned))
2825     AC->registerAssumption(II);
2826 
2827   // End if-block.
2828   if (IfPredicateInstr)
2829     PredicatedInstructions.push_back(Cloned);
2830 }
2831 
2832 Value *InnerLoopVectorizer::getOrCreateTripCount(BasicBlock *InsertBlock) {
2833   if (TripCount)
2834     return TripCount;
2835 
2836   assert(InsertBlock);
2837   IRBuilder<> Builder(InsertBlock->getTerminator());
2838   // Find the loop boundaries.
2839   ScalarEvolution *SE = PSE.getSE();
2840   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
2841   assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
2842          "Invalid loop count");
2843 
2844   Type *IdxTy = Legal->getWidestInductionType();
2845   assert(IdxTy && "No type for induction");
2846 
2847   // The exit count might have the type of i64 while the phi is i32. This can
2848   // happen if we have an induction variable that is sign extended before the
2849   // compare. The only way that we get a backedge taken count is that the
2850   // induction variable was signed and as such will not overflow. In such a case
2851   // truncation is legal.
2852   if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
2853       IdxTy->getPrimitiveSizeInBits())
2854     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
2855   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
2856 
2857   // Get the total trip count from the count by adding 1.
2858   const SCEV *ExitCount = SE->getAddExpr(
2859       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
2860 
2861   const DataLayout &DL = InsertBlock->getModule()->getDataLayout();
2862 
2863   // Expand the trip count and place the new instructions in the preheader.
2864   // Notice that the pre-header does not change, only the loop body.
2865   SCEVExpander Exp(*SE, DL, "induction");
2866 
2867   // Count holds the overall loop count (N).
2868   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
2869                                 InsertBlock->getTerminator());
2870 
2871   if (TripCount->getType()->isPointerTy())
2872     TripCount =
2873         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
2874                                     InsertBlock->getTerminator());
2875 
2876   return TripCount;
2877 }
2878 
2879 Value *
2880 InnerLoopVectorizer::getOrCreateVectorTripCount(BasicBlock *InsertBlock) {
2881   if (VectorTripCount)
2882     return VectorTripCount;
2883 
2884   Value *TC = getOrCreateTripCount(InsertBlock);
2885   IRBuilder<> Builder(InsertBlock->getTerminator());
2886 
2887   Type *Ty = TC->getType();
2888   // This is where we can make the step a runtime constant.
2889   Value *Step = createStepForVF(Builder, Ty, VF, UF);
2890 
2891   // If the tail is to be folded by masking, round the number of iterations N
2892   // up to a multiple of Step instead of rounding down. This is done by first
2893   // adding Step-1 and then rounding down. Note that it's ok if this addition
2894   // overflows: the vector induction variable will eventually wrap to zero given
2895   // that it starts at zero and its Step is a power of two; the loop will then
2896   // exit, with the last early-exit vector comparison also producing all-true.
2897   if (Cost->foldTailByMasking()) {
2898     assert(isPowerOf2_32(VF.getKnownMinValue() * UF) &&
2899            "VF*UF must be a power of 2 when folding tail by masking");
2900     Value *NumLanes = getRuntimeVF(Builder, Ty, VF * UF);
2901     TC = Builder.CreateAdd(
2902         TC, Builder.CreateSub(NumLanes, ConstantInt::get(Ty, 1)), "n.rnd.up");
2903   }
2904 
2905   // Now we need to generate the expression for the part of the loop that the
2906   // vectorized body will execute. This is equal to N - (N % Step) if scalar
2907   // iterations are not required for correctness, or N - Step, otherwise. Step
2908   // is equal to the vectorization factor (number of SIMD elements) times the
2909   // unroll factor (number of SIMD instructions).
2910   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
2911 
2912   // There are cases where we *must* run at least one iteration in the remainder
2913   // loop.  See the cost model for when this can happen.  If the step evenly
2914   // divides the trip count, we set the remainder to be equal to the step. If
2915   // the step does not evenly divide the trip count, no adjustment is necessary
2916   // since there will already be scalar iterations. Note that the minimum
2917   // iterations check ensures that N >= Step.
2918   if (Cost->requiresScalarEpilogue(VF)) {
2919     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
2920     R = Builder.CreateSelect(IsZero, Step, R);
2921   }
2922 
2923   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
2924 
2925   return VectorTripCount;
2926 }
2927 
2928 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
2929                                                    const DataLayout &DL) {
2930   // Verify that V is a vector type with same number of elements as DstVTy.
2931   auto *DstFVTy = cast<FixedVectorType>(DstVTy);
2932   unsigned VF = DstFVTy->getNumElements();
2933   auto *SrcVecTy = cast<FixedVectorType>(V->getType());
2934   assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match");
2935   Type *SrcElemTy = SrcVecTy->getElementType();
2936   Type *DstElemTy = DstFVTy->getElementType();
2937   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
2938          "Vector elements must have same size");
2939 
2940   // Do a direct cast if element types are castable.
2941   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
2942     return Builder.CreateBitOrPointerCast(V, DstFVTy);
2943   }
2944   // V cannot be directly casted to desired vector type.
2945   // May happen when V is a floating point vector but DstVTy is a vector of
2946   // pointers or vice-versa. Handle this using a two-step bitcast using an
2947   // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float.
2948   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
2949          "Only one type should be a pointer type");
2950   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
2951          "Only one type should be a floating point type");
2952   Type *IntTy =
2953       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
2954   auto *VecIntTy = FixedVectorType::get(IntTy, VF);
2955   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
2956   return Builder.CreateBitOrPointerCast(CastVal, DstFVTy);
2957 }
2958 
2959 void InnerLoopVectorizer::emitMinimumIterationCountCheck(BasicBlock *Bypass) {
2960   Value *Count = getOrCreateTripCount(LoopVectorPreHeader);
2961   // Reuse existing vector loop preheader for TC checks.
2962   // Note that new preheader block is generated for vector loop.
2963   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
2964   IRBuilder<> Builder(TCCheckBlock->getTerminator());
2965 
2966   // Generate code to check if the loop's trip count is less than VF * UF, or
2967   // equal to it in case a scalar epilogue is required; this implies that the
2968   // vector trip count is zero. This check also covers the case where adding one
2969   // to the backedge-taken count overflowed leading to an incorrect trip count
2970   // of zero. In this case we will also jump to the scalar loop.
2971   auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE
2972                                             : ICmpInst::ICMP_ULT;
2973 
2974   // If tail is to be folded, vector loop takes care of all iterations.
2975   Value *CheckMinIters = Builder.getFalse();
2976   if (!Cost->foldTailByMasking()) {
2977     Value *Step = createStepForVF(Builder, Count->getType(), VF, UF);
2978     CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
2979   }
2980   // Create new preheader for vector loop.
2981   LoopVectorPreHeader =
2982       SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
2983                  "vector.ph");
2984 
2985   assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
2986                                DT->getNode(Bypass)->getIDom()) &&
2987          "TC check is expected to dominate Bypass");
2988 
2989   // Update dominator for Bypass & LoopExit (if needed).
2990   DT->changeImmediateDominator(Bypass, TCCheckBlock);
2991   if (!Cost->requiresScalarEpilogue(VF))
2992     // If there is an epilogue which must run, there's no edge from the
2993     // middle block to exit blocks  and thus no need to update the immediate
2994     // dominator of the exit blocks.
2995     DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
2996 
2997   ReplaceInstWithInst(
2998       TCCheckBlock->getTerminator(),
2999       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
3000   LoopBypassBlocks.push_back(TCCheckBlock);
3001 }
3002 
3003 BasicBlock *InnerLoopVectorizer::emitSCEVChecks(BasicBlock *Bypass) {
3004 
3005   BasicBlock *const SCEVCheckBlock =
3006       RTChecks.emitSCEVChecks(Bypass, LoopVectorPreHeader, LoopExitBlock);
3007   if (!SCEVCheckBlock)
3008     return nullptr;
3009 
3010   assert(!(SCEVCheckBlock->getParent()->hasOptSize() ||
3011            (OptForSizeBasedOnProfile &&
3012             Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) &&
3013          "Cannot SCEV check stride or overflow when optimizing for size");
3014 
3015 
3016   // Update dominator only if this is first RT check.
3017   if (LoopBypassBlocks.empty()) {
3018     DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
3019     if (!Cost->requiresScalarEpilogue(VF))
3020       // If there is an epilogue which must run, there's no edge from the
3021       // middle block to exit blocks  and thus no need to update the immediate
3022       // dominator of the exit blocks.
3023       DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
3024   }
3025 
3026   LoopBypassBlocks.push_back(SCEVCheckBlock);
3027   AddedSafetyChecks = true;
3028   return SCEVCheckBlock;
3029 }
3030 
3031 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(BasicBlock *Bypass) {
3032   // VPlan-native path does not do any analysis for runtime checks currently.
3033   if (EnableVPlanNativePath)
3034     return nullptr;
3035 
3036   BasicBlock *const MemCheckBlock =
3037       RTChecks.emitMemRuntimeChecks(Bypass, LoopVectorPreHeader);
3038 
3039   // Check if we generated code that checks in runtime if arrays overlap. We put
3040   // the checks into a separate block to make the more common case of few
3041   // elements faster.
3042   if (!MemCheckBlock)
3043     return nullptr;
3044 
3045   if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
3046     assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
3047            "Cannot emit memory checks when optimizing for size, unless forced "
3048            "to vectorize.");
3049     ORE->emit([&]() {
3050       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
3051                                         OrigLoop->getStartLoc(),
3052                                         OrigLoop->getHeader())
3053              << "Code-size may be reduced by not forcing "
3054                 "vectorization, or by source-code modifications "
3055                 "eliminating the need for runtime checks "
3056                 "(e.g., adding 'restrict').";
3057     });
3058   }
3059 
3060   LoopBypassBlocks.push_back(MemCheckBlock);
3061 
3062   AddedSafetyChecks = true;
3063 
3064   // We currently don't use LoopVersioning for the actual loop cloning but we
3065   // still use it to add the noalias metadata.
3066   LVer = std::make_unique<LoopVersioning>(
3067       *Legal->getLAI(),
3068       Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
3069       DT, PSE.getSE());
3070   LVer->prepareNoAliasMetadata();
3071   return MemCheckBlock;
3072 }
3073 
3074 void InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
3075   LoopScalarBody = OrigLoop->getHeader();
3076   LoopVectorPreHeader = OrigLoop->getLoopPreheader();
3077   assert(LoopVectorPreHeader && "Invalid loop structure");
3078   LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr
3079   assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) &&
3080          "multiple exit loop without required epilogue?");
3081 
3082   LoopMiddleBlock =
3083       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3084                  LI, nullptr, Twine(Prefix) + "middle.block");
3085   LoopScalarPreHeader =
3086       SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
3087                  nullptr, Twine(Prefix) + "scalar.ph");
3088 
3089   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3090 
3091   // Set up the middle block terminator.  Two cases:
3092   // 1) If we know that we must execute the scalar epilogue, emit an
3093   //    unconditional branch.
3094   // 2) Otherwise, we must have a single unique exit block (due to how we
3095   //    implement the multiple exit case).  In this case, set up a conditonal
3096   //    branch from the middle block to the loop scalar preheader, and the
3097   //    exit block.  completeLoopSkeleton will update the condition to use an
3098   //    iteration check, if required to decide whether to execute the remainder.
3099   BranchInst *BrInst = Cost->requiresScalarEpilogue(VF) ?
3100     BranchInst::Create(LoopScalarPreHeader) :
3101     BranchInst::Create(LoopExitBlock, LoopScalarPreHeader,
3102                        Builder.getTrue());
3103   BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3104   ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
3105 
3106   SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3107              nullptr, nullptr, Twine(Prefix) + "vector.body");
3108 
3109   // Update dominator for loop exit.
3110   if (!Cost->requiresScalarEpilogue(VF))
3111     // If there is an epilogue which must run, there's no edge from the
3112     // middle block to exit blocks  and thus no need to update the immediate
3113     // dominator of the exit blocks.
3114     DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
3115 }
3116 
3117 void InnerLoopVectorizer::createInductionResumeValues(
3118     std::pair<BasicBlock *, Value *> AdditionalBypass) {
3119   assert(((AdditionalBypass.first && AdditionalBypass.second) ||
3120           (!AdditionalBypass.first && !AdditionalBypass.second)) &&
3121          "Inconsistent information about additional bypass.");
3122 
3123   Value *VectorTripCount = getOrCreateVectorTripCount(LoopVectorPreHeader);
3124   assert(VectorTripCount && "Expected valid arguments");
3125   // We are going to resume the execution of the scalar loop.
3126   // Go over all of the induction variables that we found and fix the
3127   // PHIs that are left in the scalar version of the loop.
3128   // The starting values of PHI nodes depend on the counter of the last
3129   // iteration in the vectorized loop.
3130   // If we come from a bypass edge then we need to start from the original
3131   // start value.
3132   Instruction *OldInduction = Legal->getPrimaryInduction();
3133   for (auto &InductionEntry : Legal->getInductionVars()) {
3134     PHINode *OrigPhi = InductionEntry.first;
3135     InductionDescriptor II = InductionEntry.second;
3136 
3137     // Create phi nodes to merge from the  backedge-taken check block.
3138     PHINode *BCResumeVal =
3139         PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
3140                         LoopScalarPreHeader->getTerminator());
3141     // Copy original phi DL over to the new one.
3142     BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
3143     Value *&EndValue = IVEndValues[OrigPhi];
3144     Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
3145     if (OrigPhi == OldInduction) {
3146       // We know what the end value is.
3147       EndValue = VectorTripCount;
3148     } else {
3149       IRBuilder<> B(LoopVectorPreHeader->getTerminator());
3150 
3151       // Fast-math-flags propagate from the original induction instruction.
3152       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3153         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3154 
3155       Type *StepType = II.getStep()->getType();
3156       Instruction::CastOps CastOp =
3157           CastInst::getCastOpcode(VectorTripCount, true, StepType, true);
3158       Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd");
3159       Value *Step =
3160           CreateStepValue(II.getStep(), *PSE.getSE(), &*B.GetInsertPoint());
3161       EndValue = emitTransformedIndex(B, CRD, II.getStartValue(), Step, II);
3162       EndValue->setName("ind.end");
3163 
3164       // Compute the end value for the additional bypass (if applicable).
3165       if (AdditionalBypass.first) {
3166         B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt()));
3167         CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true,
3168                                          StepType, true);
3169         Value *Step =
3170             CreateStepValue(II.getStep(), *PSE.getSE(), &*B.GetInsertPoint());
3171         CRD =
3172             B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd");
3173         EndValueFromAdditionalBypass =
3174             emitTransformedIndex(B, CRD, II.getStartValue(), Step, II);
3175         EndValueFromAdditionalBypass->setName("ind.end");
3176       }
3177     }
3178     // The new PHI merges the original incoming value, in case of a bypass,
3179     // or the value at the end of the vectorized loop.
3180     BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);
3181 
3182     // Fix the scalar body counter (PHI node).
3183     // The old induction's phi node in the scalar body needs the truncated
3184     // value.
3185     for (BasicBlock *BB : LoopBypassBlocks)
3186       BCResumeVal->addIncoming(II.getStartValue(), BB);
3187 
3188     if (AdditionalBypass.first)
3189       BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first,
3190                                             EndValueFromAdditionalBypass);
3191 
3192     OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
3193   }
3194 }
3195 
3196 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(MDNode *OrigLoopID) {
3197   // The trip counts should be cached by now.
3198   Value *Count = getOrCreateTripCount(LoopVectorPreHeader);
3199   Value *VectorTripCount = getOrCreateVectorTripCount(LoopVectorPreHeader);
3200 
3201   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3202 
3203   // Add a check in the middle block to see if we have completed
3204   // all of the iterations in the first vector loop.  Three cases:
3205   // 1) If we require a scalar epilogue, there is no conditional branch as
3206   //    we unconditionally branch to the scalar preheader.  Do nothing.
3207   // 2) If (N - N%VF) == N, then we *don't* need to run the remainder.
3208   //    Thus if tail is to be folded, we know we don't need to run the
3209   //    remainder and we can use the previous value for the condition (true).
3210   // 3) Otherwise, construct a runtime check.
3211   if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) {
3212     Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
3213                                         Count, VectorTripCount, "cmp.n",
3214                                         LoopMiddleBlock->getTerminator());
3215 
3216     // Here we use the same DebugLoc as the scalar loop latch terminator instead
3217     // of the corresponding compare because they may have ended up with
3218     // different line numbers and we want to avoid awkward line stepping while
3219     // debugging. Eg. if the compare has got a line number inside the loop.
3220     CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3221     cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN);
3222   }
3223 
3224 #ifdef EXPENSIVE_CHECKS
3225   assert(DT->verify(DominatorTree::VerificationLevel::Fast));
3226 #endif
3227 
3228   return LoopVectorPreHeader;
3229 }
3230 
3231 std::pair<BasicBlock *, Value *>
3232 InnerLoopVectorizer::createVectorizedLoopSkeleton() {
3233   /*
3234    In this function we generate a new loop. The new loop will contain
3235    the vectorized instructions while the old loop will continue to run the
3236    scalar remainder.
3237 
3238        [ ] <-- loop iteration number check.
3239     /   |
3240    /    v
3241   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
3242   |  /  |
3243   | /   v
3244   ||   [ ]     <-- vector pre header.
3245   |/    |
3246   |     v
3247   |    [  ] \
3248   |    [  ]_|   <-- vector loop.
3249   |     |
3250   |     v
3251   \   -[ ]   <--- middle-block.
3252    \/   |
3253    /\   v
3254    | ->[ ]     <--- new preheader.
3255    |    |
3256  (opt)  v      <-- edge from middle to exit iff epilogue is not required.
3257    |   [ ] \
3258    |   [ ]_|   <-- old scalar loop to handle remainder (scalar epilogue).
3259     \   |
3260      \  v
3261       >[ ]     <-- exit block(s).
3262    ...
3263    */
3264 
3265   // Get the metadata of the original loop before it gets modified.
3266   MDNode *OrigLoopID = OrigLoop->getLoopID();
3267 
3268   // Workaround!  Compute the trip count of the original loop and cache it
3269   // before we start modifying the CFG.  This code has a systemic problem
3270   // wherein it tries to run analysis over partially constructed IR; this is
3271   // wrong, and not simply for SCEV.  The trip count of the original loop
3272   // simply happens to be prone to hitting this in practice.  In theory, we
3273   // can hit the same issue for any SCEV, or ValueTracking query done during
3274   // mutation.  See PR49900.
3275   getOrCreateTripCount(OrigLoop->getLoopPreheader());
3276 
3277   // Create an empty vector loop, and prepare basic blocks for the runtime
3278   // checks.
3279   createVectorLoopSkeleton("");
3280 
3281   // Now, compare the new count to zero. If it is zero skip the vector loop and
3282   // jump to the scalar loop. This check also covers the case where the
3283   // backedge-taken count is uint##_max: adding one to it will overflow leading
3284   // to an incorrect trip count of zero. In this (rare) case we will also jump
3285   // to the scalar loop.
3286   emitMinimumIterationCountCheck(LoopScalarPreHeader);
3287 
3288   // Generate the code to check any assumptions that we've made for SCEV
3289   // expressions.
3290   emitSCEVChecks(LoopScalarPreHeader);
3291 
3292   // Generate the code that checks in runtime if arrays overlap. We put the
3293   // checks into a separate block to make the more common case of few elements
3294   // faster.
3295   emitMemRuntimeChecks(LoopScalarPreHeader);
3296 
3297   // Emit phis for the new starting index of the scalar loop.
3298   createInductionResumeValues();
3299 
3300   return {completeLoopSkeleton(OrigLoopID), nullptr};
3301 }
3302 
3303 // Fix up external users of the induction variable. At this point, we are
3304 // in LCSSA form, with all external PHIs that use the IV having one input value,
3305 // coming from the remainder loop. We need those PHIs to also have a correct
3306 // value for the IV when arriving directly from the middle block.
3307 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3308                                        const InductionDescriptor &II,
3309                                        Value *CountRoundDown, Value *EndValue,
3310                                        BasicBlock *MiddleBlock,
3311                                        BasicBlock *VectorHeader) {
3312   // There are two kinds of external IV usages - those that use the value
3313   // computed in the last iteration (the PHI) and those that use the penultimate
3314   // value (the value that feeds into the phi from the loop latch).
3315   // We allow both, but they, obviously, have different values.
3316 
3317   assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");
3318 
3319   DenseMap<Value *, Value *> MissingVals;
3320 
3321   // An external user of the last iteration's value should see the value that
3322   // the remainder loop uses to initialize its own IV.
3323   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3324   for (User *U : PostInc->users()) {
3325     Instruction *UI = cast<Instruction>(U);
3326     if (!OrigLoop->contains(UI)) {
3327       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3328       MissingVals[UI] = EndValue;
3329     }
3330   }
3331 
3332   // An external user of the penultimate value need to see EndValue - Step.
3333   // The simplest way to get this is to recompute it from the constituent SCEVs,
3334   // that is Start + (Step * (CRD - 1)).
3335   for (User *U : OrigPhi->users()) {
3336     auto *UI = cast<Instruction>(U);
3337     if (!OrigLoop->contains(UI)) {
3338       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3339 
3340       IRBuilder<> B(MiddleBlock->getTerminator());
3341 
3342       // Fast-math-flags propagate from the original induction instruction.
3343       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3344         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3345 
3346       Value *CountMinusOne = B.CreateSub(
3347           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3348       Value *CMO =
3349           !II.getStep()->getType()->isIntegerTy()
3350               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3351                              II.getStep()->getType())
3352               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3353       CMO->setName("cast.cmo");
3354 
3355       Value *Step = CreateStepValue(II.getStep(), *PSE.getSE(),
3356                                     VectorHeader->getTerminator());
3357       Value *Escape =
3358           emitTransformedIndex(B, CMO, II.getStartValue(), Step, II);
3359       Escape->setName("ind.escape");
3360       MissingVals[UI] = Escape;
3361     }
3362   }
3363 
3364   for (auto &I : MissingVals) {
3365     PHINode *PHI = cast<PHINode>(I.first);
3366     // One corner case we have to handle is two IVs "chasing" each-other,
3367     // that is %IV2 = phi [...], [ %IV1, %latch ]
3368     // In this case, if IV1 has an external use, we need to avoid adding both
3369     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3370     // don't already have an incoming value for the middle block.
3371     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3372       PHI->addIncoming(I.second, MiddleBlock);
3373   }
3374 }
3375 
3376 namespace {
3377 
3378 struct CSEDenseMapInfo {
3379   static bool canHandle(const Instruction *I) {
3380     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3381            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3382   }
3383 
3384   static inline Instruction *getEmptyKey() {
3385     return DenseMapInfo<Instruction *>::getEmptyKey();
3386   }
3387 
3388   static inline Instruction *getTombstoneKey() {
3389     return DenseMapInfo<Instruction *>::getTombstoneKey();
3390   }
3391 
3392   static unsigned getHashValue(const Instruction *I) {
3393     assert(canHandle(I) && "Unknown instruction!");
3394     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3395                                                            I->value_op_end()));
3396   }
3397 
3398   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3399     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3400         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3401       return LHS == RHS;
3402     return LHS->isIdenticalTo(RHS);
3403   }
3404 };
3405 
3406 } // end anonymous namespace
3407 
3408 ///Perform cse of induction variable instructions.
3409 static void cse(BasicBlock *BB) {
3410   // Perform simple cse.
3411   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3412   for (Instruction &In : llvm::make_early_inc_range(*BB)) {
3413     if (!CSEDenseMapInfo::canHandle(&In))
3414       continue;
3415 
3416     // Check if we can replace this instruction with any of the
3417     // visited instructions.
3418     if (Instruction *V = CSEMap.lookup(&In)) {
3419       In.replaceAllUsesWith(V);
3420       In.eraseFromParent();
3421       continue;
3422     }
3423 
3424     CSEMap[&In] = &In;
3425   }
3426 }
3427 
3428 InstructionCost
3429 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF,
3430                                               bool &NeedToScalarize) const {
3431   Function *F = CI->getCalledFunction();
3432   Type *ScalarRetTy = CI->getType();
3433   SmallVector<Type *, 4> Tys, ScalarTys;
3434   for (auto &ArgOp : CI->args())
3435     ScalarTys.push_back(ArgOp->getType());
3436 
3437   // Estimate cost of scalarized vector call. The source operands are assumed
3438   // to be vectors, so we need to extract individual elements from there,
3439   // execute VF scalar calls, and then gather the result into the vector return
3440   // value.
3441   InstructionCost ScalarCallCost =
3442       TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput);
3443   if (VF.isScalar())
3444     return ScalarCallCost;
3445 
3446   // Compute corresponding vector type for return value and arguments.
3447   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3448   for (Type *ScalarTy : ScalarTys)
3449     Tys.push_back(ToVectorTy(ScalarTy, VF));
3450 
3451   // Compute costs of unpacking argument values for the scalar calls and
3452   // packing the return values to a vector.
3453   InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
3454 
3455   InstructionCost Cost =
3456       ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
3457 
3458   // If we can't emit a vector call for this function, then the currently found
3459   // cost is the cost we need to return.
3460   NeedToScalarize = true;
3461   VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
3462   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3463 
3464   if (!TLI || CI->isNoBuiltin() || !VecFunc)
3465     return Cost;
3466 
3467   // If the corresponding vector cost is cheaper, return its cost.
3468   InstructionCost VectorCallCost =
3469       TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput);
3470   if (VectorCallCost < Cost) {
3471     NeedToScalarize = false;
3472     Cost = VectorCallCost;
3473   }
3474   return Cost;
3475 }
3476 
3477 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) {
3478   if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy()))
3479     return Elt;
3480   return VectorType::get(Elt, VF);
3481 }
3482 
3483 InstructionCost
3484 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
3485                                                    ElementCount VF) const {
3486   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3487   assert(ID && "Expected intrinsic call!");
3488   Type *RetTy = MaybeVectorizeType(CI->getType(), VF);
3489   FastMathFlags FMF;
3490   if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
3491     FMF = FPMO->getFastMathFlags();
3492 
3493   SmallVector<const Value *> Arguments(CI->args());
3494   FunctionType *FTy = CI->getCalledFunction()->getFunctionType();
3495   SmallVector<Type *> ParamTys;
3496   std::transform(FTy->param_begin(), FTy->param_end(),
3497                  std::back_inserter(ParamTys),
3498                  [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); });
3499 
3500   IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
3501                                     dyn_cast<IntrinsicInst>(CI));
3502   return TTI.getIntrinsicInstrCost(CostAttrs,
3503                                    TargetTransformInfo::TCK_RecipThroughput);
3504 }
3505 
3506 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3507   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3508   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3509   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3510 }
3511 
3512 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3513   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3514   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3515   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3516 }
3517 
3518 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) {
3519   // For every instruction `I` in MinBWs, truncate the operands, create a
3520   // truncated version of `I` and reextend its result. InstCombine runs
3521   // later and will remove any ext/trunc pairs.
3522   SmallPtrSet<Value *, 4> Erased;
3523   for (const auto &KV : Cost->getMinimalBitwidths()) {
3524     // If the value wasn't vectorized, we must maintain the original scalar
3525     // type. The absence of the value from State indicates that it
3526     // wasn't vectorized.
3527     // FIXME: Should not rely on getVPValue at this point.
3528     VPValue *Def = State.Plan->getVPValue(KV.first, true);
3529     if (!State.hasAnyVectorValue(Def))
3530       continue;
3531     for (unsigned Part = 0; Part < UF; ++Part) {
3532       Value *I = State.get(Def, Part);
3533       if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
3534         continue;
3535       Type *OriginalTy = I->getType();
3536       Type *ScalarTruncatedTy =
3537           IntegerType::get(OriginalTy->getContext(), KV.second);
3538       auto *TruncatedTy = VectorType::get(
3539           ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount());
3540       if (TruncatedTy == OriginalTy)
3541         continue;
3542 
3543       IRBuilder<> B(cast<Instruction>(I));
3544       auto ShrinkOperand = [&](Value *V) -> Value * {
3545         if (auto *ZI = dyn_cast<ZExtInst>(V))
3546           if (ZI->getSrcTy() == TruncatedTy)
3547             return ZI->getOperand(0);
3548         return B.CreateZExtOrTrunc(V, TruncatedTy);
3549       };
3550 
3551       // The actual instruction modification depends on the instruction type,
3552       // unfortunately.
3553       Value *NewI = nullptr;
3554       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3555         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3556                              ShrinkOperand(BO->getOperand(1)));
3557 
3558         // Any wrapping introduced by shrinking this operation shouldn't be
3559         // considered undefined behavior. So, we can't unconditionally copy
3560         // arithmetic wrapping flags to NewI.
3561         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3562       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3563         NewI =
3564             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3565                          ShrinkOperand(CI->getOperand(1)));
3566       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3567         NewI = B.CreateSelect(SI->getCondition(),
3568                               ShrinkOperand(SI->getTrueValue()),
3569                               ShrinkOperand(SI->getFalseValue()));
3570       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3571         switch (CI->getOpcode()) {
3572         default:
3573           llvm_unreachable("Unhandled cast!");
3574         case Instruction::Trunc:
3575           NewI = ShrinkOperand(CI->getOperand(0));
3576           break;
3577         case Instruction::SExt:
3578           NewI = B.CreateSExtOrTrunc(
3579               CI->getOperand(0),
3580               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3581           break;
3582         case Instruction::ZExt:
3583           NewI = B.CreateZExtOrTrunc(
3584               CI->getOperand(0),
3585               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3586           break;
3587         }
3588       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
3589         auto Elements0 =
3590             cast<VectorType>(SI->getOperand(0)->getType())->getElementCount();
3591         auto *O0 = B.CreateZExtOrTrunc(
3592             SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
3593         auto Elements1 =
3594             cast<VectorType>(SI->getOperand(1)->getType())->getElementCount();
3595         auto *O1 = B.CreateZExtOrTrunc(
3596             SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
3597 
3598         NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
3599       } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
3600         // Don't do anything with the operands, just extend the result.
3601         continue;
3602       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3603         auto Elements =
3604             cast<VectorType>(IE->getOperand(0)->getType())->getElementCount();
3605         auto *O0 = B.CreateZExtOrTrunc(
3606             IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3607         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3608         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3609       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3610         auto Elements =
3611             cast<VectorType>(EE->getOperand(0)->getType())->getElementCount();
3612         auto *O0 = B.CreateZExtOrTrunc(
3613             EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3614         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3615       } else {
3616         // If we don't know what to do, be conservative and don't do anything.
3617         continue;
3618       }
3619 
3620       // Lastly, extend the result.
3621       NewI->takeName(cast<Instruction>(I));
3622       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3623       I->replaceAllUsesWith(Res);
3624       cast<Instruction>(I)->eraseFromParent();
3625       Erased.insert(I);
3626       State.reset(Def, Res, Part);
3627     }
3628   }
3629 
3630   // We'll have created a bunch of ZExts that are now parentless. Clean up.
3631   for (const auto &KV : Cost->getMinimalBitwidths()) {
3632     // If the value wasn't vectorized, we must maintain the original scalar
3633     // type. The absence of the value from State indicates that it
3634     // wasn't vectorized.
3635     // FIXME: Should not rely on getVPValue at this point.
3636     VPValue *Def = State.Plan->getVPValue(KV.first, true);
3637     if (!State.hasAnyVectorValue(Def))
3638       continue;
3639     for (unsigned Part = 0; Part < UF; ++Part) {
3640       Value *I = State.get(Def, Part);
3641       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3642       if (Inst && Inst->use_empty()) {
3643         Value *NewI = Inst->getOperand(0);
3644         Inst->eraseFromParent();
3645         State.reset(Def, NewI, Part);
3646       }
3647     }
3648   }
3649 }
3650 
3651 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
3652   // Insert truncates and extends for any truncated instructions as hints to
3653   // InstCombine.
3654   if (VF.isVector())
3655     truncateToMinimalBitwidths(State);
3656 
3657   // Fix widened non-induction PHIs by setting up the PHI operands.
3658   if (OrigPHIsToFix.size()) {
3659     assert(EnableVPlanNativePath &&
3660            "Unexpected non-induction PHIs for fixup in non VPlan-native path");
3661     fixNonInductionPHIs(State);
3662   }
3663 
3664   // At this point every instruction in the original loop is widened to a
3665   // vector form. Now we need to fix the recurrences in the loop. These PHI
3666   // nodes are currently empty because we did not want to introduce cycles.
3667   // This is the second stage of vectorizing recurrences.
3668   fixCrossIterationPHIs(State);
3669 
3670   // Forget the original basic block.
3671   PSE.getSE()->forgetLoop(OrigLoop);
3672 
3673   Loop *VectorLoop = LI->getLoopFor(State.CFG.PrevBB);
3674   // If we inserted an edge from the middle block to the unique exit block,
3675   // update uses outside the loop (phis) to account for the newly inserted
3676   // edge.
3677   if (!Cost->requiresScalarEpilogue(VF)) {
3678     // Fix-up external users of the induction variables.
3679     for (auto &Entry : Legal->getInductionVars())
3680       fixupIVUsers(Entry.first, Entry.second,
3681                    getOrCreateVectorTripCount(VectorLoop->getLoopPreheader()),
3682                    IVEndValues[Entry.first], LoopMiddleBlock,
3683                    VectorLoop->getHeader());
3684 
3685     fixLCSSAPHIs(State);
3686   }
3687 
3688   for (Instruction *PI : PredicatedInstructions)
3689     sinkScalarOperands(&*PI);
3690 
3691   // Remove redundant induction instructions.
3692   cse(VectorLoop->getHeader());
3693 
3694   // Set/update profile weights for the vector and remainder loops as original
3695   // loop iterations are now distributed among them. Note that original loop
3696   // represented by LoopScalarBody becomes remainder loop after vectorization.
3697   //
3698   // For cases like foldTailByMasking() and requiresScalarEpiloque() we may
3699   // end up getting slightly roughened result but that should be OK since
3700   // profile is not inherently precise anyway. Note also possible bypass of
3701   // vector code caused by legality checks is ignored, assigning all the weight
3702   // to the vector loop, optimistically.
3703   //
3704   // For scalable vectorization we can't know at compile time how many iterations
3705   // of the loop are handled in one vector iteration, so instead assume a pessimistic
3706   // vscale of '1'.
3707   setProfileInfoAfterUnrolling(LI->getLoopFor(LoopScalarBody), VectorLoop,
3708                                LI->getLoopFor(LoopScalarBody),
3709                                VF.getKnownMinValue() * UF);
3710 }
3711 
3712 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
3713   // In order to support recurrences we need to be able to vectorize Phi nodes.
3714   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
3715   // stage #2: We now need to fix the recurrences by adding incoming edges to
3716   // the currently empty PHI nodes. At this point every instruction in the
3717   // original loop is widened to a vector form so we can use them to construct
3718   // the incoming edges.
3719   VPBasicBlock *Header =
3720       State.Plan->getVectorLoopRegion()->getEntryBasicBlock();
3721   for (VPRecipeBase &R : Header->phis()) {
3722     if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R))
3723       fixReduction(ReductionPhi, State);
3724     else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))
3725       fixFirstOrderRecurrence(FOR, State);
3726   }
3727 }
3728 
3729 void InnerLoopVectorizer::fixFirstOrderRecurrence(
3730     VPFirstOrderRecurrencePHIRecipe *PhiR, VPTransformState &State) {
3731   // This is the second phase of vectorizing first-order recurrences. An
3732   // overview of the transformation is described below. Suppose we have the
3733   // following loop.
3734   //
3735   //   for (int i = 0; i < n; ++i)
3736   //     b[i] = a[i] - a[i - 1];
3737   //
3738   // There is a first-order recurrence on "a". For this loop, the shorthand
3739   // scalar IR looks like:
3740   //
3741   //   scalar.ph:
3742   //     s_init = a[-1]
3743   //     br scalar.body
3744   //
3745   //   scalar.body:
3746   //     i = phi [0, scalar.ph], [i+1, scalar.body]
3747   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
3748   //     s2 = a[i]
3749   //     b[i] = s2 - s1
3750   //     br cond, scalar.body, ...
3751   //
3752   // In this example, s1 is a recurrence because it's value depends on the
3753   // previous iteration. In the first phase of vectorization, we created a
3754   // vector phi v1 for s1. We now complete the vectorization and produce the
3755   // shorthand vector IR shown below (for VF = 4, UF = 1).
3756   //
3757   //   vector.ph:
3758   //     v_init = vector(..., ..., ..., a[-1])
3759   //     br vector.body
3760   //
3761   //   vector.body
3762   //     i = phi [0, vector.ph], [i+4, vector.body]
3763   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
3764   //     v2 = a[i, i+1, i+2, i+3];
3765   //     v3 = vector(v1(3), v2(0, 1, 2))
3766   //     b[i, i+1, i+2, i+3] = v2 - v3
3767   //     br cond, vector.body, middle.block
3768   //
3769   //   middle.block:
3770   //     x = v2(3)
3771   //     br scalar.ph
3772   //
3773   //   scalar.ph:
3774   //     s_init = phi [x, middle.block], [a[-1], otherwise]
3775   //     br scalar.body
3776   //
3777   // After execution completes the vector loop, we extract the next value of
3778   // the recurrence (x) to use as the initial value in the scalar loop.
3779 
3780   // Extract the last vector element in the middle block. This will be the
3781   // initial value for the recurrence when jumping to the scalar loop.
3782   VPValue *PreviousDef = PhiR->getBackedgeValue();
3783   Value *Incoming = State.get(PreviousDef, UF - 1);
3784   auto *ExtractForScalar = Incoming;
3785   auto *IdxTy = Builder.getInt32Ty();
3786   if (VF.isVector()) {
3787     auto *One = ConstantInt::get(IdxTy, 1);
3788     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
3789     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
3790     auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
3791     ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx,
3792                                                     "vector.recur.extract");
3793   }
3794   // Extract the second last element in the middle block if the
3795   // Phi is used outside the loop. We need to extract the phi itself
3796   // and not the last element (the phi update in the current iteration). This
3797   // will be the value when jumping to the exit block from the LoopMiddleBlock,
3798   // when the scalar loop is not run at all.
3799   Value *ExtractForPhiUsedOutsideLoop = nullptr;
3800   if (VF.isVector()) {
3801     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
3802     auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2));
3803     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
3804         Incoming, Idx, "vector.recur.extract.for.phi");
3805   } else if (UF > 1)
3806     // When loop is unrolled without vectorizing, initialize
3807     // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value
3808     // of `Incoming`. This is analogous to the vectorized case above: extracting
3809     // the second last element when VF > 1.
3810     ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);
3811 
3812   // Fix the initial value of the original recurrence in the scalar loop.
3813   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
3814   PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue());
3815   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
3816   auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue();
3817   for (auto *BB : predecessors(LoopScalarPreHeader)) {
3818     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
3819     Start->addIncoming(Incoming, BB);
3820   }
3821 
3822   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
3823   Phi->setName("scalar.recur");
3824 
3825   // Finally, fix users of the recurrence outside the loop. The users will need
3826   // either the last value of the scalar recurrence or the last value of the
3827   // vector recurrence we extracted in the middle block. Since the loop is in
3828   // LCSSA form, we just need to find all the phi nodes for the original scalar
3829   // recurrence in the exit block, and then add an edge for the middle block.
3830   // Note that LCSSA does not imply single entry when the original scalar loop
3831   // had multiple exiting edges (as we always run the last iteration in the
3832   // scalar epilogue); in that case, there is no edge from middle to exit and
3833   // and thus no phis which needed updated.
3834   if (!Cost->requiresScalarEpilogue(VF))
3835     for (PHINode &LCSSAPhi : LoopExitBlock->phis())
3836       if (llvm::is_contained(LCSSAPhi.incoming_values(), Phi))
3837         LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
3838 }
3839 
3840 void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR,
3841                                        VPTransformState &State) {
3842   PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
3843   // Get it's reduction variable descriptor.
3844   assert(Legal->isReductionVariable(OrigPhi) &&
3845          "Unable to find the reduction variable");
3846   const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
3847 
3848   RecurKind RK = RdxDesc.getRecurrenceKind();
3849   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
3850   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
3851   setDebugLocFromInst(ReductionStartValue);
3852 
3853   VPValue *LoopExitInstDef = PhiR->getBackedgeValue();
3854   // This is the vector-clone of the value that leaves the loop.
3855   Type *VecTy = State.get(LoopExitInstDef, 0)->getType();
3856 
3857   // Wrap flags are in general invalid after vectorization, clear them.
3858   clearReductionWrapFlags(RdxDesc, State);
3859 
3860   // Before each round, move the insertion point right between
3861   // the PHIs and the values we are going to write.
3862   // This allows us to write both PHINodes and the extractelement
3863   // instructions.
3864   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3865 
3866   setDebugLocFromInst(LoopExitInst);
3867 
3868   Type *PhiTy = OrigPhi->getType();
3869   BasicBlock *VectorLoopLatch =
3870       LI->getLoopFor(State.CFG.PrevBB)->getLoopLatch();
3871   // If tail is folded by masking, the vector value to leave the loop should be
3872   // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
3873   // instead of the former. For an inloop reduction the reduction will already
3874   // be predicated, and does not need to be handled here.
3875   if (Cost->foldTailByMasking() && !PhiR->isInLoop()) {
3876     for (unsigned Part = 0; Part < UF; ++Part) {
3877       Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
3878       Value *Sel = nullptr;
3879       for (User *U : VecLoopExitInst->users()) {
3880         if (isa<SelectInst>(U)) {
3881           assert(!Sel && "Reduction exit feeding two selects");
3882           Sel = U;
3883         } else
3884           assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
3885       }
3886       assert(Sel && "Reduction exit feeds no select");
3887       State.reset(LoopExitInstDef, Sel, Part);
3888 
3889       // If the target can create a predicated operator for the reduction at no
3890       // extra cost in the loop (for example a predicated vadd), it can be
3891       // cheaper for the select to remain in the loop than be sunk out of it,
3892       // and so use the select value for the phi instead of the old
3893       // LoopExitValue.
3894       if (PreferPredicatedReductionSelect ||
3895           TTI->preferPredicatedReductionSelect(
3896               RdxDesc.getOpcode(), PhiTy,
3897               TargetTransformInfo::ReductionFlags())) {
3898         auto *VecRdxPhi =
3899             cast<PHINode>(State.get(PhiR, Part));
3900         VecRdxPhi->setIncomingValueForBlock(VectorLoopLatch, Sel);
3901       }
3902     }
3903   }
3904 
3905   // If the vector reduction can be performed in a smaller type, we truncate
3906   // then extend the loop exit value to enable InstCombine to evaluate the
3907   // entire expression in the smaller type.
3908   if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) {
3909     assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!");
3910     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
3911     Builder.SetInsertPoint(VectorLoopLatch->getTerminator());
3912     VectorParts RdxParts(UF);
3913     for (unsigned Part = 0; Part < UF; ++Part) {
3914       RdxParts[Part] = State.get(LoopExitInstDef, Part);
3915       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
3916       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
3917                                         : Builder.CreateZExt(Trunc, VecTy);
3918       for (User *U : llvm::make_early_inc_range(RdxParts[Part]->users()))
3919         if (U != Trunc) {
3920           U->replaceUsesOfWith(RdxParts[Part], Extnd);
3921           RdxParts[Part] = Extnd;
3922         }
3923     }
3924     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3925     for (unsigned Part = 0; Part < UF; ++Part) {
3926       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
3927       State.reset(LoopExitInstDef, RdxParts[Part], Part);
3928     }
3929   }
3930 
3931   // Reduce all of the unrolled parts into a single vector.
3932   Value *ReducedPartRdx = State.get(LoopExitInstDef, 0);
3933   unsigned Op = RecurrenceDescriptor::getOpcode(RK);
3934 
3935   // The middle block terminator has already been assigned a DebugLoc here (the
3936   // OrigLoop's single latch terminator). We want the whole middle block to
3937   // appear to execute on this line because: (a) it is all compiler generated,
3938   // (b) these instructions are always executed after evaluating the latch
3939   // conditional branch, and (c) other passes may add new predecessors which
3940   // terminate on this line. This is the easiest way to ensure we don't
3941   // accidentally cause an extra step back into the loop while debugging.
3942   setDebugLocFromInst(LoopMiddleBlock->getTerminator());
3943   if (PhiR->isOrdered())
3944     ReducedPartRdx = State.get(LoopExitInstDef, UF - 1);
3945   else {
3946     // Floating-point operations should have some FMF to enable the reduction.
3947     IRBuilderBase::FastMathFlagGuard FMFG(Builder);
3948     Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
3949     for (unsigned Part = 1; Part < UF; ++Part) {
3950       Value *RdxPart = State.get(LoopExitInstDef, Part);
3951       if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
3952         ReducedPartRdx = Builder.CreateBinOp(
3953             (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
3954       } else if (RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK))
3955         ReducedPartRdx = createSelectCmpOp(Builder, ReductionStartValue, RK,
3956                                            ReducedPartRdx, RdxPart);
3957       else
3958         ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
3959     }
3960   }
3961 
3962   // Create the reduction after the loop. Note that inloop reductions create the
3963   // target reduction in the loop using a Reduction recipe.
3964   if (VF.isVector() && !PhiR->isInLoop()) {
3965     ReducedPartRdx =
3966         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, OrigPhi);
3967     // If the reduction can be performed in a smaller type, we need to extend
3968     // the reduction to the wider type before we branch to the original loop.
3969     if (PhiTy != RdxDesc.getRecurrenceType())
3970       ReducedPartRdx = RdxDesc.isSigned()
3971                            ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
3972                            : Builder.CreateZExt(ReducedPartRdx, PhiTy);
3973   }
3974 
3975   PHINode *ResumePhi =
3976       dyn_cast<PHINode>(PhiR->getStartValue()->getUnderlyingValue());
3977 
3978   // Create a phi node that merges control-flow from the backedge-taken check
3979   // block and the middle block.
3980   PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx",
3981                                         LoopScalarPreHeader->getTerminator());
3982 
3983   // If we are fixing reductions in the epilogue loop then we should already
3984   // have created a bc.merge.rdx Phi after the main vector body. Ensure that
3985   // we carry over the incoming values correctly.
3986   for (auto *Incoming : predecessors(LoopScalarPreHeader)) {
3987     if (Incoming == LoopMiddleBlock)
3988       BCBlockPhi->addIncoming(ReducedPartRdx, Incoming);
3989     else if (ResumePhi && llvm::is_contained(ResumePhi->blocks(), Incoming))
3990       BCBlockPhi->addIncoming(ResumePhi->getIncomingValueForBlock(Incoming),
3991                               Incoming);
3992     else
3993       BCBlockPhi->addIncoming(ReductionStartValue, Incoming);
3994   }
3995 
3996   // Set the resume value for this reduction
3997   ReductionResumeValues.insert({&RdxDesc, BCBlockPhi});
3998 
3999   // Now, we need to fix the users of the reduction variable
4000   // inside and outside of the scalar remainder loop.
4001 
4002   // We know that the loop is in LCSSA form. We need to update the PHI nodes
4003   // in the exit blocks.  See comment on analogous loop in
4004   // fixFirstOrderRecurrence for a more complete explaination of the logic.
4005   if (!Cost->requiresScalarEpilogue(VF))
4006     for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4007       if (llvm::is_contained(LCSSAPhi.incoming_values(), LoopExitInst))
4008         LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
4009 
4010   // Fix the scalar loop reduction variable with the incoming reduction sum
4011   // from the vector body and from the backedge value.
4012   int IncomingEdgeBlockIdx =
4013       OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4014   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4015   // Pick the other block.
4016   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4017   OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4018   OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4019 }
4020 
4021 void InnerLoopVectorizer::clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
4022                                                   VPTransformState &State) {
4023   RecurKind RK = RdxDesc.getRecurrenceKind();
4024   if (RK != RecurKind::Add && RK != RecurKind::Mul)
4025     return;
4026 
4027   Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
4028   assert(LoopExitInstr && "null loop exit instruction");
4029   SmallVector<Instruction *, 8> Worklist;
4030   SmallPtrSet<Instruction *, 8> Visited;
4031   Worklist.push_back(LoopExitInstr);
4032   Visited.insert(LoopExitInstr);
4033 
4034   while (!Worklist.empty()) {
4035     Instruction *Cur = Worklist.pop_back_val();
4036     if (isa<OverflowingBinaryOperator>(Cur))
4037       for (unsigned Part = 0; Part < UF; ++Part) {
4038         // FIXME: Should not rely on getVPValue at this point.
4039         Value *V = State.get(State.Plan->getVPValue(Cur, true), Part);
4040         cast<Instruction>(V)->dropPoisonGeneratingFlags();
4041       }
4042 
4043     for (User *U : Cur->users()) {
4044       Instruction *UI = cast<Instruction>(U);
4045       if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
4046           Visited.insert(UI).second)
4047         Worklist.push_back(UI);
4048     }
4049   }
4050 }
4051 
4052 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) {
4053   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4054     if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1)
4055       // Some phis were already hand updated by the reduction and recurrence
4056       // code above, leave them alone.
4057       continue;
4058 
4059     auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
4060     // Non-instruction incoming values will have only one value.
4061 
4062     VPLane Lane = VPLane::getFirstLane();
4063     if (isa<Instruction>(IncomingValue) &&
4064         !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue),
4065                                            VF))
4066       Lane = VPLane::getLastLaneForVF(VF);
4067 
4068     // Can be a loop invariant incoming value or the last scalar value to be
4069     // extracted from the vectorized loop.
4070     // FIXME: Should not rely on getVPValue at this point.
4071     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4072     Value *lastIncomingValue =
4073         OrigLoop->isLoopInvariant(IncomingValue)
4074             ? IncomingValue
4075             : State.get(State.Plan->getVPValue(IncomingValue, true),
4076                         VPIteration(UF - 1, Lane));
4077     LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
4078   }
4079 }
4080 
4081 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4082   // The basic block and loop containing the predicated instruction.
4083   auto *PredBB = PredInst->getParent();
4084   auto *VectorLoop = LI->getLoopFor(PredBB);
4085 
4086   // Initialize a worklist with the operands of the predicated instruction.
4087   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4088 
4089   // Holds instructions that we need to analyze again. An instruction may be
4090   // reanalyzed if we don't yet know if we can sink it or not.
4091   SmallVector<Instruction *, 8> InstsToReanalyze;
4092 
4093   // Returns true if a given use occurs in the predicated block. Phi nodes use
4094   // their operands in their corresponding predecessor blocks.
4095   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4096     auto *I = cast<Instruction>(U.getUser());
4097     BasicBlock *BB = I->getParent();
4098     if (auto *Phi = dyn_cast<PHINode>(I))
4099       BB = Phi->getIncomingBlock(
4100           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4101     return BB == PredBB;
4102   };
4103 
4104   // Iteratively sink the scalarized operands of the predicated instruction
4105   // into the block we created for it. When an instruction is sunk, it's
4106   // operands are then added to the worklist. The algorithm ends after one pass
4107   // through the worklist doesn't sink a single instruction.
4108   bool Changed;
4109   do {
4110     // Add the instructions that need to be reanalyzed to the worklist, and
4111     // reset the changed indicator.
4112     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4113     InstsToReanalyze.clear();
4114     Changed = false;
4115 
4116     while (!Worklist.empty()) {
4117       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4118 
4119       // We can't sink an instruction if it is a phi node, is not in the loop,
4120       // or may have side effects.
4121       if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) ||
4122           I->mayHaveSideEffects())
4123         continue;
4124 
4125       // If the instruction is already in PredBB, check if we can sink its
4126       // operands. In that case, VPlan's sinkScalarOperands() succeeded in
4127       // sinking the scalar instruction I, hence it appears in PredBB; but it
4128       // may have failed to sink I's operands (recursively), which we try
4129       // (again) here.
4130       if (I->getParent() == PredBB) {
4131         Worklist.insert(I->op_begin(), I->op_end());
4132         continue;
4133       }
4134 
4135       // It's legal to sink the instruction if all its uses occur in the
4136       // predicated block. Otherwise, there's nothing to do yet, and we may
4137       // need to reanalyze the instruction.
4138       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4139         InstsToReanalyze.push_back(I);
4140         continue;
4141       }
4142 
4143       // Move the instruction to the beginning of the predicated block, and add
4144       // it's operands to the worklist.
4145       I->moveBefore(&*PredBB->getFirstInsertionPt());
4146       Worklist.insert(I->op_begin(), I->op_end());
4147 
4148       // The sinking may have enabled other instructions to be sunk, so we will
4149       // need to iterate.
4150       Changed = true;
4151     }
4152   } while (Changed);
4153 }
4154 
4155 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
4156   for (PHINode *OrigPhi : OrigPHIsToFix) {
4157     VPWidenPHIRecipe *VPPhi =
4158         cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi));
4159     PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
4160     // Make sure the builder has a valid insert point.
4161     Builder.SetInsertPoint(NewPhi);
4162     for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) {
4163       VPValue *Inc = VPPhi->getIncomingValue(i);
4164       VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i);
4165       NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]);
4166     }
4167   }
4168 }
4169 
4170 bool InnerLoopVectorizer::useOrderedReductions(
4171     const RecurrenceDescriptor &RdxDesc) {
4172   return Cost->useOrderedReductions(RdxDesc);
4173 }
4174 
4175 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
4176                                               VPWidenPHIRecipe *PhiR,
4177                                               VPTransformState &State) {
4178   assert(EnableVPlanNativePath &&
4179          "Non-native vplans are not expected to have VPWidenPHIRecipes.");
4180   // Currently we enter here in the VPlan-native path for non-induction
4181   // PHIs where all control flow is uniform. We simply widen these PHIs.
4182   // Create a vector phi with no operands - the vector phi operands will be
4183   // set at the end of vector code generation.
4184   Type *VecTy = (State.VF.isScalar())
4185                     ? PN->getType()
4186                     : VectorType::get(PN->getType(), State.VF);
4187   Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4188   State.set(PhiR, VecPhi, 0);
4189   OrigPHIsToFix.push_back(cast<PHINode>(PN));
4190 }
4191 
4192 /// A helper function for checking whether an integer division-related
4193 /// instruction may divide by zero (in which case it must be predicated if
4194 /// executed conditionally in the scalar code).
4195 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
4196 /// Non-zero divisors that are non compile-time constants will not be
4197 /// converted into multiplication, so we will still end up scalarizing
4198 /// the division, but can do so w/o predication.
4199 static bool mayDivideByZero(Instruction &I) {
4200   assert((I.getOpcode() == Instruction::UDiv ||
4201           I.getOpcode() == Instruction::SDiv ||
4202           I.getOpcode() == Instruction::URem ||
4203           I.getOpcode() == Instruction::SRem) &&
4204          "Unexpected instruction");
4205   Value *Divisor = I.getOperand(1);
4206   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4207   return !CInt || CInt->isZero();
4208 }
4209 
4210 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def,
4211                                                VPUser &ArgOperands,
4212                                                VPTransformState &State) {
4213   assert(!isa<DbgInfoIntrinsic>(I) &&
4214          "DbgInfoIntrinsic should have been dropped during VPlan construction");
4215   setDebugLocFromInst(&I);
4216 
4217   Module *M = I.getParent()->getParent()->getParent();
4218   auto *CI = cast<CallInst>(&I);
4219 
4220   SmallVector<Type *, 4> Tys;
4221   for (Value *ArgOperand : CI->args())
4222     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue()));
4223 
4224   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4225 
4226   // The flag shows whether we use Intrinsic or a usual Call for vectorized
4227   // version of the instruction.
4228   // Is it beneficial to perform intrinsic call compared to lib call?
4229   bool NeedToScalarize = false;
4230   InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
4231   InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0;
4232   bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
4233   assert((UseVectorIntrinsic || !NeedToScalarize) &&
4234          "Instruction should be scalarized elsewhere.");
4235   assert((IntrinsicCost.isValid() || CallCost.isValid()) &&
4236          "Either the intrinsic cost or vector call cost must be valid");
4237 
4238   for (unsigned Part = 0; Part < UF; ++Part) {
4239     SmallVector<Type *, 2> TysForDecl = {CI->getType()};
4240     SmallVector<Value *, 4> Args;
4241     for (auto &I : enumerate(ArgOperands.operands())) {
4242       // Some intrinsics have a scalar argument - don't replace it with a
4243       // vector.
4244       Value *Arg;
4245       if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
4246         Arg = State.get(I.value(), Part);
4247       else {
4248         Arg = State.get(I.value(), VPIteration(0, 0));
4249         if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index()))
4250           TysForDecl.push_back(Arg->getType());
4251       }
4252       Args.push_back(Arg);
4253     }
4254 
4255     Function *VectorF;
4256     if (UseVectorIntrinsic) {
4257       // Use vector version of the intrinsic.
4258       if (VF.isVector())
4259         TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
4260       VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4261       assert(VectorF && "Can't retrieve vector intrinsic.");
4262     } else {
4263       // Use vector version of the function call.
4264       const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
4265 #ifndef NDEBUG
4266       assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
4267              "Can't create vector function.");
4268 #endif
4269         VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
4270     }
4271       SmallVector<OperandBundleDef, 1> OpBundles;
4272       CI->getOperandBundlesAsDefs(OpBundles);
4273       CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);
4274 
4275       if (isa<FPMathOperator>(V))
4276         V->copyFastMathFlags(CI);
4277 
4278       State.set(Def, V, Part);
4279       addMetadata(V, &I);
4280   }
4281 }
4282 
4283 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
4284   // We should not collect Scalars more than once per VF. Right now, this
4285   // function is called from collectUniformsAndScalars(), which already does
4286   // this check. Collecting Scalars for VF=1 does not make any sense.
4287   assert(VF.isVector() && Scalars.find(VF) == Scalars.end() &&
4288          "This function should not be visited twice for the same VF");
4289 
4290   // This avoids any chances of creating a REPLICATE recipe during planning
4291   // since that would result in generation of scalarized code during execution,
4292   // which is not supported for scalable vectors.
4293   if (VF.isScalable()) {
4294     Scalars[VF].insert(Uniforms[VF].begin(), Uniforms[VF].end());
4295     return;
4296   }
4297 
4298   SmallSetVector<Instruction *, 8> Worklist;
4299 
4300   // These sets are used to seed the analysis with pointers used by memory
4301   // accesses that will remain scalar.
4302   SmallSetVector<Instruction *, 8> ScalarPtrs;
4303   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
4304   auto *Latch = TheLoop->getLoopLatch();
4305 
4306   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
4307   // The pointer operands of loads and stores will be scalar as long as the
4308   // memory access is not a gather or scatter operation. The value operand of a
4309   // store will remain scalar if the store is scalarized.
4310   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
4311     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
4312     assert(WideningDecision != CM_Unknown &&
4313            "Widening decision should be ready at this moment");
4314     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
4315       if (Ptr == Store->getValueOperand())
4316         return WideningDecision == CM_Scalarize;
4317     assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
4318            "Ptr is neither a value or pointer operand");
4319     return WideningDecision != CM_GatherScatter;
4320   };
4321 
4322   // A helper that returns true if the given value is a bitcast or
4323   // getelementptr instruction contained in the loop.
4324   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
4325     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
4326             isa<GetElementPtrInst>(V)) &&
4327            !TheLoop->isLoopInvariant(V);
4328   };
4329 
4330   // A helper that evaluates a memory access's use of a pointer. If the use will
4331   // be a scalar use and the pointer is only used by memory accesses, we place
4332   // the pointer in ScalarPtrs. Otherwise, the pointer is placed in
4333   // PossibleNonScalarPtrs.
4334   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
4335     // We only care about bitcast and getelementptr instructions contained in
4336     // the loop.
4337     if (!isLoopVaryingBitCastOrGEP(Ptr))
4338       return;
4339 
4340     // If the pointer has already been identified as scalar (e.g., if it was
4341     // also identified as uniform), there's nothing to do.
4342     auto *I = cast<Instruction>(Ptr);
4343     if (Worklist.count(I))
4344       return;
4345 
4346     // If the use of the pointer will be a scalar use, and all users of the
4347     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
4348     // place the pointer in PossibleNonScalarPtrs.
4349     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
4350           return isa<LoadInst>(U) || isa<StoreInst>(U);
4351         }))
4352       ScalarPtrs.insert(I);
4353     else
4354       PossibleNonScalarPtrs.insert(I);
4355   };
4356 
4357   // We seed the scalars analysis with three classes of instructions: (1)
4358   // instructions marked uniform-after-vectorization and (2) bitcast,
4359   // getelementptr and (pointer) phi instructions used by memory accesses
4360   // requiring a scalar use.
4361   //
4362   // (1) Add to the worklist all instructions that have been identified as
4363   // uniform-after-vectorization.
4364   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
4365 
4366   // (2) Add to the worklist all bitcast and getelementptr instructions used by
4367   // memory accesses requiring a scalar use. The pointer operands of loads and
4368   // stores will be scalar as long as the memory accesses is not a gather or
4369   // scatter operation. The value operand of a store will remain scalar if the
4370   // store is scalarized.
4371   for (auto *BB : TheLoop->blocks())
4372     for (auto &I : *BB) {
4373       if (auto *Load = dyn_cast<LoadInst>(&I)) {
4374         evaluatePtrUse(Load, Load->getPointerOperand());
4375       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
4376         evaluatePtrUse(Store, Store->getPointerOperand());
4377         evaluatePtrUse(Store, Store->getValueOperand());
4378       }
4379     }
4380   for (auto *I : ScalarPtrs)
4381     if (!PossibleNonScalarPtrs.count(I)) {
4382       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
4383       Worklist.insert(I);
4384     }
4385 
4386   // Insert the forced scalars.
4387   // FIXME: Currently widenPHIInstruction() often creates a dead vector
4388   // induction variable when the PHI user is scalarized.
4389   auto ForcedScalar = ForcedScalars.find(VF);
4390   if (ForcedScalar != ForcedScalars.end())
4391     for (auto *I : ForcedScalar->second)
4392       Worklist.insert(I);
4393 
4394   // Expand the worklist by looking through any bitcasts and getelementptr
4395   // instructions we've already identified as scalar. This is similar to the
4396   // expansion step in collectLoopUniforms(); however, here we're only
4397   // expanding to include additional bitcasts and getelementptr instructions.
4398   unsigned Idx = 0;
4399   while (Idx != Worklist.size()) {
4400     Instruction *Dst = Worklist[Idx++];
4401     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
4402       continue;
4403     auto *Src = cast<Instruction>(Dst->getOperand(0));
4404     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
4405           auto *J = cast<Instruction>(U);
4406           return !TheLoop->contains(J) || Worklist.count(J) ||
4407                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
4408                   isScalarUse(J, Src));
4409         })) {
4410       Worklist.insert(Src);
4411       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
4412     }
4413   }
4414 
4415   // An induction variable will remain scalar if all users of the induction
4416   // variable and induction variable update remain scalar.
4417   for (auto &Induction : Legal->getInductionVars()) {
4418     auto *Ind = Induction.first;
4419     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4420 
4421     // If tail-folding is applied, the primary induction variable will be used
4422     // to feed a vector compare.
4423     if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
4424       continue;
4425 
4426     // Returns true if \p Indvar is a pointer induction that is used directly by
4427     // load/store instruction \p I.
4428     auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar,
4429                                               Instruction *I) {
4430       return Induction.second.getKind() ==
4431                  InductionDescriptor::IK_PtrInduction &&
4432              (isa<LoadInst>(I) || isa<StoreInst>(I)) &&
4433              Indvar == getLoadStorePointerOperand(I) && isScalarUse(I, Indvar);
4434     };
4435 
4436     // Determine if all users of the induction variable are scalar after
4437     // vectorization.
4438     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4439       auto *I = cast<Instruction>(U);
4440       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
4441              IsDirectLoadStoreFromPtrIndvar(Ind, I);
4442     });
4443     if (!ScalarInd)
4444       continue;
4445 
4446     // Determine if all users of the induction variable update instruction are
4447     // scalar after vectorization.
4448     auto ScalarIndUpdate =
4449         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4450           auto *I = cast<Instruction>(U);
4451           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
4452                  IsDirectLoadStoreFromPtrIndvar(IndUpdate, I);
4453         });
4454     if (!ScalarIndUpdate)
4455       continue;
4456 
4457     // The induction variable and its update instruction will remain scalar.
4458     Worklist.insert(Ind);
4459     Worklist.insert(IndUpdate);
4460     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
4461     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
4462                       << "\n");
4463   }
4464 
4465   Scalars[VF].insert(Worklist.begin(), Worklist.end());
4466 }
4467 
4468 bool LoopVectorizationCostModel::isScalarWithPredication(
4469     Instruction *I, ElementCount VF) const {
4470   if (!blockNeedsPredicationForAnyReason(I->getParent()))
4471     return false;
4472   switch(I->getOpcode()) {
4473   default:
4474     break;
4475   case Instruction::Load:
4476   case Instruction::Store: {
4477     if (!Legal->isMaskRequired(I))
4478       return false;
4479     auto *Ptr = getLoadStorePointerOperand(I);
4480     auto *Ty = getLoadStoreType(I);
4481     Type *VTy = Ty;
4482     if (VF.isVector())
4483       VTy = VectorType::get(Ty, VF);
4484     const Align Alignment = getLoadStoreAlignment(I);
4485     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
4486                                 TTI.isLegalMaskedGather(VTy, Alignment))
4487                             : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
4488                                 TTI.isLegalMaskedScatter(VTy, Alignment));
4489   }
4490   case Instruction::UDiv:
4491   case Instruction::SDiv:
4492   case Instruction::SRem:
4493   case Instruction::URem:
4494     return mayDivideByZero(*I);
4495   }
4496   return false;
4497 }
4498 
4499 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
4500     Instruction *I, ElementCount VF) {
4501   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
4502   assert(getWideningDecision(I, VF) == CM_Unknown &&
4503          "Decision should not be set yet.");
4504   auto *Group = getInterleavedAccessGroup(I);
4505   assert(Group && "Must have a group.");
4506 
4507   // If the instruction's allocated size doesn't equal it's type size, it
4508   // requires padding and will be scalarized.
4509   auto &DL = I->getModule()->getDataLayout();
4510   auto *ScalarTy = getLoadStoreType(I);
4511   if (hasIrregularType(ScalarTy, DL))
4512     return false;
4513 
4514   // Check if masking is required.
4515   // A Group may need masking for one of two reasons: it resides in a block that
4516   // needs predication, or it was decided to use masking to deal with gaps
4517   // (either a gap at the end of a load-access that may result in a speculative
4518   // load, or any gaps in a store-access).
4519   bool PredicatedAccessRequiresMasking =
4520       blockNeedsPredicationForAnyReason(I->getParent()) &&
4521       Legal->isMaskRequired(I);
4522   bool LoadAccessWithGapsRequiresEpilogMasking =
4523       isa<LoadInst>(I) && Group->requiresScalarEpilogue() &&
4524       !isScalarEpilogueAllowed();
4525   bool StoreAccessWithGapsRequiresMasking =
4526       isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor());
4527   if (!PredicatedAccessRequiresMasking &&
4528       !LoadAccessWithGapsRequiresEpilogMasking &&
4529       !StoreAccessWithGapsRequiresMasking)
4530     return true;
4531 
4532   // If masked interleaving is required, we expect that the user/target had
4533   // enabled it, because otherwise it either wouldn't have been created or
4534   // it should have been invalidated by the CostModel.
4535   assert(useMaskedInterleavedAccesses(TTI) &&
4536          "Masked interleave-groups for predicated accesses are not enabled.");
4537 
4538   if (Group->isReverse())
4539     return false;
4540 
4541   auto *Ty = getLoadStoreType(I);
4542   const Align Alignment = getLoadStoreAlignment(I);
4543   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
4544                           : TTI.isLegalMaskedStore(Ty, Alignment);
4545 }
4546 
4547 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
4548     Instruction *I, ElementCount VF) {
4549   // Get and ensure we have a valid memory instruction.
4550   assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction");
4551 
4552   auto *Ptr = getLoadStorePointerOperand(I);
4553   auto *ScalarTy = getLoadStoreType(I);
4554 
4555   // In order to be widened, the pointer should be consecutive, first of all.
4556   if (!Legal->isConsecutivePtr(ScalarTy, Ptr))
4557     return false;
4558 
4559   // If the instruction is a store located in a predicated block, it will be
4560   // scalarized.
4561   if (isScalarWithPredication(I, VF))
4562     return false;
4563 
4564   // If the instruction's allocated size doesn't equal it's type size, it
4565   // requires padding and will be scalarized.
4566   auto &DL = I->getModule()->getDataLayout();
4567   if (hasIrregularType(ScalarTy, DL))
4568     return false;
4569 
4570   return true;
4571 }
4572 
4573 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
4574   // We should not collect Uniforms more than once per VF. Right now,
4575   // this function is called from collectUniformsAndScalars(), which
4576   // already does this check. Collecting Uniforms for VF=1 does not make any
4577   // sense.
4578 
4579   assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
4580          "This function should not be visited twice for the same VF");
4581 
4582   // Visit the list of Uniforms. If we'll not find any uniform value, we'll
4583   // not analyze again.  Uniforms.count(VF) will return 1.
4584   Uniforms[VF].clear();
4585 
4586   // We now know that the loop is vectorizable!
4587   // Collect instructions inside the loop that will remain uniform after
4588   // vectorization.
4589 
4590   // Global values, params and instructions outside of current loop are out of
4591   // scope.
4592   auto isOutOfScope = [&](Value *V) -> bool {
4593     Instruction *I = dyn_cast<Instruction>(V);
4594     return (!I || !TheLoop->contains(I));
4595   };
4596 
4597   // Worklist containing uniform instructions demanding lane 0.
4598   SetVector<Instruction *> Worklist;
4599   BasicBlock *Latch = TheLoop->getLoopLatch();
4600 
4601   // Add uniform instructions demanding lane 0 to the worklist. Instructions
4602   // that are scalar with predication must not be considered uniform after
4603   // vectorization, because that would create an erroneous replicating region
4604   // where only a single instance out of VF should be formed.
4605   // TODO: optimize such seldom cases if found important, see PR40816.
4606   auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
4607     if (isOutOfScope(I)) {
4608       LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
4609                         << *I << "\n");
4610       return;
4611     }
4612     if (isScalarWithPredication(I, VF)) {
4613       LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
4614                         << *I << "\n");
4615       return;
4616     }
4617     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
4618     Worklist.insert(I);
4619   };
4620 
4621   // Start with the conditional branch. If the branch condition is an
4622   // instruction contained in the loop that is only used by the branch, it is
4623   // uniform.
4624   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
4625   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
4626     addToWorklistIfAllowed(Cmp);
4627 
4628   auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
4629     InstWidening WideningDecision = getWideningDecision(I, VF);
4630     assert(WideningDecision != CM_Unknown &&
4631            "Widening decision should be ready at this moment");
4632 
4633     // A uniform memory op is itself uniform.  We exclude uniform stores
4634     // here as they demand the last lane, not the first one.
4635     if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) {
4636       assert(WideningDecision == CM_Scalarize);
4637       return true;
4638     }
4639 
4640     return (WideningDecision == CM_Widen ||
4641             WideningDecision == CM_Widen_Reverse ||
4642             WideningDecision == CM_Interleave);
4643   };
4644 
4645 
4646   // Returns true if Ptr is the pointer operand of a memory access instruction
4647   // I, and I is known to not require scalarization.
4648   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
4649     return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
4650   };
4651 
4652   // Holds a list of values which are known to have at least one uniform use.
4653   // Note that there may be other uses which aren't uniform.  A "uniform use"
4654   // here is something which only demands lane 0 of the unrolled iterations;
4655   // it does not imply that all lanes produce the same value (e.g. this is not
4656   // the usual meaning of uniform)
4657   SetVector<Value *> HasUniformUse;
4658 
4659   // Scan the loop for instructions which are either a) known to have only
4660   // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
4661   for (auto *BB : TheLoop->blocks())
4662     for (auto &I : *BB) {
4663       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) {
4664         switch (II->getIntrinsicID()) {
4665         case Intrinsic::sideeffect:
4666         case Intrinsic::experimental_noalias_scope_decl:
4667         case Intrinsic::assume:
4668         case Intrinsic::lifetime_start:
4669         case Intrinsic::lifetime_end:
4670           if (TheLoop->hasLoopInvariantOperands(&I))
4671             addToWorklistIfAllowed(&I);
4672           break;
4673         default:
4674           break;
4675         }
4676       }
4677 
4678       // ExtractValue instructions must be uniform, because the operands are
4679       // known to be loop-invariant.
4680       if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) {
4681         assert(isOutOfScope(EVI->getAggregateOperand()) &&
4682                "Expected aggregate value to be loop invariant");
4683         addToWorklistIfAllowed(EVI);
4684         continue;
4685       }
4686 
4687       // If there's no pointer operand, there's nothing to do.
4688       auto *Ptr = getLoadStorePointerOperand(&I);
4689       if (!Ptr)
4690         continue;
4691 
4692       // A uniform memory op is itself uniform.  We exclude uniform stores
4693       // here as they demand the last lane, not the first one.
4694       if (isa<LoadInst>(I) && Legal->isUniformMemOp(I))
4695         addToWorklistIfAllowed(&I);
4696 
4697       if (isUniformDecision(&I, VF)) {
4698         assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check");
4699         HasUniformUse.insert(Ptr);
4700       }
4701     }
4702 
4703   // Add to the worklist any operands which have *only* uniform (e.g. lane 0
4704   // demanding) users.  Since loops are assumed to be in LCSSA form, this
4705   // disallows uses outside the loop as well.
4706   for (auto *V : HasUniformUse) {
4707     if (isOutOfScope(V))
4708       continue;
4709     auto *I = cast<Instruction>(V);
4710     auto UsersAreMemAccesses =
4711       llvm::all_of(I->users(), [&](User *U) -> bool {
4712         return isVectorizedMemAccessUse(cast<Instruction>(U), V);
4713       });
4714     if (UsersAreMemAccesses)
4715       addToWorklistIfAllowed(I);
4716   }
4717 
4718   // Expand Worklist in topological order: whenever a new instruction
4719   // is added , its users should be already inside Worklist.  It ensures
4720   // a uniform instruction will only be used by uniform instructions.
4721   unsigned idx = 0;
4722   while (idx != Worklist.size()) {
4723     Instruction *I = Worklist[idx++];
4724 
4725     for (auto OV : I->operand_values()) {
4726       // isOutOfScope operands cannot be uniform instructions.
4727       if (isOutOfScope(OV))
4728         continue;
4729       // First order recurrence Phi's should typically be considered
4730       // non-uniform.
4731       auto *OP = dyn_cast<PHINode>(OV);
4732       if (OP && Legal->isFirstOrderRecurrence(OP))
4733         continue;
4734       // If all the users of the operand are uniform, then add the
4735       // operand into the uniform worklist.
4736       auto *OI = cast<Instruction>(OV);
4737       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
4738             auto *J = cast<Instruction>(U);
4739             return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
4740           }))
4741         addToWorklistIfAllowed(OI);
4742     }
4743   }
4744 
4745   // For an instruction to be added into Worklist above, all its users inside
4746   // the loop should also be in Worklist. However, this condition cannot be
4747   // true for phi nodes that form a cyclic dependence. We must process phi
4748   // nodes separately. An induction variable will remain uniform if all users
4749   // of the induction variable and induction variable update remain uniform.
4750   // The code below handles both pointer and non-pointer induction variables.
4751   for (auto &Induction : Legal->getInductionVars()) {
4752     auto *Ind = Induction.first;
4753     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4754 
4755     // Determine if all users of the induction variable are uniform after
4756     // vectorization.
4757     auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4758       auto *I = cast<Instruction>(U);
4759       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
4760              isVectorizedMemAccessUse(I, Ind);
4761     });
4762     if (!UniformInd)
4763       continue;
4764 
4765     // Determine if all users of the induction variable update instruction are
4766     // uniform after vectorization.
4767     auto UniformIndUpdate =
4768         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4769           auto *I = cast<Instruction>(U);
4770           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
4771                  isVectorizedMemAccessUse(I, IndUpdate);
4772         });
4773     if (!UniformIndUpdate)
4774       continue;
4775 
4776     // The induction variable and its update instruction will remain uniform.
4777     addToWorklistIfAllowed(Ind);
4778     addToWorklistIfAllowed(IndUpdate);
4779   }
4780 
4781   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
4782 }
4783 
4784 bool LoopVectorizationCostModel::runtimeChecksRequired() {
4785   LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
4786 
4787   if (Legal->getRuntimePointerChecking()->Need) {
4788     reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
4789         "runtime pointer checks needed. Enable vectorization of this "
4790         "loop with '#pragma clang loop vectorize(enable)' when "
4791         "compiling with -Os/-Oz",
4792         "CantVersionLoopWithOptForSize", ORE, TheLoop);
4793     return true;
4794   }
4795 
4796   if (!PSE.getPredicate().isAlwaysTrue()) {
4797     reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
4798         "runtime SCEV checks needed. Enable vectorization of this "
4799         "loop with '#pragma clang loop vectorize(enable)' when "
4800         "compiling with -Os/-Oz",
4801         "CantVersionLoopWithOptForSize", ORE, TheLoop);
4802     return true;
4803   }
4804 
4805   // FIXME: Avoid specializing for stride==1 instead of bailing out.
4806   if (!Legal->getLAI()->getSymbolicStrides().empty()) {
4807     reportVectorizationFailure("Runtime stride check for small trip count",
4808         "runtime stride == 1 checks needed. Enable vectorization of "
4809         "this loop without such check by compiling with -Os/-Oz",
4810         "CantVersionLoopWithOptForSize", ORE, TheLoop);
4811     return true;
4812   }
4813 
4814   return false;
4815 }
4816 
4817 ElementCount
4818 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
4819   if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors)
4820     return ElementCount::getScalable(0);
4821 
4822   if (Hints->isScalableVectorizationDisabled()) {
4823     reportVectorizationInfo("Scalable vectorization is explicitly disabled",
4824                             "ScalableVectorizationDisabled", ORE, TheLoop);
4825     return ElementCount::getScalable(0);
4826   }
4827 
4828   LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n");
4829 
4830   auto MaxScalableVF = ElementCount::getScalable(
4831       std::numeric_limits<ElementCount::ScalarTy>::max());
4832 
4833   // Test that the loop-vectorizer can legalize all operations for this MaxVF.
4834   // FIXME: While for scalable vectors this is currently sufficient, this should
4835   // be replaced by a more detailed mechanism that filters out specific VFs,
4836   // instead of invalidating vectorization for a whole set of VFs based on the
4837   // MaxVF.
4838 
4839   // Disable scalable vectorization if the loop contains unsupported reductions.
4840   if (!canVectorizeReductions(MaxScalableVF)) {
4841     reportVectorizationInfo(
4842         "Scalable vectorization not supported for the reduction "
4843         "operations found in this loop.",
4844         "ScalableVFUnfeasible", ORE, TheLoop);
4845     return ElementCount::getScalable(0);
4846   }
4847 
4848   // Disable scalable vectorization if the loop contains any instructions
4849   // with element types not supported for scalable vectors.
4850   if (any_of(ElementTypesInLoop, [&](Type *Ty) {
4851         return !Ty->isVoidTy() &&
4852                !this->TTI.isElementTypeLegalForScalableVector(Ty);
4853       })) {
4854     reportVectorizationInfo("Scalable vectorization is not supported "
4855                             "for all element types found in this loop.",
4856                             "ScalableVFUnfeasible", ORE, TheLoop);
4857     return ElementCount::getScalable(0);
4858   }
4859 
4860   if (Legal->isSafeForAnyVectorWidth())
4861     return MaxScalableVF;
4862 
4863   // Limit MaxScalableVF by the maximum safe dependence distance.
4864   Optional<unsigned> MaxVScale = TTI.getMaxVScale();
4865   if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange))
4866     MaxVScale =
4867         TheFunction->getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax();
4868   MaxScalableVF = ElementCount::getScalable(
4869       MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);
4870   if (!MaxScalableVF)
4871     reportVectorizationInfo(
4872         "Max legal vector width too small, scalable vectorization "
4873         "unfeasible.",
4874         "ScalableVFUnfeasible", ORE, TheLoop);
4875 
4876   return MaxScalableVF;
4877 }
4878 
4879 FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF(
4880     unsigned ConstTripCount, ElementCount UserVF, bool FoldTailByMasking) {
4881   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
4882   unsigned SmallestType, WidestType;
4883   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
4884 
4885   // Get the maximum safe dependence distance in bits computed by LAA.
4886   // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
4887   // the memory accesses that is most restrictive (involved in the smallest
4888   // dependence distance).
4889   unsigned MaxSafeElements =
4890       PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType);
4891 
4892   auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements);
4893   auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements);
4894 
4895   LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF
4896                     << ".\n");
4897   LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF
4898                     << ".\n");
4899 
4900   // First analyze the UserVF, fall back if the UserVF should be ignored.
4901   if (UserVF) {
4902     auto MaxSafeUserVF =
4903         UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;
4904 
4905     if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) {
4906       // If `VF=vscale x N` is safe, then so is `VF=N`
4907       if (UserVF.isScalable())
4908         return FixedScalableVFPair(
4909             ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF);
4910       else
4911         return UserVF;
4912     }
4913 
4914     assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));
4915 
4916     // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it
4917     // is better to ignore the hint and let the compiler choose a suitable VF.
4918     if (!UserVF.isScalable()) {
4919       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
4920                         << " is unsafe, clamping to max safe VF="
4921                         << MaxSafeFixedVF << ".\n");
4922       ORE->emit([&]() {
4923         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
4924                                           TheLoop->getStartLoc(),
4925                                           TheLoop->getHeader())
4926                << "User-specified vectorization factor "
4927                << ore::NV("UserVectorizationFactor", UserVF)
4928                << " is unsafe, clamping to maximum safe vectorization factor "
4929                << ore::NV("VectorizationFactor", MaxSafeFixedVF);
4930       });
4931       return MaxSafeFixedVF;
4932     }
4933 
4934     if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) {
4935       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
4936                         << " is ignored because scalable vectors are not "
4937                            "available.\n");
4938       ORE->emit([&]() {
4939         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
4940                                           TheLoop->getStartLoc(),
4941                                           TheLoop->getHeader())
4942                << "User-specified vectorization factor "
4943                << ore::NV("UserVectorizationFactor", UserVF)
4944                << " is ignored because the target does not support scalable "
4945                   "vectors. The compiler will pick a more suitable value.";
4946       });
4947     } else {
4948       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
4949                         << " is unsafe. Ignoring scalable UserVF.\n");
4950       ORE->emit([&]() {
4951         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
4952                                           TheLoop->getStartLoc(),
4953                                           TheLoop->getHeader())
4954                << "User-specified vectorization factor "
4955                << ore::NV("UserVectorizationFactor", UserVF)
4956                << " is unsafe. Ignoring the hint to let the compiler pick a "
4957                   "more suitable value.";
4958       });
4959     }
4960   }
4961 
4962   LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
4963                     << " / " << WidestType << " bits.\n");
4964 
4965   FixedScalableVFPair Result(ElementCount::getFixed(1),
4966                              ElementCount::getScalable(0));
4967   if (auto MaxVF =
4968           getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
4969                                   MaxSafeFixedVF, FoldTailByMasking))
4970     Result.FixedVF = MaxVF;
4971 
4972   if (auto MaxVF =
4973           getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
4974                                   MaxSafeScalableVF, FoldTailByMasking))
4975     if (MaxVF.isScalable()) {
4976       Result.ScalableVF = MaxVF;
4977       LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
4978                         << "\n");
4979     }
4980 
4981   return Result;
4982 }
4983 
4984 FixedScalableVFPair
4985 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
4986   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
4987     // TODO: It may by useful to do since it's still likely to be dynamically
4988     // uniform if the target can skip.
4989     reportVectorizationFailure(
4990         "Not inserting runtime ptr check for divergent target",
4991         "runtime pointer checks needed. Not enabled for divergent target",
4992         "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
4993     return FixedScalableVFPair::getNone();
4994   }
4995 
4996   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
4997   LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
4998   if (TC == 1) {
4999     reportVectorizationFailure("Single iteration (non) loop",
5000         "loop trip count is one, irrelevant for vectorization",
5001         "SingleIterationLoop", ORE, TheLoop);
5002     return FixedScalableVFPair::getNone();
5003   }
5004 
5005   switch (ScalarEpilogueStatus) {
5006   case CM_ScalarEpilogueAllowed:
5007     return computeFeasibleMaxVF(TC, UserVF, false);
5008   case CM_ScalarEpilogueNotAllowedUsePredicate:
5009     LLVM_FALLTHROUGH;
5010   case CM_ScalarEpilogueNotNeededUsePredicate:
5011     LLVM_DEBUG(
5012         dbgs() << "LV: vector predicate hint/switch found.\n"
5013                << "LV: Not allowing scalar epilogue, creating predicated "
5014                << "vector loop.\n");
5015     break;
5016   case CM_ScalarEpilogueNotAllowedLowTripLoop:
5017     // fallthrough as a special case of OptForSize
5018   case CM_ScalarEpilogueNotAllowedOptSize:
5019     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
5020       LLVM_DEBUG(
5021           dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5022     else
5023       LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5024                         << "count.\n");
5025 
5026     // Bail if runtime checks are required, which are not good when optimising
5027     // for size.
5028     if (runtimeChecksRequired())
5029       return FixedScalableVFPair::getNone();
5030 
5031     break;
5032   }
5033 
5034   // The only loops we can vectorize without a scalar epilogue, are loops with
5035   // a bottom-test and a single exiting block. We'd have to handle the fact
5036   // that not every instruction executes on the last iteration.  This will
5037   // require a lane mask which varies through the vector loop body.  (TODO)
5038   if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5039     // If there was a tail-folding hint/switch, but we can't fold the tail by
5040     // masking, fallback to a vectorization with a scalar epilogue.
5041     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5042       LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5043                            "scalar epilogue instead.\n");
5044       ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5045       return computeFeasibleMaxVF(TC, UserVF, false);
5046     }
5047     return FixedScalableVFPair::getNone();
5048   }
5049 
5050   // Now try the tail folding
5051 
5052   // Invalidate interleave groups that require an epilogue if we can't mask
5053   // the interleave-group.
5054   if (!useMaskedInterleavedAccesses(TTI)) {
5055     assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5056            "No decisions should have been taken at this point");
5057     // Note: There is no need to invalidate any cost modeling decisions here, as
5058     // non where taken so far.
5059     InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5060   }
5061 
5062   FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF, true);
5063   // Avoid tail folding if the trip count is known to be a multiple of any VF
5064   // we chose.
5065   // FIXME: The condition below pessimises the case for fixed-width vectors,
5066   // when scalable VFs are also candidates for vectorization.
5067   if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) {
5068     ElementCount MaxFixedVF = MaxFactors.FixedVF;
5069     assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) &&
5070            "MaxFixedVF must be a power of 2");
5071     unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC
5072                                    : MaxFixedVF.getFixedValue();
5073     ScalarEvolution *SE = PSE.getSE();
5074     const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
5075     const SCEV *ExitCount = SE->getAddExpr(
5076         BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
5077     const SCEV *Rem = SE->getURemExpr(
5078         SE->applyLoopGuards(ExitCount, TheLoop),
5079         SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
5080     if (Rem->isZero()) {
5081       // Accept MaxFixedVF if we do not have a tail.
5082       LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
5083       return MaxFactors;
5084     }
5085   }
5086 
5087   // For scalable vectors don't use tail folding for low trip counts or
5088   // optimizing for code size. We only permit this if the user has explicitly
5089   // requested it.
5090   if (ScalarEpilogueStatus != CM_ScalarEpilogueNotNeededUsePredicate &&
5091       ScalarEpilogueStatus != CM_ScalarEpilogueNotAllowedUsePredicate &&
5092       MaxFactors.ScalableVF.isVector())
5093     MaxFactors.ScalableVF = ElementCount::getScalable(0);
5094 
5095   // If we don't know the precise trip count, or if the trip count that we
5096   // found modulo the vectorization factor is not zero, try to fold the tail
5097   // by masking.
5098   // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
5099   if (Legal->prepareToFoldTailByMasking()) {
5100     FoldTailByMasking = true;
5101     return MaxFactors;
5102   }
5103 
5104   // If there was a tail-folding hint/switch, but we can't fold the tail by
5105   // masking, fallback to a vectorization with a scalar epilogue.
5106   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5107     LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5108                          "scalar epilogue instead.\n");
5109     ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5110     return MaxFactors;
5111   }
5112 
5113   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
5114     LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
5115     return FixedScalableVFPair::getNone();
5116   }
5117 
5118   if (TC == 0) {
5119     reportVectorizationFailure(
5120         "Unable to calculate the loop count due to complex control flow",
5121         "unable to calculate the loop count due to complex control flow",
5122         "UnknownLoopCountComplexCFG", ORE, TheLoop);
5123     return FixedScalableVFPair::getNone();
5124   }
5125 
5126   reportVectorizationFailure(
5127       "Cannot optimize for size and vectorize at the same time.",
5128       "cannot optimize for size and vectorize at the same time. "
5129       "Enable vectorization of this loop with '#pragma clang loop "
5130       "vectorize(enable)' when compiling with -Os/-Oz",
5131       "NoTailLoopWithOptForSize", ORE, TheLoop);
5132   return FixedScalableVFPair::getNone();
5133 }
5134 
5135 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
5136     unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType,
5137     const ElementCount &MaxSafeVF, bool FoldTailByMasking) {
5138   bool ComputeScalableMaxVF = MaxSafeVF.isScalable();
5139   TypeSize WidestRegister = TTI.getRegisterBitWidth(
5140       ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
5141                            : TargetTransformInfo::RGK_FixedWidthVector);
5142 
5143   // Convenience function to return the minimum of two ElementCounts.
5144   auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) {
5145     assert((LHS.isScalable() == RHS.isScalable()) &&
5146            "Scalable flags must match");
5147     return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS;
5148   };
5149 
5150   // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
5151   // Note that both WidestRegister and WidestType may not be a powers of 2.
5152   auto MaxVectorElementCount = ElementCount::get(
5153       PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType),
5154       ComputeScalableMaxVF);
5155   MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
5156   LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
5157                     << (MaxVectorElementCount * WidestType) << " bits.\n");
5158 
5159   if (!MaxVectorElementCount) {
5160     LLVM_DEBUG(dbgs() << "LV: The target has no "
5161                       << (ComputeScalableMaxVF ? "scalable" : "fixed")
5162                       << " vector registers.\n");
5163     return ElementCount::getFixed(1);
5164   }
5165 
5166   const auto TripCountEC = ElementCount::getFixed(ConstTripCount);
5167   if (ConstTripCount &&
5168       ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) &&
5169       (!FoldTailByMasking || isPowerOf2_32(ConstTripCount))) {
5170     // If loop trip count (TC) is known at compile time there is no point in
5171     // choosing VF greater than TC (as done in the loop below). Select maximum
5172     // power of two which doesn't exceed TC.
5173     // If MaxVectorElementCount is scalable, we only fall back on a fixed VF
5174     // when the TC is less than or equal to the known number of lanes.
5175     auto ClampedConstTripCount = PowerOf2Floor(ConstTripCount);
5176     LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to maximum power of two not "
5177                          "exceeding the constant trip count: "
5178                       << ClampedConstTripCount << "\n");
5179     return ElementCount::getFixed(ClampedConstTripCount);
5180   }
5181 
5182   ElementCount MaxVF = MaxVectorElementCount;
5183   if (MaximizeBandwidth || (MaximizeBandwidth.getNumOccurrences() == 0 &&
5184                             TTI.shouldMaximizeVectorBandwidth())) {
5185     auto MaxVectorElementCountMaxBW = ElementCount::get(
5186         PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType),
5187         ComputeScalableMaxVF);
5188     MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
5189 
5190     // Collect all viable vectorization factors larger than the default MaxVF
5191     // (i.e. MaxVectorElementCount).
5192     SmallVector<ElementCount, 8> VFs;
5193     for (ElementCount VS = MaxVectorElementCount * 2;
5194          ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2)
5195       VFs.push_back(VS);
5196 
5197     // For each VF calculate its register usage.
5198     auto RUs = calculateRegisterUsage(VFs);
5199 
5200     // Select the largest VF which doesn't require more registers than existing
5201     // ones.
5202     for (int i = RUs.size() - 1; i >= 0; --i) {
5203       bool Selected = true;
5204       for (auto &pair : RUs[i].MaxLocalUsers) {
5205         unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5206         if (pair.second > TargetNumRegisters)
5207           Selected = false;
5208       }
5209       if (Selected) {
5210         MaxVF = VFs[i];
5211         break;
5212       }
5213     }
5214     if (ElementCount MinVF =
5215             TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) {
5216       if (ElementCount::isKnownLT(MaxVF, MinVF)) {
5217         LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
5218                           << ") with target's minimum: " << MinVF << '\n');
5219         MaxVF = MinVF;
5220       }
5221     }
5222 
5223     // Invalidate any widening decisions we might have made, in case the loop
5224     // requires prediction (decided later), but we have already made some
5225     // load/store widening decisions.
5226     invalidateCostModelingDecisions();
5227   }
5228   return MaxVF;
5229 }
5230 
5231 Optional<unsigned> LoopVectorizationCostModel::getVScaleForTuning() const {
5232   if (TheFunction->hasFnAttribute(Attribute::VScaleRange)) {
5233     auto Attr = TheFunction->getFnAttribute(Attribute::VScaleRange);
5234     auto Min = Attr.getVScaleRangeMin();
5235     auto Max = Attr.getVScaleRangeMax();
5236     if (Max && Min == Max)
5237       return Max;
5238   }
5239 
5240   return TTI.getVScaleForTuning();
5241 }
5242 
5243 bool LoopVectorizationCostModel::isMoreProfitable(
5244     const VectorizationFactor &A, const VectorizationFactor &B) const {
5245   InstructionCost CostA = A.Cost;
5246   InstructionCost CostB = B.Cost;
5247 
5248   unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop);
5249 
5250   if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking &&
5251       MaxTripCount) {
5252     // If we are folding the tail and the trip count is a known (possibly small)
5253     // constant, the trip count will be rounded up to an integer number of
5254     // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF),
5255     // which we compare directly. When not folding the tail, the total cost will
5256     // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is
5257     // approximated with the per-lane cost below instead of using the tripcount
5258     // as here.
5259     auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue());
5260     auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue());
5261     return RTCostA < RTCostB;
5262   }
5263 
5264   // Improve estimate for the vector width if it is scalable.
5265   unsigned EstimatedWidthA = A.Width.getKnownMinValue();
5266   unsigned EstimatedWidthB = B.Width.getKnownMinValue();
5267   if (Optional<unsigned> VScale = getVScaleForTuning()) {
5268     if (A.Width.isScalable())
5269       EstimatedWidthA *= VScale.getValue();
5270     if (B.Width.isScalable())
5271       EstimatedWidthB *= VScale.getValue();
5272   }
5273 
5274   // Assume vscale may be larger than 1 (or the value being tuned for),
5275   // so that scalable vectorization is slightly favorable over fixed-width
5276   // vectorization.
5277   if (A.Width.isScalable() && !B.Width.isScalable())
5278     return (CostA * B.Width.getFixedValue()) <= (CostB * EstimatedWidthA);
5279 
5280   // To avoid the need for FP division:
5281   //      (CostA / A.Width) < (CostB / B.Width)
5282   // <=>  (CostA * B.Width) < (CostB * A.Width)
5283   return (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA);
5284 }
5285 
5286 VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor(
5287     const ElementCountSet &VFCandidates) {
5288   InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first;
5289   LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
5290   assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
5291   assert(VFCandidates.count(ElementCount::getFixed(1)) &&
5292          "Expected Scalar VF to be a candidate");
5293 
5294   const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost);
5295   VectorizationFactor ChosenFactor = ScalarCost;
5296 
5297   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
5298   if (ForceVectorization && VFCandidates.size() > 1) {
5299     // Ignore scalar width, because the user explicitly wants vectorization.
5300     // Initialize cost to max so that VF = 2 is, at least, chosen during cost
5301     // evaluation.
5302     ChosenFactor.Cost = InstructionCost::getMax();
5303   }
5304 
5305   SmallVector<InstructionVFPair> InvalidCosts;
5306   for (const auto &i : VFCandidates) {
5307     // The cost for scalar VF=1 is already calculated, so ignore it.
5308     if (i.isScalar())
5309       continue;
5310 
5311     VectorizationCostTy C = expectedCost(i, &InvalidCosts);
5312     VectorizationFactor Candidate(i, C.first);
5313 
5314 #ifndef NDEBUG
5315     unsigned AssumedMinimumVscale = 1;
5316     if (Optional<unsigned> VScale = getVScaleForTuning())
5317       AssumedMinimumVscale = VScale.getValue();
5318     unsigned Width =
5319         Candidate.Width.isScalable()
5320             ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale
5321             : Candidate.Width.getFixedValue();
5322     LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
5323                       << " costs: " << (Candidate.Cost / Width));
5324     if (i.isScalable())
5325       LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of "
5326                         << AssumedMinimumVscale << ")");
5327     LLVM_DEBUG(dbgs() << ".\n");
5328 #endif
5329 
5330     if (!C.second && !ForceVectorization) {
5331       LLVM_DEBUG(
5332           dbgs() << "LV: Not considering vector loop of width " << i
5333                  << " because it will not generate any vector instructions.\n");
5334       continue;
5335     }
5336 
5337     // If profitable add it to ProfitableVF list.
5338     if (isMoreProfitable(Candidate, ScalarCost))
5339       ProfitableVFs.push_back(Candidate);
5340 
5341     if (isMoreProfitable(Candidate, ChosenFactor))
5342       ChosenFactor = Candidate;
5343   }
5344 
5345   // Emit a report of VFs with invalid costs in the loop.
5346   if (!InvalidCosts.empty()) {
5347     // Group the remarks per instruction, keeping the instruction order from
5348     // InvalidCosts.
5349     std::map<Instruction *, unsigned> Numbering;
5350     unsigned I = 0;
5351     for (auto &Pair : InvalidCosts)
5352       if (!Numbering.count(Pair.first))
5353         Numbering[Pair.first] = I++;
5354 
5355     // Sort the list, first on instruction(number) then on VF.
5356     llvm::sort(InvalidCosts,
5357                [&Numbering](InstructionVFPair &A, InstructionVFPair &B) {
5358                  if (Numbering[A.first] != Numbering[B.first])
5359                    return Numbering[A.first] < Numbering[B.first];
5360                  ElementCountComparator ECC;
5361                  return ECC(A.second, B.second);
5362                });
5363 
5364     // For a list of ordered instruction-vf pairs:
5365     //   [(load, vf1), (load, vf2), (store, vf1)]
5366     // Group the instructions together to emit separate remarks for:
5367     //   load  (vf1, vf2)
5368     //   store (vf1)
5369     auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts);
5370     auto Subset = ArrayRef<InstructionVFPair>();
5371     do {
5372       if (Subset.empty())
5373         Subset = Tail.take_front(1);
5374 
5375       Instruction *I = Subset.front().first;
5376 
5377       // If the next instruction is different, or if there are no other pairs,
5378       // emit a remark for the collated subset. e.g.
5379       //   [(load, vf1), (load, vf2))]
5380       // to emit:
5381       //  remark: invalid costs for 'load' at VF=(vf, vf2)
5382       if (Subset == Tail || Tail[Subset.size()].first != I) {
5383         std::string OutString;
5384         raw_string_ostream OS(OutString);
5385         assert(!Subset.empty() && "Unexpected empty range");
5386         OS << "Instruction with invalid costs prevented vectorization at VF=(";
5387         for (auto &Pair : Subset)
5388           OS << (Pair.second == Subset.front().second ? "" : ", ")
5389              << Pair.second;
5390         OS << "):";
5391         if (auto *CI = dyn_cast<CallInst>(I))
5392           OS << " call to " << CI->getCalledFunction()->getName();
5393         else
5394           OS << " " << I->getOpcodeName();
5395         OS.flush();
5396         reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I);
5397         Tail = Tail.drop_front(Subset.size());
5398         Subset = {};
5399       } else
5400         // Grow the subset by one element
5401         Subset = Tail.take_front(Subset.size() + 1);
5402     } while (!Tail.empty());
5403   }
5404 
5405   if (!EnableCondStoresVectorization && NumPredStores) {
5406     reportVectorizationFailure("There are conditional stores.",
5407         "store that is conditionally executed prevents vectorization",
5408         "ConditionalStore", ORE, TheLoop);
5409     ChosenFactor = ScalarCost;
5410   }
5411 
5412   LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() &&
5413                  ChosenFactor.Cost >= ScalarCost.Cost) dbgs()
5414              << "LV: Vectorization seems to be not beneficial, "
5415              << "but was forced by a user.\n");
5416   LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n");
5417   return ChosenFactor;
5418 }
5419 
5420 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization(
5421     const Loop &L, ElementCount VF) const {
5422   // Cross iteration phis such as reductions need special handling and are
5423   // currently unsupported.
5424   if (any_of(L.getHeader()->phis(),
5425              [&](PHINode &Phi) { return Legal->isFirstOrderRecurrence(&Phi); }))
5426     return false;
5427 
5428   // Phis with uses outside of the loop require special handling and are
5429   // currently unsupported.
5430   for (auto &Entry : Legal->getInductionVars()) {
5431     // Look for uses of the value of the induction at the last iteration.
5432     Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch());
5433     for (User *U : PostInc->users())
5434       if (!L.contains(cast<Instruction>(U)))
5435         return false;
5436     // Look for uses of penultimate value of the induction.
5437     for (User *U : Entry.first->users())
5438       if (!L.contains(cast<Instruction>(U)))
5439         return false;
5440   }
5441 
5442   // Induction variables that are widened require special handling that is
5443   // currently not supported.
5444   if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
5445         return !(this->isScalarAfterVectorization(Entry.first, VF) ||
5446                  this->isProfitableToScalarize(Entry.first, VF));
5447       }))
5448     return false;
5449 
5450   // Epilogue vectorization code has not been auditted to ensure it handles
5451   // non-latch exits properly.  It may be fine, but it needs auditted and
5452   // tested.
5453   if (L.getExitingBlock() != L.getLoopLatch())
5454     return false;
5455 
5456   return true;
5457 }
5458 
5459 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
5460     const ElementCount VF) const {
5461   // FIXME: We need a much better cost-model to take different parameters such
5462   // as register pressure, code size increase and cost of extra branches into
5463   // account. For now we apply a very crude heuristic and only consider loops
5464   // with vectorization factors larger than a certain value.
5465   // We also consider epilogue vectorization unprofitable for targets that don't
5466   // consider interleaving beneficial (eg. MVE).
5467   if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
5468     return false;
5469   // FIXME: We should consider changing the threshold for scalable
5470   // vectors to take VScaleForTuning into account.
5471   if (VF.getKnownMinValue() >= EpilogueVectorizationMinVF)
5472     return true;
5473   return false;
5474 }
5475 
5476 VectorizationFactor
5477 LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
5478     const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
5479   VectorizationFactor Result = VectorizationFactor::Disabled();
5480   if (!EnableEpilogueVectorization) {
5481     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
5482     return Result;
5483   }
5484 
5485   if (!isScalarEpilogueAllowed()) {
5486     LLVM_DEBUG(
5487         dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
5488                   "allowed.\n";);
5489     return Result;
5490   }
5491 
5492   // Not really a cost consideration, but check for unsupported cases here to
5493   // simplify the logic.
5494   if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
5495     LLVM_DEBUG(
5496         dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
5497                   "not a supported candidate.\n";);
5498     return Result;
5499   }
5500 
5501   if (EpilogueVectorizationForceVF > 1) {
5502     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
5503     ElementCount ForcedEC = ElementCount::getFixed(EpilogueVectorizationForceVF);
5504     if (LVP.hasPlanWithVF(ForcedEC))
5505       return {ForcedEC, 0};
5506     else {
5507       LLVM_DEBUG(
5508           dbgs()
5509               << "LEV: Epilogue vectorization forced factor is not viable.\n";);
5510       return Result;
5511     }
5512   }
5513 
5514   if (TheLoop->getHeader()->getParent()->hasOptSize() ||
5515       TheLoop->getHeader()->getParent()->hasMinSize()) {
5516     LLVM_DEBUG(
5517         dbgs()
5518             << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
5519     return Result;
5520   }
5521 
5522   if (!isEpilogueVectorizationProfitable(MainLoopVF)) {
5523     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for "
5524                          "this loop\n");
5525     return Result;
5526   }
5527 
5528   // If MainLoopVF = vscale x 2, and vscale is expected to be 4, then we know
5529   // the main loop handles 8 lanes per iteration. We could still benefit from
5530   // vectorizing the epilogue loop with VF=4.
5531   ElementCount EstimatedRuntimeVF = MainLoopVF;
5532   if (MainLoopVF.isScalable()) {
5533     EstimatedRuntimeVF = ElementCount::getFixed(MainLoopVF.getKnownMinValue());
5534     if (Optional<unsigned> VScale = getVScaleForTuning())
5535       EstimatedRuntimeVF *= VScale.getValue();
5536   }
5537 
5538   for (auto &NextVF : ProfitableVFs)
5539     if (((!NextVF.Width.isScalable() && MainLoopVF.isScalable() &&
5540           ElementCount::isKnownLT(NextVF.Width, EstimatedRuntimeVF)) ||
5541          ElementCount::isKnownLT(NextVF.Width, MainLoopVF)) &&
5542         (Result.Width.isScalar() || isMoreProfitable(NextVF, Result)) &&
5543         LVP.hasPlanWithVF(NextVF.Width))
5544       Result = NextVF;
5545 
5546   if (Result != VectorizationFactor::Disabled())
5547     LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
5548                       << Result.Width << "\n";);
5549   return Result;
5550 }
5551 
5552 std::pair<unsigned, unsigned>
5553 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
5554   unsigned MinWidth = -1U;
5555   unsigned MaxWidth = 8;
5556   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5557   // For in-loop reductions, no element types are added to ElementTypesInLoop
5558   // if there are no loads/stores in the loop. In this case, check through the
5559   // reduction variables to determine the maximum width.
5560   if (ElementTypesInLoop.empty() && !Legal->getReductionVars().empty()) {
5561     // Reset MaxWidth so that we can find the smallest type used by recurrences
5562     // in the loop.
5563     MaxWidth = -1U;
5564     for (auto &PhiDescriptorPair : Legal->getReductionVars()) {
5565       const RecurrenceDescriptor &RdxDesc = PhiDescriptorPair.second;
5566       // When finding the min width used by the recurrence we need to account
5567       // for casts on the input operands of the recurrence.
5568       MaxWidth = std::min<unsigned>(
5569           MaxWidth, std::min<unsigned>(
5570                         RdxDesc.getMinWidthCastToRecurrenceTypeInBits(),
5571                         RdxDesc.getRecurrenceType()->getScalarSizeInBits()));
5572     }
5573   } else {
5574     for (Type *T : ElementTypesInLoop) {
5575       MinWidth = std::min<unsigned>(
5576           MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
5577       MaxWidth = std::max<unsigned>(
5578           MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
5579     }
5580   }
5581   return {MinWidth, MaxWidth};
5582 }
5583 
5584 void LoopVectorizationCostModel::collectElementTypesForWidening() {
5585   ElementTypesInLoop.clear();
5586   // For each block.
5587   for (BasicBlock *BB : TheLoop->blocks()) {
5588     // For each instruction in the loop.
5589     for (Instruction &I : BB->instructionsWithoutDebug()) {
5590       Type *T = I.getType();
5591 
5592       // Skip ignored values.
5593       if (ValuesToIgnore.count(&I))
5594         continue;
5595 
5596       // Only examine Loads, Stores and PHINodes.
5597       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
5598         continue;
5599 
5600       // Examine PHI nodes that are reduction variables. Update the type to
5601       // account for the recurrence type.
5602       if (auto *PN = dyn_cast<PHINode>(&I)) {
5603         if (!Legal->isReductionVariable(PN))
5604           continue;
5605         const RecurrenceDescriptor &RdxDesc =
5606             Legal->getReductionVars().find(PN)->second;
5607         if (PreferInLoopReductions || useOrderedReductions(RdxDesc) ||
5608             TTI.preferInLoopReduction(RdxDesc.getOpcode(),
5609                                       RdxDesc.getRecurrenceType(),
5610                                       TargetTransformInfo::ReductionFlags()))
5611           continue;
5612         T = RdxDesc.getRecurrenceType();
5613       }
5614 
5615       // Examine the stored values.
5616       if (auto *ST = dyn_cast<StoreInst>(&I))
5617         T = ST->getValueOperand()->getType();
5618 
5619       assert(T->isSized() &&
5620              "Expected the load/store/recurrence type to be sized");
5621 
5622       ElementTypesInLoop.insert(T);
5623     }
5624   }
5625 }
5626 
5627 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
5628                                                            unsigned LoopCost) {
5629   // -- The interleave heuristics --
5630   // We interleave the loop in order to expose ILP and reduce the loop overhead.
5631   // There are many micro-architectural considerations that we can't predict
5632   // at this level. For example, frontend pressure (on decode or fetch) due to
5633   // code size, or the number and capabilities of the execution ports.
5634   //
5635   // We use the following heuristics to select the interleave count:
5636   // 1. If the code has reductions, then we interleave to break the cross
5637   // iteration dependency.
5638   // 2. If the loop is really small, then we interleave to reduce the loop
5639   // overhead.
5640   // 3. We don't interleave if we think that we will spill registers to memory
5641   // due to the increased register pressure.
5642 
5643   if (!isScalarEpilogueAllowed())
5644     return 1;
5645 
5646   // We used the distance for the interleave count.
5647   if (Legal->getMaxSafeDepDistBytes() != -1U)
5648     return 1;
5649 
5650   auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
5651   const bool HasReductions = !Legal->getReductionVars().empty();
5652   // Do not interleave loops with a relatively small known or estimated trip
5653   // count. But we will interleave when InterleaveSmallLoopScalarReduction is
5654   // enabled, and the code has scalar reductions(HasReductions && VF = 1),
5655   // because with the above conditions interleaving can expose ILP and break
5656   // cross iteration dependences for reductions.
5657   if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
5658       !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
5659     return 1;
5660 
5661   // If we did not calculate the cost for VF (because the user selected the VF)
5662   // then we calculate the cost of VF here.
5663   if (LoopCost == 0) {
5664     InstructionCost C = expectedCost(VF).first;
5665     assert(C.isValid() && "Expected to have chosen a VF with valid cost");
5666     LoopCost = *C.getValue();
5667 
5668     // Loop body is free and there is no need for interleaving.
5669     if (LoopCost == 0)
5670       return 1;
5671   }
5672 
5673   RegisterUsage R = calculateRegisterUsage({VF})[0];
5674   // We divide by these constants so assume that we have at least one
5675   // instruction that uses at least one register.
5676   for (auto& pair : R.MaxLocalUsers) {
5677     pair.second = std::max(pair.second, 1U);
5678   }
5679 
5680   // We calculate the interleave count using the following formula.
5681   // Subtract the number of loop invariants from the number of available
5682   // registers. These registers are used by all of the interleaved instances.
5683   // Next, divide the remaining registers by the number of registers that is
5684   // required by the loop, in order to estimate how many parallel instances
5685   // fit without causing spills. All of this is rounded down if necessary to be
5686   // a power of two. We want power of two interleave count to simplify any
5687   // addressing operations or alignment considerations.
5688   // We also want power of two interleave counts to ensure that the induction
5689   // variable of the vector loop wraps to zero, when tail is folded by masking;
5690   // this currently happens when OptForSize, in which case IC is set to 1 above.
5691   unsigned IC = UINT_MAX;
5692 
5693   for (auto& pair : R.MaxLocalUsers) {
5694     unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5695     LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
5696                       << " registers of "
5697                       << TTI.getRegisterClassName(pair.first) << " register class\n");
5698     if (VF.isScalar()) {
5699       if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
5700         TargetNumRegisters = ForceTargetNumScalarRegs;
5701     } else {
5702       if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
5703         TargetNumRegisters = ForceTargetNumVectorRegs;
5704     }
5705     unsigned MaxLocalUsers = pair.second;
5706     unsigned LoopInvariantRegs = 0;
5707     if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
5708       LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
5709 
5710     unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers);
5711     // Don't count the induction variable as interleaved.
5712     if (EnableIndVarRegisterHeur) {
5713       TmpIC =
5714           PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
5715                         std::max(1U, (MaxLocalUsers - 1)));
5716     }
5717 
5718     IC = std::min(IC, TmpIC);
5719   }
5720 
5721   // Clamp the interleave ranges to reasonable counts.
5722   unsigned MaxInterleaveCount =
5723       TTI.getMaxInterleaveFactor(VF.getKnownMinValue());
5724 
5725   // Check if the user has overridden the max.
5726   if (VF.isScalar()) {
5727     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
5728       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
5729   } else {
5730     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
5731       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
5732   }
5733 
5734   // If trip count is known or estimated compile time constant, limit the
5735   // interleave count to be less than the trip count divided by VF, provided it
5736   // is at least 1.
5737   //
5738   // For scalable vectors we can't know if interleaving is beneficial. It may
5739   // not be beneficial for small loops if none of the lanes in the second vector
5740   // iterations is enabled. However, for larger loops, there is likely to be a
5741   // similar benefit as for fixed-width vectors. For now, we choose to leave
5742   // the InterleaveCount as if vscale is '1', although if some information about
5743   // the vector is known (e.g. min vector size), we can make a better decision.
5744   if (BestKnownTC) {
5745     MaxInterleaveCount =
5746         std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
5747     // Make sure MaxInterleaveCount is greater than 0.
5748     MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
5749   }
5750 
5751   assert(MaxInterleaveCount > 0 &&
5752          "Maximum interleave count must be greater than 0");
5753 
5754   // Clamp the calculated IC to be between the 1 and the max interleave count
5755   // that the target and trip count allows.
5756   if (IC > MaxInterleaveCount)
5757     IC = MaxInterleaveCount;
5758   else
5759     // Make sure IC is greater than 0.
5760     IC = std::max(1u, IC);
5761 
5762   assert(IC > 0 && "Interleave count must be greater than 0.");
5763 
5764   // Interleave if we vectorized this loop and there is a reduction that could
5765   // benefit from interleaving.
5766   if (VF.isVector() && HasReductions) {
5767     LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
5768     return IC;
5769   }
5770 
5771   // For any scalar loop that either requires runtime checks or predication we
5772   // are better off leaving this to the unroller. Note that if we've already
5773   // vectorized the loop we will have done the runtime check and so interleaving
5774   // won't require further checks.
5775   bool ScalarInterleavingRequiresPredication =
5776       (VF.isScalar() && any_of(TheLoop->blocks(), [this](BasicBlock *BB) {
5777          return Legal->blockNeedsPredication(BB);
5778        }));
5779   bool ScalarInterleavingRequiresRuntimePointerCheck =
5780       (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
5781 
5782   // We want to interleave small loops in order to reduce the loop overhead and
5783   // potentially expose ILP opportunities.
5784   LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
5785                     << "LV: IC is " << IC << '\n'
5786                     << "LV: VF is " << VF << '\n');
5787   const bool AggressivelyInterleaveReductions =
5788       TTI.enableAggressiveInterleaving(HasReductions);
5789   if (!ScalarInterleavingRequiresRuntimePointerCheck &&
5790       !ScalarInterleavingRequiresPredication && LoopCost < SmallLoopCost) {
5791     // We assume that the cost overhead is 1 and we use the cost model
5792     // to estimate the cost of the loop and interleave until the cost of the
5793     // loop overhead is about 5% of the cost of the loop.
5794     unsigned SmallIC =
5795         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
5796 
5797     // Interleave until store/load ports (estimated by max interleave count) are
5798     // saturated.
5799     unsigned NumStores = Legal->getNumStores();
5800     unsigned NumLoads = Legal->getNumLoads();
5801     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
5802     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
5803 
5804     // There is little point in interleaving for reductions containing selects
5805     // and compares when VF=1 since it may just create more overhead than it's
5806     // worth for loops with small trip counts. This is because we still have to
5807     // do the final reduction after the loop.
5808     bool HasSelectCmpReductions =
5809         HasReductions &&
5810         any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
5811           const RecurrenceDescriptor &RdxDesc = Reduction.second;
5812           return RecurrenceDescriptor::isSelectCmpRecurrenceKind(
5813               RdxDesc.getRecurrenceKind());
5814         });
5815     if (HasSelectCmpReductions) {
5816       LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n");
5817       return 1;
5818     }
5819 
5820     // If we have a scalar reduction (vector reductions are already dealt with
5821     // by this point), we can increase the critical path length if the loop
5822     // we're interleaving is inside another loop. For tree-wise reductions
5823     // set the limit to 2, and for ordered reductions it's best to disable
5824     // interleaving entirely.
5825     if (HasReductions && TheLoop->getLoopDepth() > 1) {
5826       bool HasOrderedReductions =
5827           any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
5828             const RecurrenceDescriptor &RdxDesc = Reduction.second;
5829             return RdxDesc.isOrdered();
5830           });
5831       if (HasOrderedReductions) {
5832         LLVM_DEBUG(
5833             dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
5834         return 1;
5835       }
5836 
5837       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
5838       SmallIC = std::min(SmallIC, F);
5839       StoresIC = std::min(StoresIC, F);
5840       LoadsIC = std::min(LoadsIC, F);
5841     }
5842 
5843     if (EnableLoadStoreRuntimeInterleave &&
5844         std::max(StoresIC, LoadsIC) > SmallIC) {
5845       LLVM_DEBUG(
5846           dbgs() << "LV: Interleaving to saturate store or load ports.\n");
5847       return std::max(StoresIC, LoadsIC);
5848     }
5849 
5850     // If there are scalar reductions and TTI has enabled aggressive
5851     // interleaving for reductions, we will interleave to expose ILP.
5852     if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
5853         AggressivelyInterleaveReductions) {
5854       LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
5855       // Interleave no less than SmallIC but not as aggressive as the normal IC
5856       // to satisfy the rare situation when resources are too limited.
5857       return std::max(IC / 2, SmallIC);
5858     } else {
5859       LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
5860       return SmallIC;
5861     }
5862   }
5863 
5864   // Interleave if this is a large loop (small loops are already dealt with by
5865   // this point) that could benefit from interleaving.
5866   if (AggressivelyInterleaveReductions) {
5867     LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
5868     return IC;
5869   }
5870 
5871   LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
5872   return 1;
5873 }
5874 
5875 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
5876 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
5877   // This function calculates the register usage by measuring the highest number
5878   // of values that are alive at a single location. Obviously, this is a very
5879   // rough estimation. We scan the loop in a topological order in order and
5880   // assign a number to each instruction. We use RPO to ensure that defs are
5881   // met before their users. We assume that each instruction that has in-loop
5882   // users starts an interval. We record every time that an in-loop value is
5883   // used, so we have a list of the first and last occurrences of each
5884   // instruction. Next, we transpose this data structure into a multi map that
5885   // holds the list of intervals that *end* at a specific location. This multi
5886   // map allows us to perform a linear search. We scan the instructions linearly
5887   // and record each time that a new interval starts, by placing it in a set.
5888   // If we find this value in the multi-map then we remove it from the set.
5889   // The max register usage is the maximum size of the set.
5890   // We also search for instructions that are defined outside the loop, but are
5891   // used inside the loop. We need this number separately from the max-interval
5892   // usage number because when we unroll, loop-invariant values do not take
5893   // more register.
5894   LoopBlocksDFS DFS(TheLoop);
5895   DFS.perform(LI);
5896 
5897   RegisterUsage RU;
5898 
5899   // Each 'key' in the map opens a new interval. The values
5900   // of the map are the index of the 'last seen' usage of the
5901   // instruction that is the key.
5902   using IntervalMap = DenseMap<Instruction *, unsigned>;
5903 
5904   // Maps instruction to its index.
5905   SmallVector<Instruction *, 64> IdxToInstr;
5906   // Marks the end of each interval.
5907   IntervalMap EndPoint;
5908   // Saves the list of instruction indices that are used in the loop.
5909   SmallPtrSet<Instruction *, 8> Ends;
5910   // Saves the list of values that are used in the loop but are
5911   // defined outside the loop, such as arguments and constants.
5912   SmallPtrSet<Value *, 8> LoopInvariants;
5913 
5914   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
5915     for (Instruction &I : BB->instructionsWithoutDebug()) {
5916       IdxToInstr.push_back(&I);
5917 
5918       // Save the end location of each USE.
5919       for (Value *U : I.operands()) {
5920         auto *Instr = dyn_cast<Instruction>(U);
5921 
5922         // Ignore non-instruction values such as arguments, constants, etc.
5923         if (!Instr)
5924           continue;
5925 
5926         // If this instruction is outside the loop then record it and continue.
5927         if (!TheLoop->contains(Instr)) {
5928           LoopInvariants.insert(Instr);
5929           continue;
5930         }
5931 
5932         // Overwrite previous end points.
5933         EndPoint[Instr] = IdxToInstr.size();
5934         Ends.insert(Instr);
5935       }
5936     }
5937   }
5938 
5939   // Saves the list of intervals that end with the index in 'key'.
5940   using InstrList = SmallVector<Instruction *, 2>;
5941   DenseMap<unsigned, InstrList> TransposeEnds;
5942 
5943   // Transpose the EndPoints to a list of values that end at each index.
5944   for (auto &Interval : EndPoint)
5945     TransposeEnds[Interval.second].push_back(Interval.first);
5946 
5947   SmallPtrSet<Instruction *, 8> OpenIntervals;
5948   SmallVector<RegisterUsage, 8> RUs(VFs.size());
5949   SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());
5950 
5951   LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
5952 
5953   // A lambda that gets the register usage for the given type and VF.
5954   const auto &TTICapture = TTI;
5955   auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned {
5956     if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty))
5957       return 0;
5958     InstructionCost::CostType RegUsage =
5959         *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue();
5960     assert(RegUsage >= 0 && RegUsage <= std::numeric_limits<unsigned>::max() &&
5961            "Nonsensical values for register usage.");
5962     return RegUsage;
5963   };
5964 
5965   for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
5966     Instruction *I = IdxToInstr[i];
5967 
5968     // Remove all of the instructions that end at this location.
5969     InstrList &List = TransposeEnds[i];
5970     for (Instruction *ToRemove : List)
5971       OpenIntervals.erase(ToRemove);
5972 
5973     // Ignore instructions that are never used within the loop.
5974     if (!Ends.count(I))
5975       continue;
5976 
5977     // Skip ignored values.
5978     if (ValuesToIgnore.count(I))
5979       continue;
5980 
5981     // For each VF find the maximum usage of registers.
5982     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
5983       // Count the number of live intervals.
5984       SmallMapVector<unsigned, unsigned, 4> RegUsage;
5985 
5986       if (VFs[j].isScalar()) {
5987         for (auto Inst : OpenIntervals) {
5988           unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
5989           if (RegUsage.find(ClassID) == RegUsage.end())
5990             RegUsage[ClassID] = 1;
5991           else
5992             RegUsage[ClassID] += 1;
5993         }
5994       } else {
5995         collectUniformsAndScalars(VFs[j]);
5996         for (auto Inst : OpenIntervals) {
5997           // Skip ignored values for VF > 1.
5998           if (VecValuesToIgnore.count(Inst))
5999             continue;
6000           if (isScalarAfterVectorization(Inst, VFs[j])) {
6001             unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
6002             if (RegUsage.find(ClassID) == RegUsage.end())
6003               RegUsage[ClassID] = 1;
6004             else
6005               RegUsage[ClassID] += 1;
6006           } else {
6007             unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType());
6008             if (RegUsage.find(ClassID) == RegUsage.end())
6009               RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]);
6010             else
6011               RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
6012           }
6013         }
6014       }
6015 
6016       for (auto& pair : RegUsage) {
6017         if (MaxUsages[j].find(pair.first) != MaxUsages[j].end())
6018           MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second);
6019         else
6020           MaxUsages[j][pair.first] = pair.second;
6021       }
6022     }
6023 
6024     LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
6025                       << OpenIntervals.size() << '\n');
6026 
6027     // Add the current instruction to the list of open intervals.
6028     OpenIntervals.insert(I);
6029   }
6030 
6031   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
6032     SmallMapVector<unsigned, unsigned, 4> Invariant;
6033 
6034     for (auto Inst : LoopInvariants) {
6035       unsigned Usage =
6036           VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
6037       unsigned ClassID =
6038           TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
6039       if (Invariant.find(ClassID) == Invariant.end())
6040         Invariant[ClassID] = Usage;
6041       else
6042         Invariant[ClassID] += Usage;
6043     }
6044 
6045     LLVM_DEBUG({
6046       dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
6047       dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
6048              << " item\n";
6049       for (const auto &pair : MaxUsages[i]) {
6050         dbgs() << "LV(REG): RegisterClass: "
6051                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6052                << " registers\n";
6053       }
6054       dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
6055              << " item\n";
6056       for (const auto &pair : Invariant) {
6057         dbgs() << "LV(REG): RegisterClass: "
6058                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6059                << " registers\n";
6060       }
6061     });
6062 
6063     RU.LoopInvariantRegs = Invariant;
6064     RU.MaxLocalUsers = MaxUsages[i];
6065     RUs[i] = RU;
6066   }
6067 
6068   return RUs;
6069 }
6070 
6071 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I,
6072                                                            ElementCount VF) {
6073   // TODO: Cost model for emulated masked load/store is completely
6074   // broken. This hack guides the cost model to use an artificially
6075   // high enough value to practically disable vectorization with such
6076   // operations, except where previously deployed legality hack allowed
6077   // using very low cost values. This is to avoid regressions coming simply
6078   // from moving "masked load/store" check from legality to cost model.
6079   // Masked Load/Gather emulation was previously never allowed.
6080   // Limited number of Masked Store/Scatter emulation was allowed.
6081   assert(isPredicatedInst(I, VF) && "Expecting a scalar emulated instruction");
6082   return isa<LoadInst>(I) ||
6083          (isa<StoreInst>(I) &&
6084           NumPredStores > NumberOfStoresToPredicate);
6085 }
6086 
6087 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
6088   // If we aren't vectorizing the loop, or if we've already collected the
6089   // instructions to scalarize, there's nothing to do. Collection may already
6090   // have occurred if we have a user-selected VF and are now computing the
6091   // expected cost for interleaving.
6092   if (VF.isScalar() || VF.isZero() ||
6093       InstsToScalarize.find(VF) != InstsToScalarize.end())
6094     return;
6095 
6096   // Initialize a mapping for VF in InstsToScalalarize. If we find that it's
6097   // not profitable to scalarize any instructions, the presence of VF in the
6098   // map will indicate that we've analyzed it already.
6099   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
6100 
6101   // Find all the instructions that are scalar with predication in the loop and
6102   // determine if it would be better to not if-convert the blocks they are in.
6103   // If so, we also record the instructions to scalarize.
6104   for (BasicBlock *BB : TheLoop->blocks()) {
6105     if (!blockNeedsPredicationForAnyReason(BB))
6106       continue;
6107     for (Instruction &I : *BB)
6108       if (isScalarWithPredication(&I, VF)) {
6109         ScalarCostsTy ScalarCosts;
6110         // Do not apply discount if scalable, because that would lead to
6111         // invalid scalarization costs.
6112         // Do not apply discount logic if hacked cost is needed
6113         // for emulated masked memrefs.
6114         if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I, VF) &&
6115             computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
6116           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
6117         // Remember that BB will remain after vectorization.
6118         PredicatedBBsAfterVectorization.insert(BB);
6119       }
6120   }
6121 }
6122 
6123 int LoopVectorizationCostModel::computePredInstDiscount(
6124     Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
6125   assert(!isUniformAfterVectorization(PredInst, VF) &&
6126          "Instruction marked uniform-after-vectorization will be predicated");
6127 
6128   // Initialize the discount to zero, meaning that the scalar version and the
6129   // vector version cost the same.
6130   InstructionCost Discount = 0;
6131 
6132   // Holds instructions to analyze. The instructions we visit are mapped in
6133   // ScalarCosts. Those instructions are the ones that would be scalarized if
6134   // we find that the scalar version costs less.
6135   SmallVector<Instruction *, 8> Worklist;
6136 
6137   // Returns true if the given instruction can be scalarized.
6138   auto canBeScalarized = [&](Instruction *I) -> bool {
6139     // We only attempt to scalarize instructions forming a single-use chain
6140     // from the original predicated block that would otherwise be vectorized.
6141     // Although not strictly necessary, we give up on instructions we know will
6142     // already be scalar to avoid traversing chains that are unlikely to be
6143     // beneficial.
6144     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
6145         isScalarAfterVectorization(I, VF))
6146       return false;
6147 
6148     // If the instruction is scalar with predication, it will be analyzed
6149     // separately. We ignore it within the context of PredInst.
6150     if (isScalarWithPredication(I, VF))
6151       return false;
6152 
6153     // If any of the instruction's operands are uniform after vectorization,
6154     // the instruction cannot be scalarized. This prevents, for example, a
6155     // masked load from being scalarized.
6156     //
6157     // We assume we will only emit a value for lane zero of an instruction
6158     // marked uniform after vectorization, rather than VF identical values.
6159     // Thus, if we scalarize an instruction that uses a uniform, we would
6160     // create uses of values corresponding to the lanes we aren't emitting code
6161     // for. This behavior can be changed by allowing getScalarValue to clone
6162     // the lane zero values for uniforms rather than asserting.
6163     for (Use &U : I->operands())
6164       if (auto *J = dyn_cast<Instruction>(U.get()))
6165         if (isUniformAfterVectorization(J, VF))
6166           return false;
6167 
6168     // Otherwise, we can scalarize the instruction.
6169     return true;
6170   };
6171 
6172   // Compute the expected cost discount from scalarizing the entire expression
6173   // feeding the predicated instruction. We currently only consider expressions
6174   // that are single-use instruction chains.
6175   Worklist.push_back(PredInst);
6176   while (!Worklist.empty()) {
6177     Instruction *I = Worklist.pop_back_val();
6178 
6179     // If we've already analyzed the instruction, there's nothing to do.
6180     if (ScalarCosts.find(I) != ScalarCosts.end())
6181       continue;
6182 
6183     // Compute the cost of the vector instruction. Note that this cost already
6184     // includes the scalarization overhead of the predicated instruction.
6185     InstructionCost VectorCost = getInstructionCost(I, VF).first;
6186 
6187     // Compute the cost of the scalarized instruction. This cost is the cost of
6188     // the instruction as if it wasn't if-converted and instead remained in the
6189     // predicated block. We will scale this cost by block probability after
6190     // computing the scalarization overhead.
6191     InstructionCost ScalarCost =
6192         VF.getFixedValue() *
6193         getInstructionCost(I, ElementCount::getFixed(1)).first;
6194 
6195     // Compute the scalarization overhead of needed insertelement instructions
6196     // and phi nodes.
6197     if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) {
6198       ScalarCost += TTI.getScalarizationOverhead(
6199           cast<VectorType>(ToVectorTy(I->getType(), VF)),
6200           APInt::getAllOnes(VF.getFixedValue()), true, false);
6201       ScalarCost +=
6202           VF.getFixedValue() *
6203           TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput);
6204     }
6205 
6206     // Compute the scalarization overhead of needed extractelement
6207     // instructions. For each of the instruction's operands, if the operand can
6208     // be scalarized, add it to the worklist; otherwise, account for the
6209     // overhead.
6210     for (Use &U : I->operands())
6211       if (auto *J = dyn_cast<Instruction>(U.get())) {
6212         assert(VectorType::isValidElementType(J->getType()) &&
6213                "Instruction has non-scalar type");
6214         if (canBeScalarized(J))
6215           Worklist.push_back(J);
6216         else if (needsExtract(J, VF)) {
6217           ScalarCost += TTI.getScalarizationOverhead(
6218               cast<VectorType>(ToVectorTy(J->getType(), VF)),
6219               APInt::getAllOnes(VF.getFixedValue()), false, true);
6220         }
6221       }
6222 
6223     // Scale the total scalar cost by block probability.
6224     ScalarCost /= getReciprocalPredBlockProb();
6225 
6226     // Compute the discount. A non-negative discount means the vector version
6227     // of the instruction costs more, and scalarizing would be beneficial.
6228     Discount += VectorCost - ScalarCost;
6229     ScalarCosts[I] = ScalarCost;
6230   }
6231 
6232   return *Discount.getValue();
6233 }
6234 
6235 LoopVectorizationCostModel::VectorizationCostTy
6236 LoopVectorizationCostModel::expectedCost(
6237     ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) {
6238   VectorizationCostTy Cost;
6239 
6240   // For each block.
6241   for (BasicBlock *BB : TheLoop->blocks()) {
6242     VectorizationCostTy BlockCost;
6243 
6244     // For each instruction in the old loop.
6245     for (Instruction &I : BB->instructionsWithoutDebug()) {
6246       // Skip ignored values.
6247       if (ValuesToIgnore.count(&I) ||
6248           (VF.isVector() && VecValuesToIgnore.count(&I)))
6249         continue;
6250 
6251       VectorizationCostTy C = getInstructionCost(&I, VF);
6252 
6253       // Check if we should override the cost.
6254       if (C.first.isValid() &&
6255           ForceTargetInstructionCost.getNumOccurrences() > 0)
6256         C.first = InstructionCost(ForceTargetInstructionCost);
6257 
6258       // Keep a list of instructions with invalid costs.
6259       if (Invalid && !C.first.isValid())
6260         Invalid->emplace_back(&I, VF);
6261 
6262       BlockCost.first += C.first;
6263       BlockCost.second |= C.second;
6264       LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
6265                         << " for VF " << VF << " For instruction: " << I
6266                         << '\n');
6267     }
6268 
6269     // If we are vectorizing a predicated block, it will have been
6270     // if-converted. This means that the block's instructions (aside from
6271     // stores and instructions that may divide by zero) will now be
6272     // unconditionally executed. For the scalar case, we may not always execute
6273     // the predicated block, if it is an if-else block. Thus, scale the block's
6274     // cost by the probability of executing it. blockNeedsPredication from
6275     // Legal is used so as to not include all blocks in tail folded loops.
6276     if (VF.isScalar() && Legal->blockNeedsPredication(BB))
6277       BlockCost.first /= getReciprocalPredBlockProb();
6278 
6279     Cost.first += BlockCost.first;
6280     Cost.second |= BlockCost.second;
6281   }
6282 
6283   return Cost;
6284 }
6285 
6286 /// Gets Address Access SCEV after verifying that the access pattern
6287 /// is loop invariant except the induction variable dependence.
6288 ///
6289 /// This SCEV can be sent to the Target in order to estimate the address
6290 /// calculation cost.
6291 static const SCEV *getAddressAccessSCEV(
6292               Value *Ptr,
6293               LoopVectorizationLegality *Legal,
6294               PredicatedScalarEvolution &PSE,
6295               const Loop *TheLoop) {
6296 
6297   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
6298   if (!Gep)
6299     return nullptr;
6300 
6301   // We are looking for a gep with all loop invariant indices except for one
6302   // which should be an induction variable.
6303   auto SE = PSE.getSE();
6304   unsigned NumOperands = Gep->getNumOperands();
6305   for (unsigned i = 1; i < NumOperands; ++i) {
6306     Value *Opd = Gep->getOperand(i);
6307     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
6308         !Legal->isInductionVariable(Opd))
6309       return nullptr;
6310   }
6311 
6312   // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV.
6313   return PSE.getSCEV(Ptr);
6314 }
6315 
6316 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
6317   return Legal->hasStride(I->getOperand(0)) ||
6318          Legal->hasStride(I->getOperand(1));
6319 }
6320 
6321 InstructionCost
6322 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
6323                                                         ElementCount VF) {
6324   assert(VF.isVector() &&
6325          "Scalarization cost of instruction implies vectorization.");
6326   if (VF.isScalable())
6327     return InstructionCost::getInvalid();
6328 
6329   Type *ValTy = getLoadStoreType(I);
6330   auto SE = PSE.getSE();
6331 
6332   unsigned AS = getLoadStoreAddressSpace(I);
6333   Value *Ptr = getLoadStorePointerOperand(I);
6334   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
6335   // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost`
6336   //       that it is being called from this specific place.
6337 
6338   // Figure out whether the access is strided and get the stride value
6339   // if it's known in compile time
6340   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
6341 
6342   // Get the cost of the scalar memory instruction and address computation.
6343   InstructionCost Cost =
6344       VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
6345 
6346   // Don't pass *I here, since it is scalar but will actually be part of a
6347   // vectorized loop where the user of it is a vectorized instruction.
6348   const Align Alignment = getLoadStoreAlignment(I);
6349   Cost += VF.getKnownMinValue() *
6350           TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
6351                               AS, TTI::TCK_RecipThroughput);
6352 
6353   // Get the overhead of the extractelement and insertelement instructions
6354   // we might create due to scalarization.
6355   Cost += getScalarizationOverhead(I, VF);
6356 
6357   // If we have a predicated load/store, it will need extra i1 extracts and
6358   // conditional branches, but may not be executed for each vector lane. Scale
6359   // the cost by the probability of executing the predicated block.
6360   if (isPredicatedInst(I, VF)) {
6361     Cost /= getReciprocalPredBlockProb();
6362 
6363     // Add the cost of an i1 extract and a branch
6364     auto *Vec_i1Ty =
6365         VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF);
6366     Cost += TTI.getScalarizationOverhead(
6367         Vec_i1Ty, APInt::getAllOnes(VF.getKnownMinValue()),
6368         /*Insert=*/false, /*Extract=*/true);
6369     Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput);
6370 
6371     if (useEmulatedMaskMemRefHack(I, VF))
6372       // Artificially setting to a high enough value to practically disable
6373       // vectorization with such operations.
6374       Cost = 3000000;
6375   }
6376 
6377   return Cost;
6378 }
6379 
6380 InstructionCost
6381 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
6382                                                     ElementCount VF) {
6383   Type *ValTy = getLoadStoreType(I);
6384   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6385   Value *Ptr = getLoadStorePointerOperand(I);
6386   unsigned AS = getLoadStoreAddressSpace(I);
6387   int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr);
6388   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6389 
6390   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6391          "Stride should be 1 or -1 for consecutive memory access");
6392   const Align Alignment = getLoadStoreAlignment(I);
6393   InstructionCost Cost = 0;
6394   if (Legal->isMaskRequired(I))
6395     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6396                                       CostKind);
6397   else
6398     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6399                                 CostKind, I);
6400 
6401   bool Reverse = ConsecutiveStride < 0;
6402   if (Reverse)
6403     Cost +=
6404         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
6405   return Cost;
6406 }
6407 
6408 InstructionCost
6409 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
6410                                                 ElementCount VF) {
6411   assert(Legal->isUniformMemOp(*I));
6412 
6413   Type *ValTy = getLoadStoreType(I);
6414   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6415   const Align Alignment = getLoadStoreAlignment(I);
6416   unsigned AS = getLoadStoreAddressSpace(I);
6417   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6418   if (isa<LoadInst>(I)) {
6419     return TTI.getAddressComputationCost(ValTy) +
6420            TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
6421                                CostKind) +
6422            TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
6423   }
6424   StoreInst *SI = cast<StoreInst>(I);
6425 
6426   bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
6427   return TTI.getAddressComputationCost(ValTy) +
6428          TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
6429                              CostKind) +
6430          (isLoopInvariantStoreValue
6431               ? 0
6432               : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
6433                                        VF.getKnownMinValue() - 1));
6434 }
6435 
6436 InstructionCost
6437 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
6438                                                  ElementCount VF) {
6439   Type *ValTy = getLoadStoreType(I);
6440   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6441   const Align Alignment = getLoadStoreAlignment(I);
6442   const Value *Ptr = getLoadStorePointerOperand(I);
6443 
6444   return TTI.getAddressComputationCost(VectorTy) +
6445          TTI.getGatherScatterOpCost(
6446              I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
6447              TargetTransformInfo::TCK_RecipThroughput, I);
6448 }
6449 
6450 InstructionCost
6451 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
6452                                                    ElementCount VF) {
6453   // TODO: Once we have support for interleaving with scalable vectors
6454   // we can calculate the cost properly here.
6455   if (VF.isScalable())
6456     return InstructionCost::getInvalid();
6457 
6458   Type *ValTy = getLoadStoreType(I);
6459   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6460   unsigned AS = getLoadStoreAddressSpace(I);
6461 
6462   auto Group = getInterleavedAccessGroup(I);
6463   assert(Group && "Fail to get an interleaved access group.");
6464 
6465   unsigned InterleaveFactor = Group->getFactor();
6466   auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
6467 
6468   // Holds the indices of existing members in the interleaved group.
6469   SmallVector<unsigned, 4> Indices;
6470   for (unsigned IF = 0; IF < InterleaveFactor; IF++)
6471     if (Group->getMember(IF))
6472       Indices.push_back(IF);
6473 
6474   // Calculate the cost of the whole interleaved group.
6475   bool UseMaskForGaps =
6476       (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) ||
6477       (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()));
6478   InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
6479       I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
6480       AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);
6481 
6482   if (Group->isReverse()) {
6483     // TODO: Add support for reversed masked interleaved access.
6484     assert(!Legal->isMaskRequired(I) &&
6485            "Reverse masked interleaved access not supported.");
6486     Cost +=
6487         Group->getNumMembers() *
6488         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
6489   }
6490   return Cost;
6491 }
6492 
6493 Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost(
6494     Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
6495   using namespace llvm::PatternMatch;
6496   // Early exit for no inloop reductions
6497   if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
6498     return None;
6499   auto *VectorTy = cast<VectorType>(Ty);
6500 
6501   // We are looking for a pattern of, and finding the minimal acceptable cost:
6502   //  reduce(mul(ext(A), ext(B))) or
6503   //  reduce(mul(A, B)) or
6504   //  reduce(ext(A)) or
6505   //  reduce(A).
6506   // The basic idea is that we walk down the tree to do that, finding the root
6507   // reduction instruction in InLoopReductionImmediateChains. From there we find
6508   // the pattern of mul/ext and test the cost of the entire pattern vs the cost
6509   // of the components. If the reduction cost is lower then we return it for the
6510   // reduction instruction and 0 for the other instructions in the pattern. If
6511   // it is not we return an invalid cost specifying the orignal cost method
6512   // should be used.
6513   Instruction *RetI = I;
6514   if (match(RetI, m_ZExtOrSExt(m_Value()))) {
6515     if (!RetI->hasOneUser())
6516       return None;
6517     RetI = RetI->user_back();
6518   }
6519   if (match(RetI, m_Mul(m_Value(), m_Value())) &&
6520       RetI->user_back()->getOpcode() == Instruction::Add) {
6521     if (!RetI->hasOneUser())
6522       return None;
6523     RetI = RetI->user_back();
6524   }
6525 
6526   // Test if the found instruction is a reduction, and if not return an invalid
6527   // cost specifying the parent to use the original cost modelling.
6528   if (!InLoopReductionImmediateChains.count(RetI))
6529     return None;
6530 
6531   // Find the reduction this chain is a part of and calculate the basic cost of
6532   // the reduction on its own.
6533   Instruction *LastChain = InLoopReductionImmediateChains[RetI];
6534   Instruction *ReductionPhi = LastChain;
6535   while (!isa<PHINode>(ReductionPhi))
6536     ReductionPhi = InLoopReductionImmediateChains[ReductionPhi];
6537 
6538   const RecurrenceDescriptor &RdxDesc =
6539       Legal->getReductionVars().find(cast<PHINode>(ReductionPhi))->second;
6540 
6541   InstructionCost BaseCost = TTI.getArithmeticReductionCost(
6542       RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind);
6543 
6544   // For a call to the llvm.fmuladd intrinsic we need to add the cost of a
6545   // normal fmul instruction to the cost of the fadd reduction.
6546   if (RdxDesc.getRecurrenceKind() == RecurKind::FMulAdd)
6547     BaseCost +=
6548         TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind);
6549 
6550   // If we're using ordered reductions then we can just return the base cost
6551   // here, since getArithmeticReductionCost calculates the full ordered
6552   // reduction cost when FP reassociation is not allowed.
6553   if (useOrderedReductions(RdxDesc))
6554     return BaseCost;
6555 
6556   // Get the operand that was not the reduction chain and match it to one of the
6557   // patterns, returning the better cost if it is found.
6558   Instruction *RedOp = RetI->getOperand(1) == LastChain
6559                            ? dyn_cast<Instruction>(RetI->getOperand(0))
6560                            : dyn_cast<Instruction>(RetI->getOperand(1));
6561 
6562   VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
6563 
6564   Instruction *Op0, *Op1;
6565   if (RedOp &&
6566       match(RedOp,
6567             m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) &&
6568       match(Op0, m_ZExtOrSExt(m_Value())) &&
6569       Op0->getOpcode() == Op1->getOpcode() &&
6570       Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
6571       !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) &&
6572       (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) {
6573 
6574     // Matched reduce(ext(mul(ext(A), ext(B)))
6575     // Note that the extend opcodes need to all match, or if A==B they will have
6576     // been converted to zext(mul(sext(A), sext(A))) as it is known positive,
6577     // which is equally fine.
6578     bool IsUnsigned = isa<ZExtInst>(Op0);
6579     auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
6580     auto *MulType = VectorType::get(Op0->getType(), VectorTy);
6581 
6582     InstructionCost ExtCost =
6583         TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType,
6584                              TTI::CastContextHint::None, CostKind, Op0);
6585     InstructionCost MulCost =
6586         TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind);
6587     InstructionCost Ext2Cost =
6588         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType,
6589                              TTI::CastContextHint::None, CostKind, RedOp);
6590 
6591     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6592         /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6593         CostKind);
6594 
6595     if (RedCost.isValid() &&
6596         RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost)
6597       return I == RetI ? RedCost : 0;
6598   } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) &&
6599              !TheLoop->isLoopInvariant(RedOp)) {
6600     // Matched reduce(ext(A))
6601     bool IsUnsigned = isa<ZExtInst>(RedOp);
6602     auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
6603     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6604         /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6605         CostKind);
6606 
6607     InstructionCost ExtCost =
6608         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
6609                              TTI::CastContextHint::None, CostKind, RedOp);
6610     if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
6611       return I == RetI ? RedCost : 0;
6612   } else if (RedOp &&
6613              match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) {
6614     if (match(Op0, m_ZExtOrSExt(m_Value())) &&
6615         Op0->getOpcode() == Op1->getOpcode() &&
6616         !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
6617       bool IsUnsigned = isa<ZExtInst>(Op0);
6618       Type *Op0Ty = Op0->getOperand(0)->getType();
6619       Type *Op1Ty = Op1->getOperand(0)->getType();
6620       Type *LargestOpTy =
6621           Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty
6622                                                                     : Op0Ty;
6623       auto *ExtType = VectorType::get(LargestOpTy, VectorTy);
6624 
6625       // Matched reduce(mul(ext(A), ext(B))), where the two ext may be of
6626       // different sizes. We take the largest type as the ext to reduce, and add
6627       // the remaining cost as, for example reduce(mul(ext(ext(A)), ext(B))).
6628       InstructionCost ExtCost0 = TTI.getCastInstrCost(
6629           Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy),
6630           TTI::CastContextHint::None, CostKind, Op0);
6631       InstructionCost ExtCost1 = TTI.getCastInstrCost(
6632           Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy),
6633           TTI::CastContextHint::None, CostKind, Op1);
6634       InstructionCost MulCost =
6635           TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
6636 
6637       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6638           /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6639           CostKind);
6640       InstructionCost ExtraExtCost = 0;
6641       if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) {
6642         Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1;
6643         ExtraExtCost = TTI.getCastInstrCost(
6644             ExtraExtOp->getOpcode(), ExtType,
6645             VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy),
6646             TTI::CastContextHint::None, CostKind, ExtraExtOp);
6647       }
6648 
6649       if (RedCost.isValid() &&
6650           (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost))
6651         return I == RetI ? RedCost : 0;
6652     } else if (!match(I, m_ZExtOrSExt(m_Value()))) {
6653       // Matched reduce(mul())
6654       InstructionCost MulCost =
6655           TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
6656 
6657       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6658           /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy,
6659           CostKind);
6660 
6661       if (RedCost.isValid() && RedCost < MulCost + BaseCost)
6662         return I == RetI ? RedCost : 0;
6663     }
6664   }
6665 
6666   return I == RetI ? Optional<InstructionCost>(BaseCost) : None;
6667 }
6668 
6669 InstructionCost
6670 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
6671                                                      ElementCount VF) {
6672   // Calculate scalar cost only. Vectorization cost should be ready at this
6673   // moment.
6674   if (VF.isScalar()) {
6675     Type *ValTy = getLoadStoreType(I);
6676     const Align Alignment = getLoadStoreAlignment(I);
6677     unsigned AS = getLoadStoreAddressSpace(I);
6678 
6679     return TTI.getAddressComputationCost(ValTy) +
6680            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
6681                                TTI::TCK_RecipThroughput, I);
6682   }
6683   return getWideningCost(I, VF);
6684 }
6685 
6686 LoopVectorizationCostModel::VectorizationCostTy
6687 LoopVectorizationCostModel::getInstructionCost(Instruction *I,
6688                                                ElementCount VF) {
6689   // If we know that this instruction will remain uniform, check the cost of
6690   // the scalar version.
6691   if (isUniformAfterVectorization(I, VF))
6692     VF = ElementCount::getFixed(1);
6693 
6694   if (VF.isVector() && isProfitableToScalarize(I, VF))
6695     return VectorizationCostTy(InstsToScalarize[VF][I], false);
6696 
6697   // Forced scalars do not have any scalarization overhead.
6698   auto ForcedScalar = ForcedScalars.find(VF);
6699   if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
6700     auto InstSet = ForcedScalar->second;
6701     if (InstSet.count(I))
6702       return VectorizationCostTy(
6703           (getInstructionCost(I, ElementCount::getFixed(1)).first *
6704            VF.getKnownMinValue()),
6705           false);
6706   }
6707 
6708   Type *VectorTy;
6709   InstructionCost C = getInstructionCost(I, VF, VectorTy);
6710 
6711   bool TypeNotScalarized = false;
6712   if (VF.isVector() && VectorTy->isVectorTy()) {
6713     unsigned NumParts = TTI.getNumberOfParts(VectorTy);
6714     if (NumParts)
6715       TypeNotScalarized = NumParts < VF.getKnownMinValue();
6716     else
6717       C = InstructionCost::getInvalid();
6718   }
6719   return VectorizationCostTy(C, TypeNotScalarized);
6720 }
6721 
6722 InstructionCost
6723 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
6724                                                      ElementCount VF) const {
6725 
6726   // There is no mechanism yet to create a scalable scalarization loop,
6727   // so this is currently Invalid.
6728   if (VF.isScalable())
6729     return InstructionCost::getInvalid();
6730 
6731   if (VF.isScalar())
6732     return 0;
6733 
6734   InstructionCost Cost = 0;
6735   Type *RetTy = ToVectorTy(I->getType(), VF);
6736   if (!RetTy->isVoidTy() &&
6737       (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
6738     Cost += TTI.getScalarizationOverhead(
6739         cast<VectorType>(RetTy), APInt::getAllOnes(VF.getKnownMinValue()), true,
6740         false);
6741 
6742   // Some targets keep addresses scalar.
6743   if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
6744     return Cost;
6745 
6746   // Some targets support efficient element stores.
6747   if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
6748     return Cost;
6749 
6750   // Collect operands to consider.
6751   CallInst *CI = dyn_cast<CallInst>(I);
6752   Instruction::op_range Ops = CI ? CI->args() : I->operands();
6753 
6754   // Skip operands that do not require extraction/scalarization and do not incur
6755   // any overhead.
6756   SmallVector<Type *> Tys;
6757   for (auto *V : filterExtractingOperands(Ops, VF))
6758     Tys.push_back(MaybeVectorizeType(V->getType(), VF));
6759   return Cost + TTI.getOperandsScalarizationOverhead(
6760                     filterExtractingOperands(Ops, VF), Tys);
6761 }
6762 
6763 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
6764   if (VF.isScalar())
6765     return;
6766   NumPredStores = 0;
6767   for (BasicBlock *BB : TheLoop->blocks()) {
6768     // For each instruction in the old loop.
6769     for (Instruction &I : *BB) {
6770       Value *Ptr =  getLoadStorePointerOperand(&I);
6771       if (!Ptr)
6772         continue;
6773 
6774       // TODO: We should generate better code and update the cost model for
6775       // predicated uniform stores. Today they are treated as any other
6776       // predicated store (see added test cases in
6777       // invariant-store-vectorization.ll).
6778       if (isa<StoreInst>(&I) && isScalarWithPredication(&I, VF))
6779         NumPredStores++;
6780 
6781       if (Legal->isUniformMemOp(I)) {
6782         // TODO: Avoid replicating loads and stores instead of
6783         // relying on instcombine to remove them.
6784         // Load: Scalar load + broadcast
6785         // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
6786         InstructionCost Cost;
6787         if (isa<StoreInst>(&I) && VF.isScalable() &&
6788             isLegalGatherOrScatter(&I, VF)) {
6789           Cost = getGatherScatterCost(&I, VF);
6790           setWideningDecision(&I, VF, CM_GatherScatter, Cost);
6791         } else {
6792           assert((isa<LoadInst>(&I) || !VF.isScalable()) &&
6793                  "Cannot yet scalarize uniform stores");
6794           Cost = getUniformMemOpCost(&I, VF);
6795           setWideningDecision(&I, VF, CM_Scalarize, Cost);
6796         }
6797         continue;
6798       }
6799 
6800       // We assume that widening is the best solution when possible.
6801       if (memoryInstructionCanBeWidened(&I, VF)) {
6802         InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
6803         int ConsecutiveStride = Legal->isConsecutivePtr(
6804             getLoadStoreType(&I), getLoadStorePointerOperand(&I));
6805         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6806                "Expected consecutive stride.");
6807         InstWidening Decision =
6808             ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
6809         setWideningDecision(&I, VF, Decision, Cost);
6810         continue;
6811       }
6812 
6813       // Choose between Interleaving, Gather/Scatter or Scalarization.
6814       InstructionCost InterleaveCost = InstructionCost::getInvalid();
6815       unsigned NumAccesses = 1;
6816       if (isAccessInterleaved(&I)) {
6817         auto Group = getInterleavedAccessGroup(&I);
6818         assert(Group && "Fail to get an interleaved access group.");
6819 
6820         // Make one decision for the whole group.
6821         if (getWideningDecision(&I, VF) != CM_Unknown)
6822           continue;
6823 
6824         NumAccesses = Group->getNumMembers();
6825         if (interleavedAccessCanBeWidened(&I, VF))
6826           InterleaveCost = getInterleaveGroupCost(&I, VF);
6827       }
6828 
6829       InstructionCost GatherScatterCost =
6830           isLegalGatherOrScatter(&I, VF)
6831               ? getGatherScatterCost(&I, VF) * NumAccesses
6832               : InstructionCost::getInvalid();
6833 
6834       InstructionCost ScalarizationCost =
6835           getMemInstScalarizationCost(&I, VF) * NumAccesses;
6836 
6837       // Choose better solution for the current VF,
6838       // write down this decision and use it during vectorization.
6839       InstructionCost Cost;
6840       InstWidening Decision;
6841       if (InterleaveCost <= GatherScatterCost &&
6842           InterleaveCost < ScalarizationCost) {
6843         Decision = CM_Interleave;
6844         Cost = InterleaveCost;
6845       } else if (GatherScatterCost < ScalarizationCost) {
6846         Decision = CM_GatherScatter;
6847         Cost = GatherScatterCost;
6848       } else {
6849         Decision = CM_Scalarize;
6850         Cost = ScalarizationCost;
6851       }
6852       // If the instructions belongs to an interleave group, the whole group
6853       // receives the same decision. The whole group receives the cost, but
6854       // the cost will actually be assigned to one instruction.
6855       if (auto Group = getInterleavedAccessGroup(&I))
6856         setWideningDecision(Group, VF, Decision, Cost);
6857       else
6858         setWideningDecision(&I, VF, Decision, Cost);
6859     }
6860   }
6861 
6862   // Make sure that any load of address and any other address computation
6863   // remains scalar unless there is gather/scatter support. This avoids
6864   // inevitable extracts into address registers, and also has the benefit of
6865   // activating LSR more, since that pass can't optimize vectorized
6866   // addresses.
6867   if (TTI.prefersVectorizedAddressing())
6868     return;
6869 
6870   // Start with all scalar pointer uses.
6871   SmallPtrSet<Instruction *, 8> AddrDefs;
6872   for (BasicBlock *BB : TheLoop->blocks())
6873     for (Instruction &I : *BB) {
6874       Instruction *PtrDef =
6875         dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
6876       if (PtrDef && TheLoop->contains(PtrDef) &&
6877           getWideningDecision(&I, VF) != CM_GatherScatter)
6878         AddrDefs.insert(PtrDef);
6879     }
6880 
6881   // Add all instructions used to generate the addresses.
6882   SmallVector<Instruction *, 4> Worklist;
6883   append_range(Worklist, AddrDefs);
6884   while (!Worklist.empty()) {
6885     Instruction *I = Worklist.pop_back_val();
6886     for (auto &Op : I->operands())
6887       if (auto *InstOp = dyn_cast<Instruction>(Op))
6888         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
6889             AddrDefs.insert(InstOp).second)
6890           Worklist.push_back(InstOp);
6891   }
6892 
6893   for (auto *I : AddrDefs) {
6894     if (isa<LoadInst>(I)) {
6895       // Setting the desired widening decision should ideally be handled in
6896       // by cost functions, but since this involves the task of finding out
6897       // if the loaded register is involved in an address computation, it is
6898       // instead changed here when we know this is the case.
6899       InstWidening Decision = getWideningDecision(I, VF);
6900       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
6901         // Scalarize a widened load of address.
6902         setWideningDecision(
6903             I, VF, CM_Scalarize,
6904             (VF.getKnownMinValue() *
6905              getMemoryInstructionCost(I, ElementCount::getFixed(1))));
6906       else if (auto Group = getInterleavedAccessGroup(I)) {
6907         // Scalarize an interleave group of address loads.
6908         for (unsigned I = 0; I < Group->getFactor(); ++I) {
6909           if (Instruction *Member = Group->getMember(I))
6910             setWideningDecision(
6911                 Member, VF, CM_Scalarize,
6912                 (VF.getKnownMinValue() *
6913                  getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
6914         }
6915       }
6916     } else
6917       // Make sure I gets scalarized and a cost estimate without
6918       // scalarization overhead.
6919       ForcedScalars[VF].insert(I);
6920   }
6921 }
6922 
6923 InstructionCost
6924 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
6925                                                Type *&VectorTy) {
6926   Type *RetTy = I->getType();
6927   if (canTruncateToMinimalBitwidth(I, VF))
6928     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
6929   auto SE = PSE.getSE();
6930   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6931 
6932   auto hasSingleCopyAfterVectorization = [this](Instruction *I,
6933                                                 ElementCount VF) -> bool {
6934     if (VF.isScalar())
6935       return true;
6936 
6937     auto Scalarized = InstsToScalarize.find(VF);
6938     assert(Scalarized != InstsToScalarize.end() &&
6939            "VF not yet analyzed for scalarization profitability");
6940     return !Scalarized->second.count(I) &&
6941            llvm::all_of(I->users(), [&](User *U) {
6942              auto *UI = cast<Instruction>(U);
6943              return !Scalarized->second.count(UI);
6944            });
6945   };
6946   (void) hasSingleCopyAfterVectorization;
6947 
6948   if (isScalarAfterVectorization(I, VF)) {
6949     // With the exception of GEPs and PHIs, after scalarization there should
6950     // only be one copy of the instruction generated in the loop. This is
6951     // because the VF is either 1, or any instructions that need scalarizing
6952     // have already been dealt with by the the time we get here. As a result,
6953     // it means we don't have to multiply the instruction cost by VF.
6954     assert(I->getOpcode() == Instruction::GetElementPtr ||
6955            I->getOpcode() == Instruction::PHI ||
6956            (I->getOpcode() == Instruction::BitCast &&
6957             I->getType()->isPointerTy()) ||
6958            hasSingleCopyAfterVectorization(I, VF));
6959     VectorTy = RetTy;
6960   } else
6961     VectorTy = ToVectorTy(RetTy, VF);
6962 
6963   // TODO: We need to estimate the cost of intrinsic calls.
6964   switch (I->getOpcode()) {
6965   case Instruction::GetElementPtr:
6966     // We mark this instruction as zero-cost because the cost of GEPs in
6967     // vectorized code depends on whether the corresponding memory instruction
6968     // is scalarized or not. Therefore, we handle GEPs with the memory
6969     // instruction cost.
6970     return 0;
6971   case Instruction::Br: {
6972     // In cases of scalarized and predicated instructions, there will be VF
6973     // predicated blocks in the vectorized loop. Each branch around these
6974     // blocks requires also an extract of its vector compare i1 element.
6975     bool ScalarPredicatedBB = false;
6976     BranchInst *BI = cast<BranchInst>(I);
6977     if (VF.isVector() && BI->isConditional() &&
6978         (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
6979          PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
6980       ScalarPredicatedBB = true;
6981 
6982     if (ScalarPredicatedBB) {
6983       // Not possible to scalarize scalable vector with predicated instructions.
6984       if (VF.isScalable())
6985         return InstructionCost::getInvalid();
6986       // Return cost for branches around scalarized and predicated blocks.
6987       auto *Vec_i1Ty =
6988           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
6989       return (
6990           TTI.getScalarizationOverhead(
6991               Vec_i1Ty, APInt::getAllOnes(VF.getFixedValue()), false, true) +
6992           (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue()));
6993     } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
6994       // The back-edge branch will remain, as will all scalar branches.
6995       return TTI.getCFInstrCost(Instruction::Br, CostKind);
6996     else
6997       // This branch will be eliminated by if-conversion.
6998       return 0;
6999     // Note: We currently assume zero cost for an unconditional branch inside
7000     // a predicated block since it will become a fall-through, although we
7001     // may decide in the future to call TTI for all branches.
7002   }
7003   case Instruction::PHI: {
7004     auto *Phi = cast<PHINode>(I);
7005 
7006     // First-order recurrences are replaced by vector shuffles inside the loop.
7007     // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
7008     if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi))
7009       return TTI.getShuffleCost(
7010           TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy),
7011           None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1));
7012 
7013     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
7014     // converted into select instructions. We require N - 1 selects per phi
7015     // node, where N is the number of incoming values.
7016     if (VF.isVector() && Phi->getParent() != TheLoop->getHeader())
7017       return (Phi->getNumIncomingValues() - 1) *
7018              TTI.getCmpSelInstrCost(
7019                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
7020                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
7021                  CmpInst::BAD_ICMP_PREDICATE, CostKind);
7022 
7023     return TTI.getCFInstrCost(Instruction::PHI, CostKind);
7024   }
7025   case Instruction::UDiv:
7026   case Instruction::SDiv:
7027   case Instruction::URem:
7028   case Instruction::SRem:
7029     // If we have a predicated instruction, it may not be executed for each
7030     // vector lane. Get the scalarization cost and scale this amount by the
7031     // probability of executing the predicated block. If the instruction is not
7032     // predicated, we fall through to the next case.
7033     if (VF.isVector() && isScalarWithPredication(I, VF)) {
7034       InstructionCost Cost = 0;
7035 
7036       // These instructions have a non-void type, so account for the phi nodes
7037       // that we will create. This cost is likely to be zero. The phi node
7038       // cost, if any, should be scaled by the block probability because it
7039       // models a copy at the end of each predicated block.
7040       Cost += VF.getKnownMinValue() *
7041               TTI.getCFInstrCost(Instruction::PHI, CostKind);
7042 
7043       // The cost of the non-predicated instruction.
7044       Cost += VF.getKnownMinValue() *
7045               TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);
7046 
7047       // The cost of insertelement and extractelement instructions needed for
7048       // scalarization.
7049       Cost += getScalarizationOverhead(I, VF);
7050 
7051       // Scale the cost by the probability of executing the predicated blocks.
7052       // This assumes the predicated block for each vector lane is equally
7053       // likely.
7054       return Cost / getReciprocalPredBlockProb();
7055     }
7056     LLVM_FALLTHROUGH;
7057   case Instruction::Add:
7058   case Instruction::FAdd:
7059   case Instruction::Sub:
7060   case Instruction::FSub:
7061   case Instruction::Mul:
7062   case Instruction::FMul:
7063   case Instruction::FDiv:
7064   case Instruction::FRem:
7065   case Instruction::Shl:
7066   case Instruction::LShr:
7067   case Instruction::AShr:
7068   case Instruction::And:
7069   case Instruction::Or:
7070   case Instruction::Xor: {
7071     // Since we will replace the stride by 1 the multiplication should go away.
7072     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
7073       return 0;
7074 
7075     // Detect reduction patterns
7076     if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7077       return *RedCost;
7078 
7079     // Certain instructions can be cheaper to vectorize if they have a constant
7080     // second vector operand. One example of this are shifts on x86.
7081     Value *Op2 = I->getOperand(1);
7082     TargetTransformInfo::OperandValueProperties Op2VP;
7083     TargetTransformInfo::OperandValueKind Op2VK =
7084         TTI.getOperandInfo(Op2, Op2VP);
7085     if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
7086       Op2VK = TargetTransformInfo::OK_UniformValue;
7087 
7088     SmallVector<const Value *, 4> Operands(I->operand_values());
7089     return TTI.getArithmeticInstrCost(
7090         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7091         Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
7092   }
7093   case Instruction::FNeg: {
7094     return TTI.getArithmeticInstrCost(
7095         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7096         TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None,
7097         TargetTransformInfo::OP_None, I->getOperand(0), I);
7098   }
7099   case Instruction::Select: {
7100     SelectInst *SI = cast<SelectInst>(I);
7101     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
7102     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
7103 
7104     const Value *Op0, *Op1;
7105     using namespace llvm::PatternMatch;
7106     if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
7107                         match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
7108       // select x, y, false --> x & y
7109       // select x, true, y --> x | y
7110       TTI::OperandValueProperties Op1VP = TTI::OP_None;
7111       TTI::OperandValueProperties Op2VP = TTI::OP_None;
7112       TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP);
7113       TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP);
7114       assert(Op0->getType()->getScalarSizeInBits() == 1 &&
7115               Op1->getType()->getScalarSizeInBits() == 1);
7116 
7117       SmallVector<const Value *, 2> Operands{Op0, Op1};
7118       return TTI.getArithmeticInstrCost(
7119           match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And, VectorTy,
7120           CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I);
7121     }
7122 
7123     Type *CondTy = SI->getCondition()->getType();
7124     if (!ScalarCond)
7125       CondTy = VectorType::get(CondTy, VF);
7126 
7127     CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
7128     if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition()))
7129       Pred = Cmp->getPredicate();
7130     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, Pred,
7131                                   CostKind, I);
7132   }
7133   case Instruction::ICmp:
7134   case Instruction::FCmp: {
7135     Type *ValTy = I->getOperand(0)->getType();
7136     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
7137     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
7138       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
7139     VectorTy = ToVectorTy(ValTy, VF);
7140     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr,
7141                                   cast<CmpInst>(I)->getPredicate(), CostKind,
7142                                   I);
7143   }
7144   case Instruction::Store:
7145   case Instruction::Load: {
7146     ElementCount Width = VF;
7147     if (Width.isVector()) {
7148       InstWidening Decision = getWideningDecision(I, Width);
7149       assert(Decision != CM_Unknown &&
7150              "CM decision should be taken at this point");
7151       if (Decision == CM_Scalarize)
7152         Width = ElementCount::getFixed(1);
7153     }
7154     VectorTy = ToVectorTy(getLoadStoreType(I), Width);
7155     return getMemoryInstructionCost(I, VF);
7156   }
7157   case Instruction::BitCast:
7158     if (I->getType()->isPointerTy())
7159       return 0;
7160     LLVM_FALLTHROUGH;
7161   case Instruction::ZExt:
7162   case Instruction::SExt:
7163   case Instruction::FPToUI:
7164   case Instruction::FPToSI:
7165   case Instruction::FPExt:
7166   case Instruction::PtrToInt:
7167   case Instruction::IntToPtr:
7168   case Instruction::SIToFP:
7169   case Instruction::UIToFP:
7170   case Instruction::Trunc:
7171   case Instruction::FPTrunc: {
7172     // Computes the CastContextHint from a Load/Store instruction.
7173     auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
7174       assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
7175              "Expected a load or a store!");
7176 
7177       if (VF.isScalar() || !TheLoop->contains(I))
7178         return TTI::CastContextHint::Normal;
7179 
7180       switch (getWideningDecision(I, VF)) {
7181       case LoopVectorizationCostModel::CM_GatherScatter:
7182         return TTI::CastContextHint::GatherScatter;
7183       case LoopVectorizationCostModel::CM_Interleave:
7184         return TTI::CastContextHint::Interleave;
7185       case LoopVectorizationCostModel::CM_Scalarize:
7186       case LoopVectorizationCostModel::CM_Widen:
7187         return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked
7188                                         : TTI::CastContextHint::Normal;
7189       case LoopVectorizationCostModel::CM_Widen_Reverse:
7190         return TTI::CastContextHint::Reversed;
7191       case LoopVectorizationCostModel::CM_Unknown:
7192         llvm_unreachable("Instr did not go through cost modelling?");
7193       }
7194 
7195       llvm_unreachable("Unhandled case!");
7196     };
7197 
7198     unsigned Opcode = I->getOpcode();
7199     TTI::CastContextHint CCH = TTI::CastContextHint::None;
7200     // For Trunc, the context is the only user, which must be a StoreInst.
7201     if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
7202       if (I->hasOneUse())
7203         if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
7204           CCH = ComputeCCH(Store);
7205     }
7206     // For Z/Sext, the context is the operand, which must be a LoadInst.
7207     else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
7208              Opcode == Instruction::FPExt) {
7209       if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
7210         CCH = ComputeCCH(Load);
7211     }
7212 
7213     // We optimize the truncation of induction variables having constant
7214     // integer steps. The cost of these truncations is the same as the scalar
7215     // operation.
7216     if (isOptimizableIVTruncate(I, VF)) {
7217       auto *Trunc = cast<TruncInst>(I);
7218       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
7219                                   Trunc->getSrcTy(), CCH, CostKind, Trunc);
7220     }
7221 
7222     // Detect reduction patterns
7223     if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7224       return *RedCost;
7225 
7226     Type *SrcScalarTy = I->getOperand(0)->getType();
7227     Type *SrcVecTy =
7228         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
7229     if (canTruncateToMinimalBitwidth(I, VF)) {
7230       // This cast is going to be shrunk. This may remove the cast or it might
7231       // turn it into slightly different cast. For example, if MinBW == 16,
7232       // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
7233       //
7234       // Calculate the modified src and dest types.
7235       Type *MinVecTy = VectorTy;
7236       if (Opcode == Instruction::Trunc) {
7237         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
7238         VectorTy =
7239             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7240       } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
7241         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
7242         VectorTy =
7243             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7244       }
7245     }
7246 
7247     return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
7248   }
7249   case Instruction::Call: {
7250     if (RecurrenceDescriptor::isFMulAddIntrinsic(I))
7251       if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7252         return *RedCost;
7253     bool NeedToScalarize;
7254     CallInst *CI = cast<CallInst>(I);
7255     InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
7256     if (getVectorIntrinsicIDForCall(CI, TLI)) {
7257       InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF);
7258       return std::min(CallCost, IntrinsicCost);
7259     }
7260     return CallCost;
7261   }
7262   case Instruction::ExtractValue:
7263     return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput);
7264   case Instruction::Alloca:
7265     // We cannot easily widen alloca to a scalable alloca, as
7266     // the result would need to be a vector of pointers.
7267     if (VF.isScalable())
7268       return InstructionCost::getInvalid();
7269     LLVM_FALLTHROUGH;
7270   default:
7271     // This opcode is unknown. Assume that it is the same as 'mul'.
7272     return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
7273   } // end of switch.
7274 }
7275 
7276 char LoopVectorize::ID = 0;
7277 
7278 static const char lv_name[] = "Loop Vectorization";
7279 
7280 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
7281 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
7282 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
7283 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
7284 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
7285 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
7286 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
7287 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
7288 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
7289 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
7290 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
7291 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
7292 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
7293 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
7294 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
7295 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
7296 
7297 namespace llvm {
7298 
7299 Pass *createLoopVectorizePass() { return new LoopVectorize(); }
7300 
7301 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
7302                               bool VectorizeOnlyWhenForced) {
7303   return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
7304 }
7305 
7306 } // end namespace llvm
7307 
7308 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
7309   // Check if the pointer operand of a load or store instruction is
7310   // consecutive.
7311   if (auto *Ptr = getLoadStorePointerOperand(Inst))
7312     return Legal->isConsecutivePtr(getLoadStoreType(Inst), Ptr);
7313   return false;
7314 }
7315 
7316 void LoopVectorizationCostModel::collectValuesToIgnore() {
7317   // Ignore ephemeral values.
7318   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
7319 
7320   // Ignore type-promoting instructions we identified during reduction
7321   // detection.
7322   for (auto &Reduction : Legal->getReductionVars()) {
7323     const RecurrenceDescriptor &RedDes = Reduction.second;
7324     const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
7325     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7326   }
7327   // Ignore type-casting instructions we identified during induction
7328   // detection.
7329   for (auto &Induction : Legal->getInductionVars()) {
7330     const InductionDescriptor &IndDes = Induction.second;
7331     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
7332     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7333   }
7334 }
7335 
7336 void LoopVectorizationCostModel::collectInLoopReductions() {
7337   for (auto &Reduction : Legal->getReductionVars()) {
7338     PHINode *Phi = Reduction.first;
7339     const RecurrenceDescriptor &RdxDesc = Reduction.second;
7340 
7341     // We don't collect reductions that are type promoted (yet).
7342     if (RdxDesc.getRecurrenceType() != Phi->getType())
7343       continue;
7344 
7345     // If the target would prefer this reduction to happen "in-loop", then we
7346     // want to record it as such.
7347     unsigned Opcode = RdxDesc.getOpcode();
7348     if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) &&
7349         !TTI.preferInLoopReduction(Opcode, Phi->getType(),
7350                                    TargetTransformInfo::ReductionFlags()))
7351       continue;
7352 
7353     // Check that we can correctly put the reductions into the loop, by
7354     // finding the chain of operations that leads from the phi to the loop
7355     // exit value.
7356     SmallVector<Instruction *, 4> ReductionOperations =
7357         RdxDesc.getReductionOpChain(Phi, TheLoop);
7358     bool InLoop = !ReductionOperations.empty();
7359     if (InLoop) {
7360       InLoopReductionChains[Phi] = ReductionOperations;
7361       // Add the elements to InLoopReductionImmediateChains for cost modelling.
7362       Instruction *LastChain = Phi;
7363       for (auto *I : ReductionOperations) {
7364         InLoopReductionImmediateChains[I] = LastChain;
7365         LastChain = I;
7366       }
7367     }
7368     LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
7369                       << " reduction for phi: " << *Phi << "\n");
7370   }
7371 }
7372 
7373 // TODO: we could return a pair of values that specify the max VF and
7374 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
7375 // `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment
7376 // doesn't have a cost model that can choose which plan to execute if
7377 // more than one is generated.
7378 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
7379                                  LoopVectorizationCostModel &CM) {
7380   unsigned WidestType;
7381   std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
7382   return WidestVectorRegBits / WidestType;
7383 }
7384 
7385 VectorizationFactor
7386 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
7387   assert(!UserVF.isScalable() && "scalable vectors not yet supported");
7388   ElementCount VF = UserVF;
7389   // Outer loop handling: They may require CFG and instruction level
7390   // transformations before even evaluating whether vectorization is profitable.
7391   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
7392   // the vectorization pipeline.
7393   if (!OrigLoop->isInnermost()) {
7394     // If the user doesn't provide a vectorization factor, determine a
7395     // reasonable one.
7396     if (UserVF.isZero()) {
7397       VF = ElementCount::getFixed(determineVPlanVF(
7398           TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
7399               .getFixedSize(),
7400           CM));
7401       LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
7402 
7403       // Make sure we have a VF > 1 for stress testing.
7404       if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
7405         LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
7406                           << "overriding computed VF.\n");
7407         VF = ElementCount::getFixed(4);
7408       }
7409     }
7410     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7411     assert(isPowerOf2_32(VF.getKnownMinValue()) &&
7412            "VF needs to be a power of two");
7413     LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
7414                       << "VF " << VF << " to build VPlans.\n");
7415     buildVPlans(VF, VF);
7416 
7417     // For VPlan build stress testing, we bail out after VPlan construction.
7418     if (VPlanBuildStressTest)
7419       return VectorizationFactor::Disabled();
7420 
7421     return {VF, 0 /*Cost*/};
7422   }
7423 
7424   LLVM_DEBUG(
7425       dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
7426                 "VPlan-native path.\n");
7427   return VectorizationFactor::Disabled();
7428 }
7429 
7430 Optional<VectorizationFactor>
7431 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
7432   assert(OrigLoop->isInnermost() && "Inner loop expected.");
7433   FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
7434   if (!MaxFactors) // Cases that should not to be vectorized nor interleaved.
7435     return None;
7436 
7437   // Invalidate interleave groups if all blocks of loop will be predicated.
7438   if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) &&
7439       !useMaskedInterleavedAccesses(*TTI)) {
7440     LLVM_DEBUG(
7441         dbgs()
7442         << "LV: Invalidate all interleaved groups due to fold-tail by masking "
7443            "which requires masked-interleaved support.\n");
7444     if (CM.InterleaveInfo.invalidateGroups())
7445       // Invalidating interleave groups also requires invalidating all decisions
7446       // based on them, which includes widening decisions and uniform and scalar
7447       // values.
7448       CM.invalidateCostModelingDecisions();
7449   }
7450 
7451   ElementCount MaxUserVF =
7452       UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF;
7453   bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF);
7454   if (!UserVF.isZero() && UserVFIsLegal) {
7455     assert(isPowerOf2_32(UserVF.getKnownMinValue()) &&
7456            "VF needs to be a power of two");
7457     // Collect the instructions (and their associated costs) that will be more
7458     // profitable to scalarize.
7459     if (CM.selectUserVectorizationFactor(UserVF)) {
7460       LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
7461       CM.collectInLoopReductions();
7462       buildVPlansWithVPRecipes(UserVF, UserVF);
7463       LLVM_DEBUG(printPlans(dbgs()));
7464       return {{UserVF, 0}};
7465     } else
7466       reportVectorizationInfo("UserVF ignored because of invalid costs.",
7467                               "InvalidCost", ORE, OrigLoop);
7468   }
7469 
7470   // Populate the set of Vectorization Factor Candidates.
7471   ElementCountSet VFCandidates;
7472   for (auto VF = ElementCount::getFixed(1);
7473        ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2)
7474     VFCandidates.insert(VF);
7475   for (auto VF = ElementCount::getScalable(1);
7476        ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2)
7477     VFCandidates.insert(VF);
7478 
7479   for (const auto &VF : VFCandidates) {
7480     // Collect Uniform and Scalar instructions after vectorization with VF.
7481     CM.collectUniformsAndScalars(VF);
7482 
7483     // Collect the instructions (and their associated costs) that will be more
7484     // profitable to scalarize.
7485     if (VF.isVector())
7486       CM.collectInstsToScalarize(VF);
7487   }
7488 
7489   CM.collectInLoopReductions();
7490   buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF);
7491   buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF);
7492 
7493   LLVM_DEBUG(printPlans(dbgs()));
7494   if (!MaxFactors.hasVector())
7495     return VectorizationFactor::Disabled();
7496 
7497   // Select the optimal vectorization factor.
7498   auto SelectedVF = CM.selectVectorizationFactor(VFCandidates);
7499 
7500   // Check if it is profitable to vectorize with runtime checks.
7501   unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks();
7502   if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) {
7503     bool PragmaThresholdReached =
7504         NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
7505     bool ThresholdReached =
7506         NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
7507     if ((ThresholdReached && !Hints.allowReordering()) ||
7508         PragmaThresholdReached) {
7509       ORE->emit([&]() {
7510         return OptimizationRemarkAnalysisAliasing(
7511                    DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(),
7512                    OrigLoop->getHeader())
7513                << "loop not vectorized: cannot prove it is safe to reorder "
7514                   "memory operations";
7515       });
7516       LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
7517       Hints.emitRemarkWithHints();
7518       return VectorizationFactor::Disabled();
7519     }
7520   }
7521   return SelectedVF;
7522 }
7523 
7524 VPlan &LoopVectorizationPlanner::getBestPlanFor(ElementCount VF) const {
7525   assert(count_if(VPlans,
7526                   [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) ==
7527              1 &&
7528          "Best VF has not a single VPlan.");
7529 
7530   for (const VPlanPtr &Plan : VPlans) {
7531     if (Plan->hasVF(VF))
7532       return *Plan.get();
7533   }
7534   llvm_unreachable("No plan found!");
7535 }
7536 
7537 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
7538   SmallVector<Metadata *, 4> MDs;
7539   // Reserve first location for self reference to the LoopID metadata node.
7540   MDs.push_back(nullptr);
7541   bool IsUnrollMetadata = false;
7542   MDNode *LoopID = L->getLoopID();
7543   if (LoopID) {
7544     // First find existing loop unrolling disable metadata.
7545     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
7546       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
7547       if (MD) {
7548         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
7549         IsUnrollMetadata =
7550             S && S->getString().startswith("llvm.loop.unroll.disable");
7551       }
7552       MDs.push_back(LoopID->getOperand(i));
7553     }
7554   }
7555 
7556   if (!IsUnrollMetadata) {
7557     // Add runtime unroll disable metadata.
7558     LLVMContext &Context = L->getHeader()->getContext();
7559     SmallVector<Metadata *, 1> DisableOperands;
7560     DisableOperands.push_back(
7561         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
7562     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
7563     MDs.push_back(DisableNode);
7564     MDNode *NewLoopID = MDNode::get(Context, MDs);
7565     // Set operand 0 to refer to the loop id itself.
7566     NewLoopID->replaceOperandWith(0, NewLoopID);
7567     L->setLoopID(NewLoopID);
7568   }
7569 }
7570 
7571 void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF,
7572                                            VPlan &BestVPlan,
7573                                            InnerLoopVectorizer &ILV,
7574                                            DominatorTree *DT) {
7575   LLVM_DEBUG(dbgs() << "Executing best plan with VF=" << BestVF << ", UF=" << BestUF
7576                     << '\n');
7577 
7578   // Perform the actual loop transformation.
7579 
7580   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
7581   VPTransformState State{BestVF, BestUF, LI, DT, ILV.Builder, &ILV, &BestVPlan};
7582   Value *CanonicalIVStartValue;
7583   std::tie(State.CFG.VectorPreHeader, CanonicalIVStartValue) =
7584       ILV.createVectorizedLoopSkeleton();
7585   ILV.collectPoisonGeneratingRecipes(State);
7586 
7587   ILV.printDebugTracesAtStart();
7588 
7589   //===------------------------------------------------===//
7590   //
7591   // Notice: any optimization or new instruction that go
7592   // into the code below should also be implemented in
7593   // the cost-model.
7594   //
7595   //===------------------------------------------------===//
7596 
7597   // 2. Copy and widen instructions from the old loop into the new loop.
7598   BestVPlan.prepareToExecute(ILV.getOrCreateTripCount(nullptr),
7599                              ILV.getOrCreateVectorTripCount(nullptr),
7600                              CanonicalIVStartValue, State);
7601   BestVPlan.execute(&State);
7602 
7603   // Keep all loop hints from the original loop on the vector loop (we'll
7604   // replace the vectorizer-specific hints below).
7605   MDNode *OrigLoopID = OrigLoop->getLoopID();
7606 
7607   Optional<MDNode *> VectorizedLoopID =
7608       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
7609                                       LLVMLoopVectorizeFollowupVectorized});
7610 
7611   Loop *L = LI->getLoopFor(State.CFG.PrevBB);
7612   if (VectorizedLoopID.hasValue())
7613     L->setLoopID(VectorizedLoopID.getValue());
7614   else {
7615     // Keep all loop hints from the original loop on the vector loop (we'll
7616     // replace the vectorizer-specific hints below).
7617     if (MDNode *LID = OrigLoop->getLoopID())
7618       L->setLoopID(LID);
7619 
7620     LoopVectorizeHints Hints(L, true, *ORE);
7621     Hints.setAlreadyVectorized();
7622   }
7623   // Disable runtime unrolling when vectorizing the epilogue loop.
7624   if (CanonicalIVStartValue)
7625     AddRuntimeUnrollDisableMetaData(L);
7626 
7627   // 3. Fix the vectorized code: take care of header phi's, live-outs,
7628   //    predication, updating analyses.
7629   ILV.fixVectorizedLoop(State);
7630 
7631   ILV.printDebugTracesAtEnd();
7632 }
7633 
7634 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
7635 void LoopVectorizationPlanner::printPlans(raw_ostream &O) {
7636   for (const auto &Plan : VPlans)
7637     if (PrintVPlansInDotFormat)
7638       Plan->printDOT(O);
7639     else
7640       Plan->print(O);
7641 }
7642 #endif
7643 
7644 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
7645     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
7646 
7647   // We create new control-flow for the vectorized loop, so the original exit
7648   // conditions will be dead after vectorization if it's only used by the
7649   // terminator
7650   SmallVector<BasicBlock*> ExitingBlocks;
7651   OrigLoop->getExitingBlocks(ExitingBlocks);
7652   for (auto *BB : ExitingBlocks) {
7653     auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
7654     if (!Cmp || !Cmp->hasOneUse())
7655       continue;
7656 
7657     // TODO: we should introduce a getUniqueExitingBlocks on Loop
7658     if (!DeadInstructions.insert(Cmp).second)
7659       continue;
7660 
7661     // The operands of the icmp is often a dead trunc, used by IndUpdate.
7662     // TODO: can recurse through operands in general
7663     for (Value *Op : Cmp->operands()) {
7664       if (isa<TruncInst>(Op) && Op->hasOneUse())
7665           DeadInstructions.insert(cast<Instruction>(Op));
7666     }
7667   }
7668 
7669   // We create new "steps" for induction variable updates to which the original
7670   // induction variables map. An original update instruction will be dead if
7671   // all its users except the induction variable are dead.
7672   auto *Latch = OrigLoop->getLoopLatch();
7673   for (auto &Induction : Legal->getInductionVars()) {
7674     PHINode *Ind = Induction.first;
7675     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
7676 
7677     // If the tail is to be folded by masking, the primary induction variable,
7678     // if exists, isn't dead: it will be used for masking. Don't kill it.
7679     if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction())
7680       continue;
7681 
7682     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
7683           return U == Ind || DeadInstructions.count(cast<Instruction>(U));
7684         }))
7685       DeadInstructions.insert(IndUpdate);
7686   }
7687 }
7688 
7689 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
7690 
7691 //===--------------------------------------------------------------------===//
7692 // EpilogueVectorizerMainLoop
7693 //===--------------------------------------------------------------------===//
7694 
7695 /// This function is partially responsible for generating the control flow
7696 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
7697 std::pair<BasicBlock *, Value *>
7698 EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
7699   MDNode *OrigLoopID = OrigLoop->getLoopID();
7700 
7701   // Workaround!  Compute the trip count of the original loop and cache it
7702   // before we start modifying the CFG.  This code has a systemic problem
7703   // wherein it tries to run analysis over partially constructed IR; this is
7704   // wrong, and not simply for SCEV.  The trip count of the original loop
7705   // simply happens to be prone to hitting this in practice.  In theory, we
7706   // can hit the same issue for any SCEV, or ValueTracking query done during
7707   // mutation.  See PR49900.
7708   getOrCreateTripCount(OrigLoop->getLoopPreheader());
7709   createVectorLoopSkeleton("");
7710 
7711   // Generate the code to check the minimum iteration count of the vector
7712   // epilogue (see below).
7713   EPI.EpilogueIterationCountCheck =
7714       emitMinimumIterationCountCheck(LoopScalarPreHeader, true);
7715   EPI.EpilogueIterationCountCheck->setName("iter.check");
7716 
7717   // Generate the code to check any assumptions that we've made for SCEV
7718   // expressions.
7719   EPI.SCEVSafetyCheck = emitSCEVChecks(LoopScalarPreHeader);
7720 
7721   // Generate the code that checks at runtime if arrays overlap. We put the
7722   // checks into a separate block to make the more common case of few elements
7723   // faster.
7724   EPI.MemSafetyCheck = emitMemRuntimeChecks(LoopScalarPreHeader);
7725 
7726   // Generate the iteration count check for the main loop, *after* the check
7727   // for the epilogue loop, so that the path-length is shorter for the case
7728   // that goes directly through the vector epilogue. The longer-path length for
7729   // the main loop is compensated for, by the gain from vectorizing the larger
7730   // trip count. Note: the branch will get updated later on when we vectorize
7731   // the epilogue.
7732   EPI.MainLoopIterationCountCheck =
7733       emitMinimumIterationCountCheck(LoopScalarPreHeader, false);
7734 
7735   // Generate the induction variable.
7736   Value *CountRoundDown = getOrCreateVectorTripCount(LoopVectorPreHeader);
7737   EPI.VectorTripCount = CountRoundDown;
7738 
7739   // Skip induction resume value creation here because they will be created in
7740   // the second pass. If we created them here, they wouldn't be used anyway,
7741   // because the vplan in the second pass still contains the inductions from the
7742   // original loop.
7743 
7744   return {completeLoopSkeleton(OrigLoopID), nullptr};
7745 }
7746 
7747 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
7748   LLVM_DEBUG({
7749     dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
7750            << "Main Loop VF:" << EPI.MainLoopVF
7751            << ", Main Loop UF:" << EPI.MainLoopUF
7752            << ", Epilogue Loop VF:" << EPI.EpilogueVF
7753            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
7754   });
7755 }
7756 
7757 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
7758   DEBUG_WITH_TYPE(VerboseDebug, {
7759     dbgs() << "intermediate fn:\n"
7760            << *OrigLoop->getHeader()->getParent() << "\n";
7761   });
7762 }
7763 
7764 BasicBlock *
7765 EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(BasicBlock *Bypass,
7766                                                            bool ForEpilogue) {
7767   assert(Bypass && "Expected valid bypass basic block.");
7768   ElementCount VFactor = ForEpilogue ? EPI.EpilogueVF : VF;
7769   unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF;
7770   Value *Count = getOrCreateTripCount(LoopVectorPreHeader);
7771   // Reuse existing vector loop preheader for TC checks.
7772   // Note that new preheader block is generated for vector loop.
7773   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
7774   IRBuilder<> Builder(TCCheckBlock->getTerminator());
7775 
7776   // Generate code to check if the loop's trip count is less than VF * UF of the
7777   // main vector loop.
7778   auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF) ?
7779       ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
7780 
7781   Value *CheckMinIters = Builder.CreateICmp(
7782       P, Count, createStepForVF(Builder, Count->getType(), VFactor, UFactor),
7783       "min.iters.check");
7784 
7785   if (!ForEpilogue)
7786     TCCheckBlock->setName("vector.main.loop.iter.check");
7787 
7788   // Create new preheader for vector loop.
7789   LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
7790                                    DT, LI, nullptr, "vector.ph");
7791 
7792   if (ForEpilogue) {
7793     assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
7794                                  DT->getNode(Bypass)->getIDom()) &&
7795            "TC check is expected to dominate Bypass");
7796 
7797     // Update dominator for Bypass & LoopExit.
7798     DT->changeImmediateDominator(Bypass, TCCheckBlock);
7799     if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF))
7800       // For loops with multiple exits, there's no edge from the middle block
7801       // to exit blocks (as the epilogue must run) and thus no need to update
7802       // the immediate dominator of the exit blocks.
7803       DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
7804 
7805     LoopBypassBlocks.push_back(TCCheckBlock);
7806 
7807     // Save the trip count so we don't have to regenerate it in the
7808     // vec.epilog.iter.check. This is safe to do because the trip count
7809     // generated here dominates the vector epilog iter check.
7810     EPI.TripCount = Count;
7811   }
7812 
7813   ReplaceInstWithInst(
7814       TCCheckBlock->getTerminator(),
7815       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
7816 
7817   return TCCheckBlock;
7818 }
7819 
7820 //===--------------------------------------------------------------------===//
7821 // EpilogueVectorizerEpilogueLoop
7822 //===--------------------------------------------------------------------===//
7823 
7824 /// This function is partially responsible for generating the control flow
7825 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
7826 std::pair<BasicBlock *, Value *>
7827 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
7828   MDNode *OrigLoopID = OrigLoop->getLoopID();
7829   createVectorLoopSkeleton("vec.epilog.");
7830 
7831   // Now, compare the remaining count and if there aren't enough iterations to
7832   // execute the vectorized epilogue skip to the scalar part.
7833   BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader;
7834   VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check");
7835   LoopVectorPreHeader =
7836       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
7837                  LI, nullptr, "vec.epilog.ph");
7838   emitMinimumVectorEpilogueIterCountCheck(LoopScalarPreHeader,
7839                                           VecEpilogueIterationCountCheck);
7840 
7841   // Adjust the control flow taking the state info from the main loop
7842   // vectorization into account.
7843   assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck &&
7844          "expected this to be saved from the previous pass.");
7845   EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith(
7846       VecEpilogueIterationCountCheck, LoopVectorPreHeader);
7847 
7848   DT->changeImmediateDominator(LoopVectorPreHeader,
7849                                EPI.MainLoopIterationCountCheck);
7850 
7851   EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
7852       VecEpilogueIterationCountCheck, LoopScalarPreHeader);
7853 
7854   if (EPI.SCEVSafetyCheck)
7855     EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith(
7856         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
7857   if (EPI.MemSafetyCheck)
7858     EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith(
7859         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
7860 
7861   DT->changeImmediateDominator(
7862       VecEpilogueIterationCountCheck,
7863       VecEpilogueIterationCountCheck->getSinglePredecessor());
7864 
7865   DT->changeImmediateDominator(LoopScalarPreHeader,
7866                                EPI.EpilogueIterationCountCheck);
7867   if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF))
7868     // If there is an epilogue which must run, there's no edge from the
7869     // middle block to exit blocks  and thus no need to update the immediate
7870     // dominator of the exit blocks.
7871     DT->changeImmediateDominator(LoopExitBlock,
7872                                  EPI.EpilogueIterationCountCheck);
7873 
7874   // Keep track of bypass blocks, as they feed start values to the induction
7875   // phis in the scalar loop preheader.
7876   if (EPI.SCEVSafetyCheck)
7877     LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck);
7878   if (EPI.MemSafetyCheck)
7879     LoopBypassBlocks.push_back(EPI.MemSafetyCheck);
7880   LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck);
7881 
7882   // The vec.epilog.iter.check block may contain Phi nodes from reductions which
7883   // merge control-flow from the latch block and the middle block. Update the
7884   // incoming values here and move the Phi into the preheader.
7885   SmallVector<PHINode *, 4> PhisInBlock;
7886   for (PHINode &Phi : VecEpilogueIterationCountCheck->phis())
7887     PhisInBlock.push_back(&Phi);
7888 
7889   for (PHINode *Phi : PhisInBlock) {
7890     Phi->replaceIncomingBlockWith(
7891         VecEpilogueIterationCountCheck->getSinglePredecessor(),
7892         VecEpilogueIterationCountCheck);
7893     Phi->removeIncomingValue(EPI.EpilogueIterationCountCheck);
7894     if (EPI.SCEVSafetyCheck)
7895       Phi->removeIncomingValue(EPI.SCEVSafetyCheck);
7896     if (EPI.MemSafetyCheck)
7897       Phi->removeIncomingValue(EPI.MemSafetyCheck);
7898     Phi->moveBefore(LoopVectorPreHeader->getFirstNonPHI());
7899   }
7900 
7901   // Generate a resume induction for the vector epilogue and put it in the
7902   // vector epilogue preheader
7903   Type *IdxTy = Legal->getWidestInductionType();
7904   PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
7905                                          LoopVectorPreHeader->getFirstNonPHI());
7906   EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
7907   EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
7908                            EPI.MainLoopIterationCountCheck);
7909 
7910   // Generate induction resume values. These variables save the new starting
7911   // indexes for the scalar loop. They are used to test if there are any tail
7912   // iterations left once the vector loop has completed.
7913   // Note that when the vectorized epilogue is skipped due to iteration count
7914   // check, then the resume value for the induction variable comes from
7915   // the trip count of the main vector loop, hence passing the AdditionalBypass
7916   // argument.
7917   createInductionResumeValues({VecEpilogueIterationCountCheck,
7918                                EPI.VectorTripCount} /* AdditionalBypass */);
7919 
7920   return {completeLoopSkeleton(OrigLoopID), EPResumeVal};
7921 }
7922 
7923 BasicBlock *
7924 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
7925     BasicBlock *Bypass, BasicBlock *Insert) {
7926 
7927   assert(EPI.TripCount &&
7928          "Expected trip count to have been safed in the first pass.");
7929   assert(
7930       (!isa<Instruction>(EPI.TripCount) ||
7931        DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
7932       "saved trip count does not dominate insertion point.");
7933   Value *TC = EPI.TripCount;
7934   IRBuilder<> Builder(Insert->getTerminator());
7935   Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");
7936 
7937   // Generate code to check if the loop's trip count is less than VF * UF of the
7938   // vector epilogue loop.
7939   auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ?
7940       ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
7941 
7942   Value *CheckMinIters =
7943       Builder.CreateICmp(P, Count,
7944                          createStepForVF(Builder, Count->getType(),
7945                                          EPI.EpilogueVF, EPI.EpilogueUF),
7946                          "min.epilog.iters.check");
7947 
7948   ReplaceInstWithInst(
7949       Insert->getTerminator(),
7950       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
7951 
7952   LoopBypassBlocks.push_back(Insert);
7953   return Insert;
7954 }
7955 
7956 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
7957   LLVM_DEBUG({
7958     dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
7959            << "Epilogue Loop VF:" << EPI.EpilogueVF
7960            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
7961   });
7962 }
7963 
7964 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
7965   DEBUG_WITH_TYPE(VerboseDebug, {
7966     dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n";
7967   });
7968 }
7969 
7970 bool LoopVectorizationPlanner::getDecisionAndClampRange(
7971     const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
7972   assert(!Range.isEmpty() && "Trying to test an empty VF range.");
7973   bool PredicateAtRangeStart = Predicate(Range.Start);
7974 
7975   for (ElementCount TmpVF = Range.Start * 2;
7976        ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
7977     if (Predicate(TmpVF) != PredicateAtRangeStart) {
7978       Range.End = TmpVF;
7979       break;
7980     }
7981 
7982   return PredicateAtRangeStart;
7983 }
7984 
7985 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
7986 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
7987 /// of VF's starting at a given VF and extending it as much as possible. Each
7988 /// vectorization decision can potentially shorten this sub-range during
7989 /// buildVPlan().
7990 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
7991                                            ElementCount MaxVF) {
7992   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
7993   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
7994     VFRange SubRange = {VF, MaxVFPlusOne};
7995     VPlans.push_back(buildVPlan(SubRange));
7996     VF = SubRange.End;
7997   }
7998 }
7999 
8000 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
8001                                          VPlanPtr &Plan) {
8002   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
8003 
8004   // Look for cached value.
8005   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
8006   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
8007   if (ECEntryIt != EdgeMaskCache.end())
8008     return ECEntryIt->second;
8009 
8010   VPValue *SrcMask = createBlockInMask(Src, Plan);
8011 
8012   // The terminator has to be a branch inst!
8013   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
8014   assert(BI && "Unexpected terminator found");
8015 
8016   if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
8017     return EdgeMaskCache[Edge] = SrcMask;
8018 
8019   // If source is an exiting block, we know the exit edge is dynamically dead
8020   // in the vector loop, and thus we don't need to restrict the mask.  Avoid
8021   // adding uses of an otherwise potentially dead instruction.
8022   if (OrigLoop->isLoopExiting(Src))
8023     return EdgeMaskCache[Edge] = SrcMask;
8024 
8025   VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition());
8026   assert(EdgeMask && "No Edge Mask found for condition");
8027 
8028   if (BI->getSuccessor(0) != Dst)
8029     EdgeMask = Builder.createNot(EdgeMask, BI->getDebugLoc());
8030 
8031   if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND.
8032     // The condition is 'SrcMask && EdgeMask', which is equivalent to
8033     // 'select i1 SrcMask, i1 EdgeMask, i1 false'.
8034     // The select version does not introduce new UB if SrcMask is false and
8035     // EdgeMask is poison. Using 'and' here introduces undefined behavior.
8036     VPValue *False = Plan->getOrAddVPValue(
8037         ConstantInt::getFalse(BI->getCondition()->getType()));
8038     EdgeMask =
8039         Builder.createSelect(SrcMask, EdgeMask, False, BI->getDebugLoc());
8040   }
8041 
8042   return EdgeMaskCache[Edge] = EdgeMask;
8043 }
8044 
8045 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
8046   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
8047 
8048   // Look for cached value.
8049   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
8050   if (BCEntryIt != BlockMaskCache.end())
8051     return BCEntryIt->second;
8052 
8053   // All-one mask is modelled as no-mask following the convention for masked
8054   // load/store/gather/scatter. Initialize BlockMask to no-mask.
8055   VPValue *BlockMask = nullptr;
8056 
8057   if (OrigLoop->getHeader() == BB) {
8058     if (!CM.blockNeedsPredicationForAnyReason(BB))
8059       return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
8060 
8061     // Introduce the early-exit compare IV <= BTC to form header block mask.
8062     // This is used instead of IV < TC because TC may wrap, unlike BTC. Start by
8063     // constructing the desired canonical IV in the header block as its first
8064     // non-phi instructions.
8065     assert(CM.foldTailByMasking() && "must fold the tail");
8066     VPBasicBlock *HeaderVPBB =
8067         Plan->getVectorLoopRegion()->getEntryBasicBlock();
8068     auto NewInsertionPoint = HeaderVPBB->getFirstNonPhi();
8069     auto *IV = new VPWidenCanonicalIVRecipe(Plan->getCanonicalIV());
8070     HeaderVPBB->insert(IV, HeaderVPBB->getFirstNonPhi());
8071 
8072     VPBuilder::InsertPointGuard Guard(Builder);
8073     Builder.setInsertPoint(HeaderVPBB, NewInsertionPoint);
8074     if (CM.TTI.emitGetActiveLaneMask()) {
8075       VPValue *TC = Plan->getOrCreateTripCount();
8076       BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV, TC});
8077     } else {
8078       VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
8079       BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
8080     }
8081     return BlockMaskCache[BB] = BlockMask;
8082   }
8083 
8084   // This is the block mask. We OR all incoming edges.
8085   for (auto *Predecessor : predecessors(BB)) {
8086     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
8087     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
8088       return BlockMaskCache[BB] = EdgeMask;
8089 
8090     if (!BlockMask) { // BlockMask has its initialized nullptr value.
8091       BlockMask = EdgeMask;
8092       continue;
8093     }
8094 
8095     BlockMask = Builder.createOr(BlockMask, EdgeMask, {});
8096   }
8097 
8098   return BlockMaskCache[BB] = BlockMask;
8099 }
8100 
8101 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I,
8102                                                 ArrayRef<VPValue *> Operands,
8103                                                 VFRange &Range,
8104                                                 VPlanPtr &Plan) {
8105   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
8106          "Must be called with either a load or store");
8107 
8108   auto willWiden = [&](ElementCount VF) -> bool {
8109     if (VF.isScalar())
8110       return false;
8111     LoopVectorizationCostModel::InstWidening Decision =
8112         CM.getWideningDecision(I, VF);
8113     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
8114            "CM decision should be taken at this point.");
8115     if (Decision == LoopVectorizationCostModel::CM_Interleave)
8116       return true;
8117     if (CM.isScalarAfterVectorization(I, VF) ||
8118         CM.isProfitableToScalarize(I, VF))
8119       return false;
8120     return Decision != LoopVectorizationCostModel::CM_Scalarize;
8121   };
8122 
8123   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8124     return nullptr;
8125 
8126   VPValue *Mask = nullptr;
8127   if (Legal->isMaskRequired(I))
8128     Mask = createBlockInMask(I->getParent(), Plan);
8129 
8130   // Determine if the pointer operand of the access is either consecutive or
8131   // reverse consecutive.
8132   LoopVectorizationCostModel::InstWidening Decision =
8133       CM.getWideningDecision(I, Range.Start);
8134   bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse;
8135   bool Consecutive =
8136       Reverse || Decision == LoopVectorizationCostModel::CM_Widen;
8137 
8138   if (LoadInst *Load = dyn_cast<LoadInst>(I))
8139     return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask,
8140                                               Consecutive, Reverse);
8141 
8142   StoreInst *Store = cast<StoreInst>(I);
8143   return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0],
8144                                             Mask, Consecutive, Reverse);
8145 }
8146 
8147 static VPWidenIntOrFpInductionRecipe *
8148 createWidenInductionRecipe(PHINode *Phi, Instruction *PhiOrTrunc,
8149                            VPValue *Start, const InductionDescriptor &IndDesc,
8150                            LoopVectorizationCostModel &CM, ScalarEvolution &SE,
8151                            Loop &OrigLoop, VFRange &Range) {
8152   // Returns true if an instruction \p I should be scalarized instead of
8153   // vectorized for the chosen vectorization factor.
8154   auto ShouldScalarizeInstruction = [&CM](Instruction *I, ElementCount VF) {
8155     return CM.isScalarAfterVectorization(I, VF) ||
8156            CM.isProfitableToScalarize(I, VF);
8157   };
8158 
8159   bool NeedsScalarIV = LoopVectorizationPlanner::getDecisionAndClampRange(
8160       [&](ElementCount VF) {
8161         // Returns true if we should generate a scalar version of \p IV.
8162         if (ShouldScalarizeInstruction(PhiOrTrunc, VF))
8163           return true;
8164         auto isScalarInst = [&](User *U) -> bool {
8165           auto *I = cast<Instruction>(U);
8166           return OrigLoop.contains(I) && ShouldScalarizeInstruction(I, VF);
8167         };
8168         return any_of(PhiOrTrunc->users(), isScalarInst);
8169       },
8170       Range);
8171   bool NeedsScalarIVOnly = LoopVectorizationPlanner::getDecisionAndClampRange(
8172       [&](ElementCount VF) {
8173         return ShouldScalarizeInstruction(PhiOrTrunc, VF);
8174       },
8175       Range);
8176   assert(IndDesc.getStartValue() ==
8177          Phi->getIncomingValueForBlock(OrigLoop.getLoopPreheader()));
8178   assert(SE.isLoopInvariant(IndDesc.getStep(), &OrigLoop) &&
8179          "step must be loop invariant");
8180   if (auto *TruncI = dyn_cast<TruncInst>(PhiOrTrunc)) {
8181     return new VPWidenIntOrFpInductionRecipe(
8182         Phi, Start, IndDesc, TruncI, NeedsScalarIV, !NeedsScalarIVOnly, SE);
8183   }
8184   assert(isa<PHINode>(PhiOrTrunc) && "must be a phi node here");
8185   return new VPWidenIntOrFpInductionRecipe(Phi, Start, IndDesc, NeedsScalarIV,
8186                                            !NeedsScalarIVOnly, SE);
8187 }
8188 
8189 VPRecipeBase *VPRecipeBuilder::tryToOptimizeInductionPHI(
8190     PHINode *Phi, ArrayRef<VPValue *> Operands, VFRange &Range) const {
8191 
8192   // Check if this is an integer or fp induction. If so, build the recipe that
8193   // produces its scalar and vector values.
8194   if (auto *II = Legal->getIntOrFpInductionDescriptor(Phi))
8195     return createWidenInductionRecipe(Phi, Phi, Operands[0], *II, CM,
8196                                       *PSE.getSE(), *OrigLoop, Range);
8197 
8198   // Check if this is pointer induction. If so, build the recipe for it.
8199   if (auto *II = Legal->getPointerInductionDescriptor(Phi))
8200     return new VPWidenPointerInductionRecipe(Phi, Operands[0], *II,
8201                                              *PSE.getSE());
8202   return nullptr;
8203 }
8204 
8205 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate(
8206     TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range,
8207     VPlan &Plan) const {
8208   // Optimize the special case where the source is a constant integer
8209   // induction variable. Notice that we can only optimize the 'trunc' case
8210   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
8211   // (c) other casts depend on pointer size.
8212 
8213   // Determine whether \p K is a truncation based on an induction variable that
8214   // can be optimized.
8215   auto isOptimizableIVTruncate =
8216       [&](Instruction *K) -> std::function<bool(ElementCount)> {
8217     return [=](ElementCount VF) -> bool {
8218       return CM.isOptimizableIVTruncate(K, VF);
8219     };
8220   };
8221 
8222   if (LoopVectorizationPlanner::getDecisionAndClampRange(
8223           isOptimizableIVTruncate(I), Range)) {
8224 
8225     auto *Phi = cast<PHINode>(I->getOperand(0));
8226     const InductionDescriptor &II = *Legal->getIntOrFpInductionDescriptor(Phi);
8227     VPValue *Start = Plan.getOrAddVPValue(II.getStartValue());
8228     return createWidenInductionRecipe(Phi, I, Start, II, CM, *PSE.getSE(),
8229                                       *OrigLoop, Range);
8230   }
8231   return nullptr;
8232 }
8233 
8234 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi,
8235                                                 ArrayRef<VPValue *> Operands,
8236                                                 VPlanPtr &Plan) {
8237   // If all incoming values are equal, the incoming VPValue can be used directly
8238   // instead of creating a new VPBlendRecipe.
8239   VPValue *FirstIncoming = Operands[0];
8240   if (all_of(Operands, [FirstIncoming](const VPValue *Inc) {
8241         return FirstIncoming == Inc;
8242       })) {
8243     return Operands[0];
8244   }
8245 
8246   unsigned NumIncoming = Phi->getNumIncomingValues();
8247   // For in-loop reductions, we do not need to create an additional select.
8248   VPValue *InLoopVal = nullptr;
8249   for (unsigned In = 0; In < NumIncoming; In++) {
8250     PHINode *PhiOp =
8251         dyn_cast_or_null<PHINode>(Operands[In]->getUnderlyingValue());
8252     if (PhiOp && CM.isInLoopReduction(PhiOp)) {
8253       assert(!InLoopVal && "Found more than one in-loop reduction!");
8254       InLoopVal = Operands[In];
8255     }
8256   }
8257 
8258   assert((!InLoopVal || NumIncoming == 2) &&
8259          "Found an in-loop reduction for PHI with unexpected number of "
8260          "incoming values");
8261   if (InLoopVal)
8262     return Operands[Operands[0] == InLoopVal ? 1 : 0];
8263 
8264   // We know that all PHIs in non-header blocks are converted into selects, so
8265   // we don't have to worry about the insertion order and we can just use the
8266   // builder. At this point we generate the predication tree. There may be
8267   // duplications since this is a simple recursive scan, but future
8268   // optimizations will clean it up.
8269   SmallVector<VPValue *, 2> OperandsWithMask;
8270 
8271   for (unsigned In = 0; In < NumIncoming; In++) {
8272     VPValue *EdgeMask =
8273       createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
8274     assert((EdgeMask || NumIncoming == 1) &&
8275            "Multiple predecessors with one having a full mask");
8276     OperandsWithMask.push_back(Operands[In]);
8277     if (EdgeMask)
8278       OperandsWithMask.push_back(EdgeMask);
8279   }
8280   return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask));
8281 }
8282 
8283 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
8284                                                    ArrayRef<VPValue *> Operands,
8285                                                    VFRange &Range) const {
8286 
8287   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8288       [this, CI](ElementCount VF) {
8289         return CM.isScalarWithPredication(CI, VF);
8290       },
8291       Range);
8292 
8293   if (IsPredicated)
8294     return nullptr;
8295 
8296   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8297   if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
8298              ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
8299              ID == Intrinsic::pseudoprobe ||
8300              ID == Intrinsic::experimental_noalias_scope_decl))
8301     return nullptr;
8302 
8303   auto willWiden = [&](ElementCount VF) -> bool {
8304     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8305     // The following case may be scalarized depending on the VF.
8306     // The flag shows whether we use Intrinsic or a usual Call for vectorized
8307     // version of the instruction.
8308     // Is it beneficial to perform intrinsic call compared to lib call?
8309     bool NeedToScalarize = false;
8310     InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
8311     InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0;
8312     bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
8313     return UseVectorIntrinsic || !NeedToScalarize;
8314   };
8315 
8316   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8317     return nullptr;
8318 
8319   ArrayRef<VPValue *> Ops = Operands.take_front(CI->arg_size());
8320   return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end()));
8321 }
8322 
8323 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
8324   assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
8325          !isa<StoreInst>(I) && "Instruction should have been handled earlier");
8326   // Instruction should be widened, unless it is scalar after vectorization,
8327   // scalarization is profitable or it is predicated.
8328   auto WillScalarize = [this, I](ElementCount VF) -> bool {
8329     return CM.isScalarAfterVectorization(I, VF) ||
8330            CM.isProfitableToScalarize(I, VF) ||
8331            CM.isScalarWithPredication(I, VF);
8332   };
8333   return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
8334                                                              Range);
8335 }
8336 
8337 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
8338                                            ArrayRef<VPValue *> Operands) const {
8339   auto IsVectorizableOpcode = [](unsigned Opcode) {
8340     switch (Opcode) {
8341     case Instruction::Add:
8342     case Instruction::And:
8343     case Instruction::AShr:
8344     case Instruction::BitCast:
8345     case Instruction::FAdd:
8346     case Instruction::FCmp:
8347     case Instruction::FDiv:
8348     case Instruction::FMul:
8349     case Instruction::FNeg:
8350     case Instruction::FPExt:
8351     case Instruction::FPToSI:
8352     case Instruction::FPToUI:
8353     case Instruction::FPTrunc:
8354     case Instruction::FRem:
8355     case Instruction::FSub:
8356     case Instruction::ICmp:
8357     case Instruction::IntToPtr:
8358     case Instruction::LShr:
8359     case Instruction::Mul:
8360     case Instruction::Or:
8361     case Instruction::PtrToInt:
8362     case Instruction::SDiv:
8363     case Instruction::Select:
8364     case Instruction::SExt:
8365     case Instruction::Shl:
8366     case Instruction::SIToFP:
8367     case Instruction::SRem:
8368     case Instruction::Sub:
8369     case Instruction::Trunc:
8370     case Instruction::UDiv:
8371     case Instruction::UIToFP:
8372     case Instruction::URem:
8373     case Instruction::Xor:
8374     case Instruction::ZExt:
8375       return true;
8376     }
8377     return false;
8378   };
8379 
8380   if (!IsVectorizableOpcode(I->getOpcode()))
8381     return nullptr;
8382 
8383   // Success: widen this instruction.
8384   return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end()));
8385 }
8386 
8387 void VPRecipeBuilder::fixHeaderPhis() {
8388   BasicBlock *OrigLatch = OrigLoop->getLoopLatch();
8389   for (VPHeaderPHIRecipe *R : PhisToFix) {
8390     auto *PN = cast<PHINode>(R->getUnderlyingValue());
8391     VPRecipeBase *IncR =
8392         getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch)));
8393     R->addOperand(IncR->getVPSingleValue());
8394   }
8395 }
8396 
8397 VPBasicBlock *VPRecipeBuilder::handleReplication(
8398     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
8399     VPlanPtr &Plan) {
8400   bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
8401       [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
8402       Range);
8403 
8404   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8405       [&](ElementCount VF) { return CM.isPredicatedInst(I, VF, IsUniform); },
8406       Range);
8407 
8408   // Even if the instruction is not marked as uniform, there are certain
8409   // intrinsic calls that can be effectively treated as such, so we check for
8410   // them here. Conservatively, we only do this for scalable vectors, since
8411   // for fixed-width VFs we can always fall back on full scalarization.
8412   if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) {
8413     switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
8414     case Intrinsic::assume:
8415     case Intrinsic::lifetime_start:
8416     case Intrinsic::lifetime_end:
8417       // For scalable vectors if one of the operands is variant then we still
8418       // want to mark as uniform, which will generate one instruction for just
8419       // the first lane of the vector. We can't scalarize the call in the same
8420       // way as for fixed-width vectors because we don't know how many lanes
8421       // there are.
8422       //
8423       // The reasons for doing it this way for scalable vectors are:
8424       //   1. For the assume intrinsic generating the instruction for the first
8425       //      lane is still be better than not generating any at all. For
8426       //      example, the input may be a splat across all lanes.
8427       //   2. For the lifetime start/end intrinsics the pointer operand only
8428       //      does anything useful when the input comes from a stack object,
8429       //      which suggests it should always be uniform. For non-stack objects
8430       //      the effect is to poison the object, which still allows us to
8431       //      remove the call.
8432       IsUniform = true;
8433       break;
8434     default:
8435       break;
8436     }
8437   }
8438 
8439   auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
8440                                        IsUniform, IsPredicated);
8441   setRecipe(I, Recipe);
8442   Plan->addVPValue(I, Recipe);
8443 
8444   // Find if I uses a predicated instruction. If so, it will use its scalar
8445   // value. Avoid hoisting the insert-element which packs the scalar value into
8446   // a vector value, as that happens iff all users use the vector value.
8447   for (VPValue *Op : Recipe->operands()) {
8448     auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef());
8449     if (!PredR)
8450       continue;
8451     auto *RepR =
8452         cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef());
8453     assert(RepR->isPredicated() &&
8454            "expected Replicate recipe to be predicated");
8455     RepR->setAlsoPack(false);
8456   }
8457 
8458   // Finalize the recipe for Instr, first if it is not predicated.
8459   if (!IsPredicated) {
8460     LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
8461     VPBB->appendRecipe(Recipe);
8462     return VPBB;
8463   }
8464   LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
8465 
8466   VPBlockBase *SingleSucc = VPBB->getSingleSuccessor();
8467   assert(SingleSucc && "VPBB must have a single successor when handling "
8468                        "predicated replication.");
8469   VPBlockUtils::disconnectBlocks(VPBB, SingleSucc);
8470   // Record predicated instructions for above packing optimizations.
8471   VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
8472   VPBlockUtils::insertBlockAfter(Region, VPBB);
8473   auto *RegSucc = new VPBasicBlock();
8474   VPBlockUtils::insertBlockAfter(RegSucc, Region);
8475   VPBlockUtils::connectBlocks(RegSucc, SingleSucc);
8476   return RegSucc;
8477 }
8478 
8479 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
8480                                                       VPRecipeBase *PredRecipe,
8481                                                       VPlanPtr &Plan) {
8482   // Instructions marked for predication are replicated and placed under an
8483   // if-then construct to prevent side-effects.
8484 
8485   // Generate recipes to compute the block mask for this region.
8486   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
8487 
8488   // Build the triangular if-then region.
8489   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
8490   assert(Instr->getParent() && "Predicated instruction not in any basic block");
8491   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
8492   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
8493   auto *PHIRecipe = Instr->getType()->isVoidTy()
8494                         ? nullptr
8495                         : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr));
8496   if (PHIRecipe) {
8497     Plan->removeVPValueFor(Instr);
8498     Plan->addVPValue(Instr, PHIRecipe);
8499   }
8500   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
8501   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
8502   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
8503 
8504   // Note: first set Entry as region entry and then connect successors starting
8505   // from it in order, to propagate the "parent" of each VPBasicBlock.
8506   VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
8507   VPBlockUtils::connectBlocks(Pred, Exit);
8508 
8509   return Region;
8510 }
8511 
8512 VPRecipeOrVPValueTy
8513 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
8514                                         ArrayRef<VPValue *> Operands,
8515                                         VFRange &Range, VPlanPtr &Plan) {
8516   // First, check for specific widening recipes that deal with calls, memory
8517   // operations, inductions and Phi nodes.
8518   if (auto *CI = dyn_cast<CallInst>(Instr))
8519     return toVPRecipeResult(tryToWidenCall(CI, Operands, Range));
8520 
8521   if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8522     return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan));
8523 
8524   VPRecipeBase *Recipe;
8525   if (auto Phi = dyn_cast<PHINode>(Instr)) {
8526     if (Phi->getParent() != OrigLoop->getHeader())
8527       return tryToBlend(Phi, Operands, Plan);
8528     if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands, Range)))
8529       return toVPRecipeResult(Recipe);
8530 
8531     VPHeaderPHIRecipe *PhiRecipe = nullptr;
8532     if (Legal->isReductionVariable(Phi) || Legal->isFirstOrderRecurrence(Phi)) {
8533       VPValue *StartV = Operands[0];
8534       if (Legal->isReductionVariable(Phi)) {
8535         const RecurrenceDescriptor &RdxDesc =
8536             Legal->getReductionVars().find(Phi)->second;
8537         assert(RdxDesc.getRecurrenceStartValue() ==
8538                Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8539         PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV,
8540                                              CM.isInLoopReduction(Phi),
8541                                              CM.useOrderedReductions(RdxDesc));
8542       } else {
8543         PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV);
8544       }
8545 
8546       // Record the incoming value from the backedge, so we can add the incoming
8547       // value from the backedge after all recipes have been created.
8548       recordRecipeOf(cast<Instruction>(
8549           Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch())));
8550       PhisToFix.push_back(PhiRecipe);
8551     } else {
8552       // TODO: record backedge value for remaining pointer induction phis.
8553       assert(Phi->getType()->isPointerTy() &&
8554              "only pointer phis should be handled here");
8555       assert(Legal->getInductionVars().count(Phi) &&
8556              "Not an induction variable");
8557       InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
8558       VPValue *Start = Plan->getOrAddVPValue(II.getStartValue());
8559       PhiRecipe = new VPWidenPHIRecipe(Phi, Start);
8560     }
8561 
8562     return toVPRecipeResult(PhiRecipe);
8563   }
8564 
8565   if (isa<TruncInst>(Instr) &&
8566       (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands,
8567                                                Range, *Plan)))
8568     return toVPRecipeResult(Recipe);
8569 
8570   if (!shouldWiden(Instr, Range))
8571     return nullptr;
8572 
8573   if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
8574     return toVPRecipeResult(new VPWidenGEPRecipe(
8575         GEP, make_range(Operands.begin(), Operands.end()), OrigLoop));
8576 
8577   if (auto *SI = dyn_cast<SelectInst>(Instr)) {
8578     bool InvariantCond =
8579         PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
8580     return toVPRecipeResult(new VPWidenSelectRecipe(
8581         *SI, make_range(Operands.begin(), Operands.end()), InvariantCond));
8582   }
8583 
8584   return toVPRecipeResult(tryToWiden(Instr, Operands));
8585 }
8586 
8587 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8588                                                         ElementCount MaxVF) {
8589   assert(OrigLoop->isInnermost() && "Inner loop expected.");
8590 
8591   // Collect instructions from the original loop that will become trivially dead
8592   // in the vectorized loop. We don't need to vectorize these instructions. For
8593   // example, original induction update instructions can become dead because we
8594   // separately emit induction "steps" when generating code for the new loop.
8595   // Similarly, we create a new latch condition when setting up the structure
8596   // of the new loop, so the old one can become dead.
8597   SmallPtrSet<Instruction *, 4> DeadInstructions;
8598   collectTriviallyDeadInstructions(DeadInstructions);
8599 
8600   // Add assume instructions we need to drop to DeadInstructions, to prevent
8601   // them from being added to the VPlan.
8602   // TODO: We only need to drop assumes in blocks that get flattend. If the
8603   // control flow is preserved, we should keep them.
8604   auto &ConditionalAssumes = Legal->getConditionalAssumes();
8605   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
8606 
8607   MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
8608   // Dead instructions do not need sinking. Remove them from SinkAfter.
8609   for (Instruction *I : DeadInstructions)
8610     SinkAfter.erase(I);
8611 
8612   // Cannot sink instructions after dead instructions (there won't be any
8613   // recipes for them). Instead, find the first non-dead previous instruction.
8614   for (auto &P : Legal->getSinkAfter()) {
8615     Instruction *SinkTarget = P.second;
8616     Instruction *FirstInst = &*SinkTarget->getParent()->begin();
8617     (void)FirstInst;
8618     while (DeadInstructions.contains(SinkTarget)) {
8619       assert(
8620           SinkTarget != FirstInst &&
8621           "Must find a live instruction (at least the one feeding the "
8622           "first-order recurrence PHI) before reaching beginning of the block");
8623       SinkTarget = SinkTarget->getPrevNode();
8624       assert(SinkTarget != P.first &&
8625              "sink source equals target, no sinking required");
8626     }
8627     P.second = SinkTarget;
8628   }
8629 
8630   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8631   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8632     VFRange SubRange = {VF, MaxVFPlusOne};
8633     VPlans.push_back(
8634         buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
8635     VF = SubRange.End;
8636   }
8637 }
8638 
8639 // Add a VPCanonicalIVPHIRecipe starting at 0 to the header, a
8640 // CanonicalIVIncrement{NUW} VPInstruction to increment it by VF * UF and a
8641 // BranchOnCount VPInstruction to the latch.
8642 static void addCanonicalIVRecipes(VPlan &Plan, Type *IdxTy, DebugLoc DL,
8643                                   bool HasNUW, bool IsVPlanNative) {
8644   Value *StartIdx = ConstantInt::get(IdxTy, 0);
8645   auto *StartV = Plan.getOrAddVPValue(StartIdx);
8646 
8647   auto *CanonicalIVPHI = new VPCanonicalIVPHIRecipe(StartV, DL);
8648   VPRegionBlock *TopRegion = Plan.getVectorLoopRegion();
8649   VPBasicBlock *Header = TopRegion->getEntryBasicBlock();
8650   if (IsVPlanNative)
8651     Header = cast<VPBasicBlock>(Header->getSingleSuccessor());
8652   Header->insert(CanonicalIVPHI, Header->begin());
8653 
8654   auto *CanonicalIVIncrement =
8655       new VPInstruction(HasNUW ? VPInstruction::CanonicalIVIncrementNUW
8656                                : VPInstruction::CanonicalIVIncrement,
8657                         {CanonicalIVPHI}, DL);
8658   CanonicalIVPHI->addOperand(CanonicalIVIncrement);
8659 
8660   VPBasicBlock *EB = TopRegion->getExitBasicBlock();
8661   if (IsVPlanNative) {
8662     EB = cast<VPBasicBlock>(EB->getSinglePredecessor());
8663     EB->setCondBit(nullptr);
8664   }
8665   EB->appendRecipe(CanonicalIVIncrement);
8666 
8667   auto *BranchOnCount =
8668       new VPInstruction(VPInstruction::BranchOnCount,
8669                         {CanonicalIVIncrement, &Plan.getVectorTripCount()}, DL);
8670   EB->appendRecipe(BranchOnCount);
8671 }
8672 
8673 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
8674     VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
8675     const MapVector<Instruction *, Instruction *> &SinkAfter) {
8676 
8677   SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
8678 
8679   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
8680 
8681   // ---------------------------------------------------------------------------
8682   // Pre-construction: record ingredients whose recipes we'll need to further
8683   // process after constructing the initial VPlan.
8684   // ---------------------------------------------------------------------------
8685 
8686   // Mark instructions we'll need to sink later and their targets as
8687   // ingredients whose recipe we'll need to record.
8688   for (auto &Entry : SinkAfter) {
8689     RecipeBuilder.recordRecipeOf(Entry.first);
8690     RecipeBuilder.recordRecipeOf(Entry.second);
8691   }
8692   for (auto &Reduction : CM.getInLoopReductionChains()) {
8693     PHINode *Phi = Reduction.first;
8694     RecurKind Kind =
8695         Legal->getReductionVars().find(Phi)->second.getRecurrenceKind();
8696     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
8697 
8698     RecipeBuilder.recordRecipeOf(Phi);
8699     for (auto &R : ReductionOperations) {
8700       RecipeBuilder.recordRecipeOf(R);
8701       // For min/max reductions, where we have a pair of icmp/select, we also
8702       // need to record the ICmp recipe, so it can be removed later.
8703       assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
8704              "Only min/max recurrences allowed for inloop reductions");
8705       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
8706         RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
8707     }
8708   }
8709 
8710   // For each interleave group which is relevant for this (possibly trimmed)
8711   // Range, add it to the set of groups to be later applied to the VPlan and add
8712   // placeholders for its members' Recipes which we'll be replacing with a
8713   // single VPInterleaveRecipe.
8714   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
8715     auto applyIG = [IG, this](ElementCount VF) -> bool {
8716       return (VF.isVector() && // Query is illegal for VF == 1
8717               CM.getWideningDecision(IG->getInsertPos(), VF) ==
8718                   LoopVectorizationCostModel::CM_Interleave);
8719     };
8720     if (!getDecisionAndClampRange(applyIG, Range))
8721       continue;
8722     InterleaveGroups.insert(IG);
8723     for (unsigned i = 0; i < IG->getFactor(); i++)
8724       if (Instruction *Member = IG->getMember(i))
8725         RecipeBuilder.recordRecipeOf(Member);
8726   };
8727 
8728   // ---------------------------------------------------------------------------
8729   // Build initial VPlan: Scan the body of the loop in a topological order to
8730   // visit each basic block after having visited its predecessor basic blocks.
8731   // ---------------------------------------------------------------------------
8732 
8733   // Create initial VPlan skeleton, with separate header and latch blocks.
8734   VPBasicBlock *HeaderVPBB = new VPBasicBlock();
8735   VPBasicBlock *LatchVPBB = new VPBasicBlock("vector.latch");
8736   VPBlockUtils::insertBlockAfter(LatchVPBB, HeaderVPBB);
8737   auto *TopRegion = new VPRegionBlock(HeaderVPBB, LatchVPBB, "vector loop");
8738   auto Plan = std::make_unique<VPlan>(TopRegion);
8739 
8740   Instruction *DLInst =
8741       getDebugLocFromInstOrOperands(Legal->getPrimaryInduction());
8742   addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(),
8743                         DLInst ? DLInst->getDebugLoc() : DebugLoc(),
8744                         !CM.foldTailByMasking(), false);
8745 
8746   // Scan the body of the loop in a topological order to visit each basic block
8747   // after having visited its predecessor basic blocks.
8748   LoopBlocksDFS DFS(OrigLoop);
8749   DFS.perform(LI);
8750 
8751   VPBasicBlock *VPBB = HeaderVPBB;
8752   SmallVector<VPWidenIntOrFpInductionRecipe *> InductionsToMove;
8753   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
8754     // Relevant instructions from basic block BB will be grouped into VPRecipe
8755     // ingredients and fill a new VPBasicBlock.
8756     unsigned VPBBsForBB = 0;
8757     VPBB->setName(BB->getName());
8758     Builder.setInsertPoint(VPBB);
8759 
8760     // Introduce each ingredient into VPlan.
8761     // TODO: Model and preserve debug instrinsics in VPlan.
8762     for (Instruction &I : BB->instructionsWithoutDebug()) {
8763       Instruction *Instr = &I;
8764 
8765       // First filter out irrelevant instructions, to ensure no recipes are
8766       // built for them.
8767       if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
8768         continue;
8769 
8770       SmallVector<VPValue *, 4> Operands;
8771       auto *Phi = dyn_cast<PHINode>(Instr);
8772       if (Phi && Phi->getParent() == OrigLoop->getHeader()) {
8773         Operands.push_back(Plan->getOrAddVPValue(
8774             Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())));
8775       } else {
8776         auto OpRange = Plan->mapToVPValues(Instr->operands());
8777         Operands = {OpRange.begin(), OpRange.end()};
8778       }
8779       if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe(
8780               Instr, Operands, Range, Plan)) {
8781         // If Instr can be simplified to an existing VPValue, use it.
8782         if (RecipeOrValue.is<VPValue *>()) {
8783           auto *VPV = RecipeOrValue.get<VPValue *>();
8784           Plan->addVPValue(Instr, VPV);
8785           // If the re-used value is a recipe, register the recipe for the
8786           // instruction, in case the recipe for Instr needs to be recorded.
8787           if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef()))
8788             RecipeBuilder.setRecipe(Instr, R);
8789           continue;
8790         }
8791         // Otherwise, add the new recipe.
8792         VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>();
8793         for (auto *Def : Recipe->definedValues()) {
8794           auto *UV = Def->getUnderlyingValue();
8795           Plan->addVPValue(UV, Def);
8796         }
8797 
8798         if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) &&
8799             HeaderVPBB->getFirstNonPhi() != VPBB->end()) {
8800           // Keep track of VPWidenIntOrFpInductionRecipes not in the phi section
8801           // of the header block. That can happen for truncates of induction
8802           // variables. Those recipes are moved to the phi section of the header
8803           // block after applying SinkAfter, which relies on the original
8804           // position of the trunc.
8805           assert(isa<TruncInst>(Instr));
8806           InductionsToMove.push_back(
8807               cast<VPWidenIntOrFpInductionRecipe>(Recipe));
8808         }
8809         RecipeBuilder.setRecipe(Instr, Recipe);
8810         VPBB->appendRecipe(Recipe);
8811         continue;
8812       }
8813 
8814       // Otherwise, if all widening options failed, Instruction is to be
8815       // replicated. This may create a successor for VPBB.
8816       VPBasicBlock *NextVPBB =
8817           RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan);
8818       if (NextVPBB != VPBB) {
8819         VPBB = NextVPBB;
8820         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
8821                                     : "");
8822       }
8823     }
8824 
8825     VPBlockUtils::insertBlockAfter(new VPBasicBlock(), VPBB);
8826     VPBB = cast<VPBasicBlock>(VPBB->getSingleSuccessor());
8827   }
8828 
8829   // Fold the last, empty block into its predecessor.
8830   VPBB = VPBlockUtils::tryToMergeBlockIntoPredecessor(VPBB);
8831   assert(VPBB && "expected to fold last (empty) block");
8832   // After here, VPBB should not be used.
8833   VPBB = nullptr;
8834 
8835   assert(isa<VPRegionBlock>(Plan->getVectorLoopRegion()) &&
8836          !Plan->getVectorLoopRegion()->getEntryBasicBlock()->empty() &&
8837          "entry block must be set to a VPRegionBlock having a non-empty entry "
8838          "VPBasicBlock");
8839   RecipeBuilder.fixHeaderPhis();
8840 
8841   // ---------------------------------------------------------------------------
8842   // Transform initial VPlan: Apply previously taken decisions, in order, to
8843   // bring the VPlan to its final state.
8844   // ---------------------------------------------------------------------------
8845 
8846   // Apply Sink-After legal constraints.
8847   auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * {
8848     auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent());
8849     if (Region && Region->isReplicator()) {
8850       assert(Region->getNumSuccessors() == 1 &&
8851              Region->getNumPredecessors() == 1 && "Expected SESE region!");
8852       assert(R->getParent()->size() == 1 &&
8853              "A recipe in an original replicator region must be the only "
8854              "recipe in its block");
8855       return Region;
8856     }
8857     return nullptr;
8858   };
8859   for (auto &Entry : SinkAfter) {
8860     VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
8861     VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
8862 
8863     auto *TargetRegion = GetReplicateRegion(Target);
8864     auto *SinkRegion = GetReplicateRegion(Sink);
8865     if (!SinkRegion) {
8866       // If the sink source is not a replicate region, sink the recipe directly.
8867       if (TargetRegion) {
8868         // The target is in a replication region, make sure to move Sink to
8869         // the block after it, not into the replication region itself.
8870         VPBasicBlock *NextBlock =
8871             cast<VPBasicBlock>(TargetRegion->getSuccessors().front());
8872         Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi());
8873       } else
8874         Sink->moveAfter(Target);
8875       continue;
8876     }
8877 
8878     // The sink source is in a replicate region. Unhook the region from the CFG.
8879     auto *SinkPred = SinkRegion->getSinglePredecessor();
8880     auto *SinkSucc = SinkRegion->getSingleSuccessor();
8881     VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion);
8882     VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc);
8883     VPBlockUtils::connectBlocks(SinkPred, SinkSucc);
8884 
8885     if (TargetRegion) {
8886       // The target recipe is also in a replicate region, move the sink region
8887       // after the target region.
8888       auto *TargetSucc = TargetRegion->getSingleSuccessor();
8889       VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc);
8890       VPBlockUtils::connectBlocks(TargetRegion, SinkRegion);
8891       VPBlockUtils::connectBlocks(SinkRegion, TargetSucc);
8892     } else {
8893       // The sink source is in a replicate region, we need to move the whole
8894       // replicate region, which should only contain a single recipe in the
8895       // main block.
8896       auto *SplitBlock =
8897           Target->getParent()->splitAt(std::next(Target->getIterator()));
8898 
8899       auto *SplitPred = SplitBlock->getSinglePredecessor();
8900 
8901       VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock);
8902       VPBlockUtils::connectBlocks(SplitPred, SinkRegion);
8903       VPBlockUtils::connectBlocks(SinkRegion, SplitBlock);
8904     }
8905   }
8906 
8907   VPlanTransforms::removeRedundantCanonicalIVs(*Plan);
8908   VPlanTransforms::removeRedundantInductionCasts(*Plan);
8909 
8910   // Now that sink-after is done, move induction recipes for optimized truncates
8911   // to the phi section of the header block.
8912   for (VPWidenIntOrFpInductionRecipe *Ind : InductionsToMove)
8913     Ind->moveBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi());
8914 
8915   // Adjust the recipes for any inloop reductions.
8916   adjustRecipesForReductions(cast<VPBasicBlock>(TopRegion->getExit()), Plan,
8917                              RecipeBuilder, Range.Start);
8918 
8919   // Introduce a recipe to combine the incoming and previous values of a
8920   // first-order recurrence.
8921   for (VPRecipeBase &R :
8922        Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
8923     auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R);
8924     if (!RecurPhi)
8925       continue;
8926 
8927     VPRecipeBase *PrevRecipe = RecurPhi->getBackedgeRecipe();
8928     VPBasicBlock *InsertBlock = PrevRecipe->getParent();
8929     auto *Region = GetReplicateRegion(PrevRecipe);
8930     if (Region)
8931       InsertBlock = cast<VPBasicBlock>(Region->getSingleSuccessor());
8932     if (Region || PrevRecipe->isPhi())
8933       Builder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi());
8934     else
8935       Builder.setInsertPoint(InsertBlock, std::next(PrevRecipe->getIterator()));
8936 
8937     auto *RecurSplice = cast<VPInstruction>(
8938         Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice,
8939                              {RecurPhi, RecurPhi->getBackedgeValue()}));
8940 
8941     RecurPhi->replaceAllUsesWith(RecurSplice);
8942     // Set the first operand of RecurSplice to RecurPhi again, after replacing
8943     // all users.
8944     RecurSplice->setOperand(0, RecurPhi);
8945   }
8946 
8947   // Interleave memory: for each Interleave Group we marked earlier as relevant
8948   // for this VPlan, replace the Recipes widening its memory instructions with a
8949   // single VPInterleaveRecipe at its insertion point.
8950   for (auto IG : InterleaveGroups) {
8951     auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
8952         RecipeBuilder.getRecipe(IG->getInsertPos()));
8953     SmallVector<VPValue *, 4> StoredValues;
8954     for (unsigned i = 0; i < IG->getFactor(); ++i)
8955       if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) {
8956         auto *StoreR =
8957             cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI));
8958         StoredValues.push_back(StoreR->getStoredValue());
8959       }
8960 
8961     auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues,
8962                                         Recipe->getMask());
8963     VPIG->insertBefore(Recipe);
8964     unsigned J = 0;
8965     for (unsigned i = 0; i < IG->getFactor(); ++i)
8966       if (Instruction *Member = IG->getMember(i)) {
8967         if (!Member->getType()->isVoidTy()) {
8968           VPValue *OriginalV = Plan->getVPValue(Member);
8969           Plan->removeVPValueFor(Member);
8970           Plan->addVPValue(Member, VPIG->getVPValue(J));
8971           OriginalV->replaceAllUsesWith(VPIG->getVPValue(J));
8972           J++;
8973         }
8974         RecipeBuilder.getRecipe(Member)->eraseFromParent();
8975       }
8976   }
8977 
8978   // From this point onwards, VPlan-to-VPlan transformations may change the plan
8979   // in ways that accessing values using original IR values is incorrect.
8980   Plan->disableValue2VPValue();
8981 
8982   VPlanTransforms::optimizeInductions(*Plan, *PSE.getSE());
8983   VPlanTransforms::sinkScalarOperands(*Plan);
8984   VPlanTransforms::mergeReplicateRegions(*Plan);
8985   VPlanTransforms::removeDeadRecipes(*Plan, *OrigLoop);
8986 
8987   std::string PlanName;
8988   raw_string_ostream RSO(PlanName);
8989   ElementCount VF = Range.Start;
8990   Plan->addVF(VF);
8991   RSO << "Initial VPlan for VF={" << VF;
8992   for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) {
8993     Plan->addVF(VF);
8994     RSO << "," << VF;
8995   }
8996   RSO << "},UF>=1";
8997   RSO.flush();
8998   Plan->setName(PlanName);
8999 
9000   // Fold Exit block into its predecessor if possible.
9001   // TODO: Fold block earlier once all VPlan transforms properly maintain a
9002   // VPBasicBlock as exit.
9003   VPBlockUtils::tryToMergeBlockIntoPredecessor(TopRegion->getExit());
9004 
9005   assert(VPlanVerifier::verifyPlanIsValid(*Plan) && "VPlan is invalid");
9006   return Plan;
9007 }
9008 
9009 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
9010   // Outer loop handling: They may require CFG and instruction level
9011   // transformations before even evaluating whether vectorization is profitable.
9012   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
9013   // the vectorization pipeline.
9014   assert(!OrigLoop->isInnermost());
9015   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
9016 
9017   // Create new empty VPlan
9018   auto Plan = std::make_unique<VPlan>();
9019 
9020   // Build hierarchical CFG
9021   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
9022   HCFGBuilder.buildHierarchicalCFG();
9023 
9024   for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
9025        VF *= 2)
9026     Plan->addVF(VF);
9027 
9028   if (EnableVPlanPredication) {
9029     VPlanPredicator VPP(*Plan);
9030     VPP.predicate();
9031 
9032     // Avoid running transformation to recipes until masked code generation in
9033     // VPlan-native path is in place.
9034     return Plan;
9035   }
9036 
9037   SmallPtrSet<Instruction *, 1> DeadInstructions;
9038   VPlanTransforms::VPInstructionsToVPRecipes(
9039       OrigLoop, Plan,
9040       [this](PHINode *P) { return Legal->getIntOrFpInductionDescriptor(P); },
9041       DeadInstructions, *PSE.getSE());
9042 
9043   addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(), DebugLoc(),
9044                         true, true);
9045   return Plan;
9046 }
9047 
9048 // Adjust the recipes for reductions. For in-loop reductions the chain of
9049 // instructions leading from the loop exit instr to the phi need to be converted
9050 // to reductions, with one operand being vector and the other being the scalar
9051 // reduction chain. For other reductions, a select is introduced between the phi
9052 // and live-out recipes when folding the tail.
9053 void LoopVectorizationPlanner::adjustRecipesForReductions(
9054     VPBasicBlock *LatchVPBB, VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder,
9055     ElementCount MinVF) {
9056   for (auto &Reduction : CM.getInLoopReductionChains()) {
9057     PHINode *Phi = Reduction.first;
9058     const RecurrenceDescriptor &RdxDesc =
9059         Legal->getReductionVars().find(Phi)->second;
9060     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9061 
9062     if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc))
9063       continue;
9064 
9065     // ReductionOperations are orders top-down from the phi's use to the
9066     // LoopExitValue. We keep a track of the previous item (the Chain) to tell
9067     // which of the two operands will remain scalar and which will be reduced.
9068     // For minmax the chain will be the select instructions.
9069     Instruction *Chain = Phi;
9070     for (Instruction *R : ReductionOperations) {
9071       VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
9072       RecurKind Kind = RdxDesc.getRecurrenceKind();
9073 
9074       VPValue *ChainOp = Plan->getVPValue(Chain);
9075       unsigned FirstOpId;
9076       assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
9077              "Only min/max recurrences allowed for inloop reductions");
9078       // Recognize a call to the llvm.fmuladd intrinsic.
9079       bool IsFMulAdd = (Kind == RecurKind::FMulAdd);
9080       assert((!IsFMulAdd || RecurrenceDescriptor::isFMulAddIntrinsic(R)) &&
9081              "Expected instruction to be a call to the llvm.fmuladd intrinsic");
9082       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9083         assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
9084                "Expected to replace a VPWidenSelectSC");
9085         FirstOpId = 1;
9086       } else {
9087         assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe) ||
9088                 (IsFMulAdd && isa<VPWidenCallRecipe>(WidenRecipe))) &&
9089                "Expected to replace a VPWidenSC");
9090         FirstOpId = 0;
9091       }
9092       unsigned VecOpId =
9093           R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
9094       VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));
9095 
9096       auto *CondOp = CM.blockNeedsPredicationForAnyReason(R->getParent())
9097                          ? RecipeBuilder.createBlockInMask(R->getParent(), Plan)
9098                          : nullptr;
9099 
9100       if (IsFMulAdd) {
9101         // If the instruction is a call to the llvm.fmuladd intrinsic then we
9102         // need to create an fmul recipe to use as the vector operand for the
9103         // fadd reduction.
9104         VPInstruction *FMulRecipe = new VPInstruction(
9105             Instruction::FMul, {VecOp, Plan->getVPValue(R->getOperand(1))});
9106         FMulRecipe->setFastMathFlags(R->getFastMathFlags());
9107         WidenRecipe->getParent()->insert(FMulRecipe,
9108                                          WidenRecipe->getIterator());
9109         VecOp = FMulRecipe;
9110       }
9111       VPReductionRecipe *RedRecipe =
9112           new VPReductionRecipe(&RdxDesc, R, ChainOp, VecOp, CondOp, TTI);
9113       WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
9114       Plan->removeVPValueFor(R);
9115       Plan->addVPValue(R, RedRecipe);
9116       WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator());
9117       WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
9118       WidenRecipe->eraseFromParent();
9119 
9120       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9121         VPRecipeBase *CompareRecipe =
9122             RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
9123         assert(isa<VPWidenRecipe>(CompareRecipe) &&
9124                "Expected to replace a VPWidenSC");
9125         assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 &&
9126                "Expected no remaining users");
9127         CompareRecipe->eraseFromParent();
9128       }
9129       Chain = R;
9130     }
9131   }
9132 
9133   // If tail is folded by masking, introduce selects between the phi
9134   // and the live-out instruction of each reduction, at the beginning of the
9135   // dedicated latch block.
9136   if (CM.foldTailByMasking()) {
9137     Builder.setInsertPoint(LatchVPBB, LatchVPBB->begin());
9138     for (VPRecipeBase &R :
9139          Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
9140       VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
9141       if (!PhiR || PhiR->isInLoop())
9142         continue;
9143       VPValue *Cond =
9144           RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
9145       VPValue *Red = PhiR->getBackedgeValue();
9146       assert(cast<VPRecipeBase>(Red->getDef())->getParent() != LatchVPBB &&
9147              "reduction recipe must be defined before latch");
9148       Builder.createNaryOp(Instruction::Select, {Cond, Red, PhiR});
9149     }
9150   }
9151 }
9152 
9153 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
9154 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
9155                                VPSlotTracker &SlotTracker) const {
9156   O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
9157   IG->getInsertPos()->printAsOperand(O, false);
9158   O << ", ";
9159   getAddr()->printAsOperand(O, SlotTracker);
9160   VPValue *Mask = getMask();
9161   if (Mask) {
9162     O << ", ";
9163     Mask->printAsOperand(O, SlotTracker);
9164   }
9165 
9166   unsigned OpIdx = 0;
9167   for (unsigned i = 0; i < IG->getFactor(); ++i) {
9168     if (!IG->getMember(i))
9169       continue;
9170     if (getNumStoreOperands() > 0) {
9171       O << "\n" << Indent << "  store ";
9172       getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker);
9173       O << " to index " << i;
9174     } else {
9175       O << "\n" << Indent << "  ";
9176       getVPValue(OpIdx)->printAsOperand(O, SlotTracker);
9177       O << " = load from index " << i;
9178     }
9179     ++OpIdx;
9180   }
9181 }
9182 #endif
9183 
9184 void VPWidenCallRecipe::execute(VPTransformState &State) {
9185   State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this,
9186                                   *this, State);
9187 }
9188 
9189 void VPWidenSelectRecipe::execute(VPTransformState &State) {
9190   auto &I = *cast<SelectInst>(getUnderlyingInstr());
9191   State.ILV->setDebugLocFromInst(&I);
9192 
9193   // The condition can be loop invariant  but still defined inside the
9194   // loop. This means that we can't just use the original 'cond' value.
9195   // We have to take the 'vectorized' value and pick the first lane.
9196   // Instcombine will make this a no-op.
9197   auto *InvarCond =
9198       InvariantCond ? State.get(getOperand(0), VPIteration(0, 0)) : nullptr;
9199 
9200   for (unsigned Part = 0; Part < State.UF; ++Part) {
9201     Value *Cond = InvarCond ? InvarCond : State.get(getOperand(0), Part);
9202     Value *Op0 = State.get(getOperand(1), Part);
9203     Value *Op1 = State.get(getOperand(2), Part);
9204     Value *Sel = State.Builder.CreateSelect(Cond, Op0, Op1);
9205     State.set(this, Sel, Part);
9206     State.ILV->addMetadata(Sel, &I);
9207   }
9208 }
9209 
9210 void VPWidenRecipe::execute(VPTransformState &State) {
9211   auto &I = *cast<Instruction>(getUnderlyingValue());
9212   auto &Builder = State.Builder;
9213   switch (I.getOpcode()) {
9214   case Instruction::Call:
9215   case Instruction::Br:
9216   case Instruction::PHI:
9217   case Instruction::GetElementPtr:
9218   case Instruction::Select:
9219     llvm_unreachable("This instruction is handled by a different recipe.");
9220   case Instruction::UDiv:
9221   case Instruction::SDiv:
9222   case Instruction::SRem:
9223   case Instruction::URem:
9224   case Instruction::Add:
9225   case Instruction::FAdd:
9226   case Instruction::Sub:
9227   case Instruction::FSub:
9228   case Instruction::FNeg:
9229   case Instruction::Mul:
9230   case Instruction::FMul:
9231   case Instruction::FDiv:
9232   case Instruction::FRem:
9233   case Instruction::Shl:
9234   case Instruction::LShr:
9235   case Instruction::AShr:
9236   case Instruction::And:
9237   case Instruction::Or:
9238   case Instruction::Xor: {
9239     // Just widen unops and binops.
9240     State.ILV->setDebugLocFromInst(&I);
9241 
9242     for (unsigned Part = 0; Part < State.UF; ++Part) {
9243       SmallVector<Value *, 2> Ops;
9244       for (VPValue *VPOp : operands())
9245         Ops.push_back(State.get(VPOp, Part));
9246 
9247       Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
9248 
9249       if (auto *VecOp = dyn_cast<Instruction>(V)) {
9250         VecOp->copyIRFlags(&I);
9251 
9252         // If the instruction is vectorized and was in a basic block that needed
9253         // predication, we can't propagate poison-generating flags (nuw/nsw,
9254         // exact, etc.). The control flow has been linearized and the
9255         // instruction is no longer guarded by the predicate, which could make
9256         // the flag properties to no longer hold.
9257         if (State.MayGeneratePoisonRecipes.contains(this))
9258           VecOp->dropPoisonGeneratingFlags();
9259       }
9260 
9261       // Use this vector value for all users of the original instruction.
9262       State.set(this, V, Part);
9263       State.ILV->addMetadata(V, &I);
9264     }
9265 
9266     break;
9267   }
9268   case Instruction::ICmp:
9269   case Instruction::FCmp: {
9270     // Widen compares. Generate vector compares.
9271     bool FCmp = (I.getOpcode() == Instruction::FCmp);
9272     auto *Cmp = cast<CmpInst>(&I);
9273     State.ILV->setDebugLocFromInst(Cmp);
9274     for (unsigned Part = 0; Part < State.UF; ++Part) {
9275       Value *A = State.get(getOperand(0), Part);
9276       Value *B = State.get(getOperand(1), Part);
9277       Value *C = nullptr;
9278       if (FCmp) {
9279         // Propagate fast math flags.
9280         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
9281         Builder.setFastMathFlags(Cmp->getFastMathFlags());
9282         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
9283       } else {
9284         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
9285       }
9286       State.set(this, C, Part);
9287       State.ILV->addMetadata(C, &I);
9288     }
9289 
9290     break;
9291   }
9292 
9293   case Instruction::ZExt:
9294   case Instruction::SExt:
9295   case Instruction::FPToUI:
9296   case Instruction::FPToSI:
9297   case Instruction::FPExt:
9298   case Instruction::PtrToInt:
9299   case Instruction::IntToPtr:
9300   case Instruction::SIToFP:
9301   case Instruction::UIToFP:
9302   case Instruction::Trunc:
9303   case Instruction::FPTrunc:
9304   case Instruction::BitCast: {
9305     auto *CI = cast<CastInst>(&I);
9306     State.ILV->setDebugLocFromInst(CI);
9307 
9308     /// Vectorize casts.
9309     Type *DestTy = (State.VF.isScalar())
9310                        ? CI->getType()
9311                        : VectorType::get(CI->getType(), State.VF);
9312 
9313     for (unsigned Part = 0; Part < State.UF; ++Part) {
9314       Value *A = State.get(getOperand(0), Part);
9315       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
9316       State.set(this, Cast, Part);
9317       State.ILV->addMetadata(Cast, &I);
9318     }
9319     break;
9320   }
9321   default:
9322     // This instruction is not vectorized by simple widening.
9323     LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
9324     llvm_unreachable("Unhandled instruction!");
9325   } // end of switch.
9326 }
9327 
9328 void VPWidenGEPRecipe::execute(VPTransformState &State) {
9329   auto *GEP = cast<GetElementPtrInst>(getUnderlyingInstr());
9330   // Construct a vector GEP by widening the operands of the scalar GEP as
9331   // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
9332   // results in a vector of pointers when at least one operand of the GEP
9333   // is vector-typed. Thus, to keep the representation compact, we only use
9334   // vector-typed operands for loop-varying values.
9335 
9336   if (State.VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
9337     // If we are vectorizing, but the GEP has only loop-invariant operands,
9338     // the GEP we build (by only using vector-typed operands for
9339     // loop-varying values) would be a scalar pointer. Thus, to ensure we
9340     // produce a vector of pointers, we need to either arbitrarily pick an
9341     // operand to broadcast, or broadcast a clone of the original GEP.
9342     // Here, we broadcast a clone of the original.
9343     //
9344     // TODO: If at some point we decide to scalarize instructions having
9345     //       loop-invariant operands, this special case will no longer be
9346     //       required. We would add the scalarization decision to
9347     //       collectLoopScalars() and teach getVectorValue() to broadcast
9348     //       the lane-zero scalar value.
9349     auto *Clone = State.Builder.Insert(GEP->clone());
9350     for (unsigned Part = 0; Part < State.UF; ++Part) {
9351       Value *EntryPart = State.Builder.CreateVectorSplat(State.VF, Clone);
9352       State.set(this, EntryPart, Part);
9353       State.ILV->addMetadata(EntryPart, GEP);
9354     }
9355   } else {
9356     // If the GEP has at least one loop-varying operand, we are sure to
9357     // produce a vector of pointers. But if we are only unrolling, we want
9358     // to produce a scalar GEP for each unroll part. Thus, the GEP we
9359     // produce with the code below will be scalar (if VF == 1) or vector
9360     // (otherwise). Note that for the unroll-only case, we still maintain
9361     // values in the vector mapping with initVector, as we do for other
9362     // instructions.
9363     for (unsigned Part = 0; Part < State.UF; ++Part) {
9364       // The pointer operand of the new GEP. If it's loop-invariant, we
9365       // won't broadcast it.
9366       auto *Ptr = IsPtrLoopInvariant
9367                       ? State.get(getOperand(0), VPIteration(0, 0))
9368                       : State.get(getOperand(0), Part);
9369 
9370       // Collect all the indices for the new GEP. If any index is
9371       // loop-invariant, we won't broadcast it.
9372       SmallVector<Value *, 4> Indices;
9373       for (unsigned I = 1, E = getNumOperands(); I < E; I++) {
9374         VPValue *Operand = getOperand(I);
9375         if (IsIndexLoopInvariant[I - 1])
9376           Indices.push_back(State.get(Operand, VPIteration(0, 0)));
9377         else
9378           Indices.push_back(State.get(Operand, Part));
9379       }
9380 
9381       // If the GEP instruction is vectorized and was in a basic block that
9382       // needed predication, we can't propagate the poison-generating 'inbounds'
9383       // flag. The control flow has been linearized and the GEP is no longer
9384       // guarded by the predicate, which could make the 'inbounds' properties to
9385       // no longer hold.
9386       bool IsInBounds =
9387           GEP->isInBounds() && State.MayGeneratePoisonRecipes.count(this) == 0;
9388 
9389       // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
9390       // but it should be a vector, otherwise.
9391       auto *NewGEP = IsInBounds
9392                          ? State.Builder.CreateInBoundsGEP(
9393                                GEP->getSourceElementType(), Ptr, Indices)
9394                          : State.Builder.CreateGEP(GEP->getSourceElementType(),
9395                                                    Ptr, Indices);
9396       assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
9397              "NewGEP is not a pointer vector");
9398       State.set(this, NewGEP, Part);
9399       State.ILV->addMetadata(NewGEP, GEP);
9400     }
9401   }
9402 }
9403 
9404 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
9405   assert(!State.Instance && "Int or FP induction being replicated.");
9406 
9407   Value *Start = getStartValue()->getLiveInIRValue();
9408   const InductionDescriptor &ID = getInductionDescriptor();
9409   TruncInst *Trunc = getTruncInst();
9410   IRBuilderBase &Builder = State.Builder;
9411   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
9412   assert(State.VF.isVector() && "must have vector VF");
9413 
9414   // The value from the original loop to which we are mapping the new induction
9415   // variable.
9416   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
9417 
9418   auto &DL = EntryVal->getModule()->getDataLayout();
9419 
9420   // Generate code for the induction step. Note that induction steps are
9421   // required to be loop-invariant
9422   auto CreateStepValue = [&](const SCEV *Step) -> Value * {
9423     if (SE.isSCEVable(IV->getType())) {
9424       SCEVExpander Exp(SE, DL, "induction");
9425       return Exp.expandCodeFor(Step, Step->getType(),
9426                                State.CFG.VectorPreHeader->getTerminator());
9427     }
9428     return cast<SCEVUnknown>(Step)->getValue();
9429   };
9430 
9431   // Fast-math-flags propagate from the original induction instruction.
9432   IRBuilder<>::FastMathFlagGuard FMFG(Builder);
9433   if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp()))
9434     Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags());
9435 
9436   // Now do the actual transformations, and start with creating the step value.
9437   Value *Step = CreateStepValue(ID.getStep());
9438 
9439   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
9440          "Expected either an induction phi-node or a truncate of it!");
9441 
9442   // Construct the initial value of the vector IV in the vector loop preheader
9443   auto CurrIP = Builder.saveIP();
9444   Builder.SetInsertPoint(State.CFG.VectorPreHeader->getTerminator());
9445   if (isa<TruncInst>(EntryVal)) {
9446     assert(Start->getType()->isIntegerTy() &&
9447            "Truncation requires an integer type");
9448     auto *TruncType = cast<IntegerType>(EntryVal->getType());
9449     Step = Builder.CreateTrunc(Step, TruncType);
9450     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
9451   }
9452 
9453   Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0);
9454   Value *SplatStart = Builder.CreateVectorSplat(State.VF, Start);
9455   Value *SteppedStart = getStepVector(
9456       SplatStart, Zero, Step, ID.getInductionOpcode(), State.VF, State.Builder);
9457 
9458   // We create vector phi nodes for both integer and floating-point induction
9459   // variables. Here, we determine the kind of arithmetic we will perform.
9460   Instruction::BinaryOps AddOp;
9461   Instruction::BinaryOps MulOp;
9462   if (Step->getType()->isIntegerTy()) {
9463     AddOp = Instruction::Add;
9464     MulOp = Instruction::Mul;
9465   } else {
9466     AddOp = ID.getInductionOpcode();
9467     MulOp = Instruction::FMul;
9468   }
9469 
9470   // Multiply the vectorization factor by the step using integer or
9471   // floating-point arithmetic as appropriate.
9472   Type *StepType = Step->getType();
9473   Value *RuntimeVF;
9474   if (Step->getType()->isFloatingPointTy())
9475     RuntimeVF = getRuntimeVFAsFloat(Builder, StepType, State.VF);
9476   else
9477     RuntimeVF = getRuntimeVF(Builder, StepType, State.VF);
9478   Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF);
9479 
9480   // Create a vector splat to use in the induction update.
9481   //
9482   // FIXME: If the step is non-constant, we create the vector splat with
9483   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
9484   //        handle a constant vector splat.
9485   Value *SplatVF = isa<Constant>(Mul)
9486                        ? ConstantVector::getSplat(State.VF, cast<Constant>(Mul))
9487                        : Builder.CreateVectorSplat(State.VF, Mul);
9488   Builder.restoreIP(CurrIP);
9489 
9490   // We may need to add the step a number of times, depending on the unroll
9491   // factor. The last of those goes into the PHI.
9492   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
9493                                     &*State.CFG.PrevBB->getFirstInsertionPt());
9494   VecInd->setDebugLoc(EntryVal->getDebugLoc());
9495   Instruction *LastInduction = VecInd;
9496   for (unsigned Part = 0; Part < State.UF; ++Part) {
9497     State.set(this, LastInduction, Part);
9498 
9499     if (isa<TruncInst>(EntryVal))
9500       State.ILV->addMetadata(LastInduction, EntryVal);
9501 
9502     LastInduction = cast<Instruction>(
9503         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"));
9504     LastInduction->setDebugLoc(EntryVal->getDebugLoc());
9505   }
9506 
9507   LastInduction->setName("vec.ind.next");
9508   VecInd->addIncoming(SteppedStart, State.CFG.VectorPreHeader);
9509   // Add induction update using an incorrect block temporarily. The phi node
9510   // will be fixed after VPlan execution. Note that at this point the latch
9511   // block cannot be used, as it does not exist yet.
9512   // TODO: Model increment value in VPlan, by turning the recipe into a
9513   // multi-def and a subclass of VPHeaderPHIRecipe.
9514   VecInd->addIncoming(LastInduction, State.CFG.VectorPreHeader);
9515 }
9516 
9517 void VPWidenPointerInductionRecipe::execute(VPTransformState &State) {
9518   assert(IndDesc.getKind() == InductionDescriptor::IK_PtrInduction &&
9519          "Not a pointer induction according to InductionDescriptor!");
9520   assert(cast<PHINode>(getUnderlyingInstr())->getType()->isPointerTy() &&
9521          "Unexpected type.");
9522 
9523   auto *IVR = getParent()->getPlan()->getCanonicalIV();
9524   PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, 0));
9525 
9526   if (all_of(users(), [this](const VPUser *U) {
9527         return cast<VPRecipeBase>(U)->usesScalars(this);
9528       })) {
9529     // This is the normalized GEP that starts counting at zero.
9530     Value *PtrInd = State.Builder.CreateSExtOrTrunc(
9531         CanonicalIV, IndDesc.getStep()->getType());
9532     // Determine the number of scalars we need to generate for each unroll
9533     // iteration. If the instruction is uniform, we only need to generate the
9534     // first lane. Otherwise, we generate all VF values.
9535     bool IsUniform = vputils::onlyFirstLaneUsed(this);
9536     assert((IsUniform || !State.VF.isScalable()) &&
9537            "Cannot scalarize a scalable VF");
9538     unsigned Lanes = IsUniform ? 1 : State.VF.getFixedValue();
9539 
9540     for (unsigned Part = 0; Part < State.UF; ++Part) {
9541       Value *PartStart =
9542           createStepForVF(State.Builder, PtrInd->getType(), State.VF, Part);
9543 
9544       for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
9545         Value *Idx = State.Builder.CreateAdd(
9546             PartStart, ConstantInt::get(PtrInd->getType(), Lane));
9547         Value *GlobalIdx = State.Builder.CreateAdd(PtrInd, Idx);
9548 
9549         Value *Step = CreateStepValue(IndDesc.getStep(), SE,
9550                                       State.CFG.PrevBB->getTerminator());
9551         Value *SclrGep = emitTransformedIndex(
9552             State.Builder, GlobalIdx, IndDesc.getStartValue(), Step, IndDesc);
9553         SclrGep->setName("next.gep");
9554         State.set(this, SclrGep, VPIteration(Part, Lane));
9555       }
9556     }
9557     return;
9558   }
9559 
9560   assert(isa<SCEVConstant>(IndDesc.getStep()) &&
9561          "Induction step not a SCEV constant!");
9562   Type *PhiType = IndDesc.getStep()->getType();
9563 
9564   // Build a pointer phi
9565   Value *ScalarStartValue = getStartValue()->getLiveInIRValue();
9566   Type *ScStValueType = ScalarStartValue->getType();
9567   PHINode *NewPointerPhi =
9568       PHINode::Create(ScStValueType, 2, "pointer.phi", CanonicalIV);
9569   NewPointerPhi->addIncoming(ScalarStartValue, State.CFG.VectorPreHeader);
9570 
9571   // A pointer induction, performed by using a gep
9572   const DataLayout &DL = NewPointerPhi->getModule()->getDataLayout();
9573   Instruction *InductionLoc = &*State.Builder.GetInsertPoint();
9574 
9575   const SCEV *ScalarStep = IndDesc.getStep();
9576   SCEVExpander Exp(SE, DL, "induction");
9577   Value *ScalarStepValue = Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc);
9578   Value *RuntimeVF = getRuntimeVF(State.Builder, PhiType, State.VF);
9579   Value *NumUnrolledElems =
9580       State.Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF));
9581   Value *InductionGEP = GetElementPtrInst::Create(
9582       IndDesc.getElementType(), NewPointerPhi,
9583       State.Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind",
9584       InductionLoc);
9585   // Add induction update using an incorrect block temporarily. The phi node
9586   // will be fixed after VPlan execution. Note that at this point the latch
9587   // block cannot be used, as it does not exist yet.
9588   // TODO: Model increment value in VPlan, by turning the recipe into a
9589   // multi-def and a subclass of VPHeaderPHIRecipe.
9590   NewPointerPhi->addIncoming(InductionGEP, State.CFG.VectorPreHeader);
9591 
9592   // Create UF many actual address geps that use the pointer
9593   // phi as base and a vectorized version of the step value
9594   // (<step*0, ..., step*N>) as offset.
9595   for (unsigned Part = 0; Part < State.UF; ++Part) {
9596     Type *VecPhiType = VectorType::get(PhiType, State.VF);
9597     Value *StartOffsetScalar =
9598         State.Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part));
9599     Value *StartOffset =
9600         State.Builder.CreateVectorSplat(State.VF, StartOffsetScalar);
9601     // Create a vector of consecutive numbers from zero to VF.
9602     StartOffset = State.Builder.CreateAdd(
9603         StartOffset, State.Builder.CreateStepVector(VecPhiType));
9604 
9605     Value *GEP = State.Builder.CreateGEP(
9606         IndDesc.getElementType(), NewPointerPhi,
9607         State.Builder.CreateMul(
9608             StartOffset,
9609             State.Builder.CreateVectorSplat(State.VF, ScalarStepValue),
9610             "vector.gep"));
9611     State.set(this, GEP, Part);
9612   }
9613 }
9614 
9615 void VPScalarIVStepsRecipe::execute(VPTransformState &State) {
9616   assert(!State.Instance && "VPScalarIVStepsRecipe being replicated.");
9617 
9618   // Fast-math-flags propagate from the original induction instruction.
9619   IRBuilder<>::FastMathFlagGuard FMFG(State.Builder);
9620   if (IndDesc.getInductionBinOp() &&
9621       isa<FPMathOperator>(IndDesc.getInductionBinOp()))
9622     State.Builder.setFastMathFlags(
9623         IndDesc.getInductionBinOp()->getFastMathFlags());
9624 
9625   Value *Step = State.get(getStepValue(), VPIteration(0, 0));
9626   auto CreateScalarIV = [&](Value *&Step) -> Value * {
9627     Value *ScalarIV = State.get(getCanonicalIV(), VPIteration(0, 0));
9628     auto *CanonicalIV = State.get(getParent()->getPlan()->getCanonicalIV(), 0);
9629     if (!isCanonical() || CanonicalIV->getType() != Ty) {
9630       ScalarIV =
9631           Ty->isIntegerTy()
9632               ? State.Builder.CreateSExtOrTrunc(ScalarIV, Ty)
9633               : State.Builder.CreateCast(Instruction::SIToFP, ScalarIV, Ty);
9634       ScalarIV = emitTransformedIndex(State.Builder, ScalarIV,
9635                                       getStartValue()->getLiveInIRValue(), Step,
9636                                       IndDesc);
9637       ScalarIV->setName("offset.idx");
9638     }
9639     if (TruncToTy) {
9640       assert(Step->getType()->isIntegerTy() &&
9641              "Truncation requires an integer step");
9642       ScalarIV = State.Builder.CreateTrunc(ScalarIV, TruncToTy);
9643       Step = State.Builder.CreateTrunc(Step, TruncToTy);
9644     }
9645     return ScalarIV;
9646   };
9647 
9648   Value *ScalarIV = CreateScalarIV(Step);
9649   if (State.VF.isVector()) {
9650     buildScalarSteps(ScalarIV, Step, IndDesc, this, State);
9651     return;
9652   }
9653 
9654   for (unsigned Part = 0; Part < State.UF; ++Part) {
9655     assert(!State.VF.isScalable() && "scalable vectors not yet supported.");
9656     Value *EntryPart;
9657     if (Step->getType()->isFloatingPointTy()) {
9658       Value *StartIdx =
9659           getRuntimeVFAsFloat(State.Builder, Step->getType(), State.VF * Part);
9660       // Floating-point operations inherit FMF via the builder's flags.
9661       Value *MulOp = State.Builder.CreateFMul(StartIdx, Step);
9662       EntryPart = State.Builder.CreateBinOp(IndDesc.getInductionOpcode(),
9663                                             ScalarIV, MulOp);
9664     } else {
9665       Value *StartIdx =
9666           getRuntimeVF(State.Builder, Step->getType(), State.VF * Part);
9667       EntryPart = State.Builder.CreateAdd(
9668           ScalarIV, State.Builder.CreateMul(StartIdx, Step), "induction");
9669     }
9670     State.set(this, EntryPart, Part);
9671   }
9672 }
9673 
9674 void VPWidenPHIRecipe::execute(VPTransformState &State) {
9675   State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), this,
9676                                  State);
9677 }
9678 
9679 void VPBlendRecipe::execute(VPTransformState &State) {
9680   State.ILV->setDebugLocFromInst(Phi, &State.Builder);
9681   // We know that all PHIs in non-header blocks are converted into
9682   // selects, so we don't have to worry about the insertion order and we
9683   // can just use the builder.
9684   // At this point we generate the predication tree. There may be
9685   // duplications since this is a simple recursive scan, but future
9686   // optimizations will clean it up.
9687 
9688   unsigned NumIncoming = getNumIncomingValues();
9689 
9690   // Generate a sequence of selects of the form:
9691   // SELECT(Mask3, In3,
9692   //        SELECT(Mask2, In2,
9693   //               SELECT(Mask1, In1,
9694   //                      In0)))
9695   // Note that Mask0 is never used: lanes for which no path reaches this phi and
9696   // are essentially undef are taken from In0.
9697   InnerLoopVectorizer::VectorParts Entry(State.UF);
9698   for (unsigned In = 0; In < NumIncoming; ++In) {
9699     for (unsigned Part = 0; Part < State.UF; ++Part) {
9700       // We might have single edge PHIs (blocks) - use an identity
9701       // 'select' for the first PHI operand.
9702       Value *In0 = State.get(getIncomingValue(In), Part);
9703       if (In == 0)
9704         Entry[Part] = In0; // Initialize with the first incoming value.
9705       else {
9706         // Select between the current value and the previous incoming edge
9707         // based on the incoming mask.
9708         Value *Cond = State.get(getMask(In), Part);
9709         Entry[Part] =
9710             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
9711       }
9712     }
9713   }
9714   for (unsigned Part = 0; Part < State.UF; ++Part)
9715     State.set(this, Entry[Part], Part);
9716 }
9717 
9718 void VPInterleaveRecipe::execute(VPTransformState &State) {
9719   assert(!State.Instance && "Interleave group being replicated.");
9720   State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(),
9721                                       getStoredValues(), getMask());
9722 }
9723 
9724 void VPReductionRecipe::execute(VPTransformState &State) {
9725   assert(!State.Instance && "Reduction being replicated.");
9726   Value *PrevInChain = State.get(getChainOp(), 0);
9727   RecurKind Kind = RdxDesc->getRecurrenceKind();
9728   bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc);
9729   // Propagate the fast-math flags carried by the underlying instruction.
9730   IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
9731   State.Builder.setFastMathFlags(RdxDesc->getFastMathFlags());
9732   for (unsigned Part = 0; Part < State.UF; ++Part) {
9733     Value *NewVecOp = State.get(getVecOp(), Part);
9734     if (VPValue *Cond = getCondOp()) {
9735       Value *NewCond = State.get(Cond, Part);
9736       VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
9737       Value *Iden = RdxDesc->getRecurrenceIdentity(
9738           Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags());
9739       Value *IdenVec =
9740           State.Builder.CreateVectorSplat(VecTy->getElementCount(), Iden);
9741       Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
9742       NewVecOp = Select;
9743     }
9744     Value *NewRed;
9745     Value *NextInChain;
9746     if (IsOrdered) {
9747       if (State.VF.isVector())
9748         NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp,
9749                                         PrevInChain);
9750       else
9751         NewRed = State.Builder.CreateBinOp(
9752             (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), PrevInChain,
9753             NewVecOp);
9754       PrevInChain = NewRed;
9755     } else {
9756       PrevInChain = State.get(getChainOp(), Part);
9757       NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
9758     }
9759     if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9760       NextInChain =
9761           createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
9762                          NewRed, PrevInChain);
9763     } else if (IsOrdered)
9764       NextInChain = NewRed;
9765     else
9766       NextInChain = State.Builder.CreateBinOp(
9767           (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), NewRed,
9768           PrevInChain);
9769     State.set(this, NextInChain, Part);
9770   }
9771 }
9772 
9773 void VPReplicateRecipe::execute(VPTransformState &State) {
9774   if (State.Instance) { // Generate a single instance.
9775     assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
9776     State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *State.Instance,
9777                                     IsPredicated, State);
9778     // Insert scalar instance packing it into a vector.
9779     if (AlsoPack && State.VF.isVector()) {
9780       // If we're constructing lane 0, initialize to start from poison.
9781       if (State.Instance->Lane.isFirstLane()) {
9782         assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
9783         Value *Poison = PoisonValue::get(
9784             VectorType::get(getUnderlyingValue()->getType(), State.VF));
9785         State.set(this, Poison, State.Instance->Part);
9786       }
9787       State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
9788     }
9789     return;
9790   }
9791 
9792   // Generate scalar instances for all VF lanes of all UF parts, unless the
9793   // instruction is uniform inwhich case generate only the first lane for each
9794   // of the UF parts.
9795   unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
9796   assert((!State.VF.isScalable() || IsUniform) &&
9797          "Can't scalarize a scalable vector");
9798   for (unsigned Part = 0; Part < State.UF; ++Part)
9799     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
9800       State.ILV->scalarizeInstruction(getUnderlyingInstr(), this,
9801                                       VPIteration(Part, Lane), IsPredicated,
9802                                       State);
9803 }
9804 
9805 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
9806   assert(State.Instance && "Branch on Mask works only on single instance.");
9807 
9808   unsigned Part = State.Instance->Part;
9809   unsigned Lane = State.Instance->Lane.getKnownLane();
9810 
9811   Value *ConditionBit = nullptr;
9812   VPValue *BlockInMask = getMask();
9813   if (BlockInMask) {
9814     ConditionBit = State.get(BlockInMask, Part);
9815     if (ConditionBit->getType()->isVectorTy())
9816       ConditionBit = State.Builder.CreateExtractElement(
9817           ConditionBit, State.Builder.getInt32(Lane));
9818   } else // Block in mask is all-one.
9819     ConditionBit = State.Builder.getTrue();
9820 
9821   // Replace the temporary unreachable terminator with a new conditional branch,
9822   // whose two destinations will be set later when they are created.
9823   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
9824   assert(isa<UnreachableInst>(CurrentTerminator) &&
9825          "Expected to replace unreachable terminator with conditional branch.");
9826   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
9827   CondBr->setSuccessor(0, nullptr);
9828   ReplaceInstWithInst(CurrentTerminator, CondBr);
9829 }
9830 
9831 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
9832   assert(State.Instance && "Predicated instruction PHI works per instance.");
9833   Instruction *ScalarPredInst =
9834       cast<Instruction>(State.get(getOperand(0), *State.Instance));
9835   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
9836   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
9837   assert(PredicatingBB && "Predicated block has no single predecessor.");
9838   assert(isa<VPReplicateRecipe>(getOperand(0)) &&
9839          "operand must be VPReplicateRecipe");
9840 
9841   // By current pack/unpack logic we need to generate only a single phi node: if
9842   // a vector value for the predicated instruction exists at this point it means
9843   // the instruction has vector users only, and a phi for the vector value is
9844   // needed. In this case the recipe of the predicated instruction is marked to
9845   // also do that packing, thereby "hoisting" the insert-element sequence.
9846   // Otherwise, a phi node for the scalar value is needed.
9847   unsigned Part = State.Instance->Part;
9848   if (State.hasVectorValue(getOperand(0), Part)) {
9849     Value *VectorValue = State.get(getOperand(0), Part);
9850     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
9851     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
9852     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
9853     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
9854     if (State.hasVectorValue(this, Part))
9855       State.reset(this, VPhi, Part);
9856     else
9857       State.set(this, VPhi, Part);
9858     // NOTE: Currently we need to update the value of the operand, so the next
9859     // predicated iteration inserts its generated value in the correct vector.
9860     State.reset(getOperand(0), VPhi, Part);
9861   } else {
9862     Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType();
9863     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
9864     Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
9865                      PredicatingBB);
9866     Phi->addIncoming(ScalarPredInst, PredicatedBB);
9867     if (State.hasScalarValue(this, *State.Instance))
9868       State.reset(this, Phi, *State.Instance);
9869     else
9870       State.set(this, Phi, *State.Instance);
9871     // NOTE: Currently we need to update the value of the operand, so the next
9872     // predicated iteration inserts its generated value in the correct vector.
9873     State.reset(getOperand(0), Phi, *State.Instance);
9874   }
9875 }
9876 
9877 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
9878   VPValue *StoredValue = isStore() ? getStoredValue() : nullptr;
9879 
9880   // Attempt to issue a wide load.
9881   LoadInst *LI = dyn_cast<LoadInst>(&Ingredient);
9882   StoreInst *SI = dyn_cast<StoreInst>(&Ingredient);
9883 
9884   assert((LI || SI) && "Invalid Load/Store instruction");
9885   assert((!SI || StoredValue) && "No stored value provided for widened store");
9886   assert((!LI || !StoredValue) && "Stored value provided for widened load");
9887 
9888   Type *ScalarDataTy = getLoadStoreType(&Ingredient);
9889 
9890   auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
9891   const Align Alignment = getLoadStoreAlignment(&Ingredient);
9892   bool CreateGatherScatter = !Consecutive;
9893 
9894   auto &Builder = State.Builder;
9895   InnerLoopVectorizer::VectorParts BlockInMaskParts(State.UF);
9896   bool isMaskRequired = getMask();
9897   if (isMaskRequired)
9898     for (unsigned Part = 0; Part < State.UF; ++Part)
9899       BlockInMaskParts[Part] = State.get(getMask(), Part);
9900 
9901   const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
9902     // Calculate the pointer for the specific unroll-part.
9903     GetElementPtrInst *PartPtr = nullptr;
9904 
9905     bool InBounds = false;
9906     if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
9907       InBounds = gep->isInBounds();
9908     if (Reverse) {
9909       // If the address is consecutive but reversed, then the
9910       // wide store needs to start at the last vector element.
9911       // RunTimeVF =  VScale * VF.getKnownMinValue()
9912       // For fixed-width VScale is 1, then RunTimeVF = VF.getKnownMinValue()
9913       Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), State.VF);
9914       // NumElt = -Part * RunTimeVF
9915       Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF);
9916       // LastLane = 1 - RunTimeVF
9917       Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF);
9918       PartPtr =
9919           cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt));
9920       PartPtr->setIsInBounds(InBounds);
9921       PartPtr = cast<GetElementPtrInst>(
9922           Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane));
9923       PartPtr->setIsInBounds(InBounds);
9924       if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
9925         BlockInMaskParts[Part] =
9926             Builder.CreateVectorReverse(BlockInMaskParts[Part], "reverse");
9927     } else {
9928       Value *Increment =
9929           createStepForVF(Builder, Builder.getInt32Ty(), State.VF, Part);
9930       PartPtr = cast<GetElementPtrInst>(
9931           Builder.CreateGEP(ScalarDataTy, Ptr, Increment));
9932       PartPtr->setIsInBounds(InBounds);
9933     }
9934 
9935     unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
9936     return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
9937   };
9938 
9939   // Handle Stores:
9940   if (SI) {
9941     State.ILV->setDebugLocFromInst(SI);
9942 
9943     for (unsigned Part = 0; Part < State.UF; ++Part) {
9944       Instruction *NewSI = nullptr;
9945       Value *StoredVal = State.get(StoredValue, Part);
9946       if (CreateGatherScatter) {
9947         Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
9948         Value *VectorGep = State.get(getAddr(), Part);
9949         NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
9950                                             MaskPart);
9951       } else {
9952         if (Reverse) {
9953           // If we store to reverse consecutive memory locations, then we need
9954           // to reverse the order of elements in the stored value.
9955           StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse");
9956           // We don't want to update the value in the map as it might be used in
9957           // another expression. So don't call resetVectorValue(StoredVal).
9958         }
9959         auto *VecPtr =
9960             CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0)));
9961         if (isMaskRequired)
9962           NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
9963                                             BlockInMaskParts[Part]);
9964         else
9965           NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
9966       }
9967       State.ILV->addMetadata(NewSI, SI);
9968     }
9969     return;
9970   }
9971 
9972   // Handle loads.
9973   assert(LI && "Must have a load instruction");
9974   State.ILV->setDebugLocFromInst(LI);
9975   for (unsigned Part = 0; Part < State.UF; ++Part) {
9976     Value *NewLI;
9977     if (CreateGatherScatter) {
9978       Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
9979       Value *VectorGep = State.get(getAddr(), Part);
9980       NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart,
9981                                          nullptr, "wide.masked.gather");
9982       State.ILV->addMetadata(NewLI, LI);
9983     } else {
9984       auto *VecPtr =
9985           CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0)));
9986       if (isMaskRequired)
9987         NewLI = Builder.CreateMaskedLoad(
9988             DataTy, VecPtr, Alignment, BlockInMaskParts[Part],
9989             PoisonValue::get(DataTy), "wide.masked.load");
9990       else
9991         NewLI =
9992             Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
9993 
9994       // Add metadata to the load, but setVectorValue to the reverse shuffle.
9995       State.ILV->addMetadata(NewLI, LI);
9996       if (Reverse)
9997         NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
9998     }
9999 
10000     State.set(this, NewLI, Part);
10001   }
10002 }
10003 
10004 // Determine how to lower the scalar epilogue, which depends on 1) optimising
10005 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
10006 // predication, and 4) a TTI hook that analyses whether the loop is suitable
10007 // for predication.
10008 static ScalarEpilogueLowering getScalarEpilogueLowering(
10009     Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
10010     BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
10011     AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
10012     LoopVectorizationLegality &LVL) {
10013   // 1) OptSize takes precedence over all other options, i.e. if this is set,
10014   // don't look at hints or options, and don't request a scalar epilogue.
10015   // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
10016   // LoopAccessInfo (due to code dependency and not being able to reliably get
10017   // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
10018   // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
10019   // versioning when the vectorization is forced, unlike hasOptSize. So revert
10020   // back to the old way and vectorize with versioning when forced. See D81345.)
10021   if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
10022                                                       PGSOQueryType::IRPass) &&
10023                           Hints.getForce() != LoopVectorizeHints::FK_Enabled))
10024     return CM_ScalarEpilogueNotAllowedOptSize;
10025 
10026   // 2) If set, obey the directives
10027   if (PreferPredicateOverEpilogue.getNumOccurrences()) {
10028     switch (PreferPredicateOverEpilogue) {
10029     case PreferPredicateTy::ScalarEpilogue:
10030       return CM_ScalarEpilogueAllowed;
10031     case PreferPredicateTy::PredicateElseScalarEpilogue:
10032       return CM_ScalarEpilogueNotNeededUsePredicate;
10033     case PreferPredicateTy::PredicateOrDontVectorize:
10034       return CM_ScalarEpilogueNotAllowedUsePredicate;
10035     };
10036   }
10037 
10038   // 3) If set, obey the hints
10039   switch (Hints.getPredicate()) {
10040   case LoopVectorizeHints::FK_Enabled:
10041     return CM_ScalarEpilogueNotNeededUsePredicate;
10042   case LoopVectorizeHints::FK_Disabled:
10043     return CM_ScalarEpilogueAllowed;
10044   };
10045 
10046   // 4) if the TTI hook indicates this is profitable, request predication.
10047   if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
10048                                        LVL.getLAI()))
10049     return CM_ScalarEpilogueNotNeededUsePredicate;
10050 
10051   return CM_ScalarEpilogueAllowed;
10052 }
10053 
10054 Value *VPTransformState::get(VPValue *Def, unsigned Part) {
10055   // If Values have been set for this Def return the one relevant for \p Part.
10056   if (hasVectorValue(Def, Part))
10057     return Data.PerPartOutput[Def][Part];
10058 
10059   if (!hasScalarValue(Def, {Part, 0})) {
10060     Value *IRV = Def->getLiveInIRValue();
10061     Value *B = ILV->getBroadcastInstrs(IRV);
10062     set(Def, B, Part);
10063     return B;
10064   }
10065 
10066   Value *ScalarValue = get(Def, {Part, 0});
10067   // If we aren't vectorizing, we can just copy the scalar map values over
10068   // to the vector map.
10069   if (VF.isScalar()) {
10070     set(Def, ScalarValue, Part);
10071     return ScalarValue;
10072   }
10073 
10074   auto *RepR = dyn_cast<VPReplicateRecipe>(Def);
10075   bool IsUniform = RepR && RepR->isUniform();
10076 
10077   unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
10078   // Check if there is a scalar value for the selected lane.
10079   if (!hasScalarValue(Def, {Part, LastLane})) {
10080     // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform.
10081     assert((isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) ||
10082             isa<VPScalarIVStepsRecipe>(Def->getDef())) &&
10083            "unexpected recipe found to be invariant");
10084     IsUniform = true;
10085     LastLane = 0;
10086   }
10087 
10088   auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
10089   // Set the insert point after the last scalarized instruction or after the
10090   // last PHI, if LastInst is a PHI. This ensures the insertelement sequence
10091   // will directly follow the scalar definitions.
10092   auto OldIP = Builder.saveIP();
10093   auto NewIP =
10094       isa<PHINode>(LastInst)
10095           ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI())
10096           : std::next(BasicBlock::iterator(LastInst));
10097   Builder.SetInsertPoint(&*NewIP);
10098 
10099   // However, if we are vectorizing, we need to construct the vector values.
10100   // If the value is known to be uniform after vectorization, we can just
10101   // broadcast the scalar value corresponding to lane zero for each unroll
10102   // iteration. Otherwise, we construct the vector values using
10103   // insertelement instructions. Since the resulting vectors are stored in
10104   // State, we will only generate the insertelements once.
10105   Value *VectorValue = nullptr;
10106   if (IsUniform) {
10107     VectorValue = ILV->getBroadcastInstrs(ScalarValue);
10108     set(Def, VectorValue, Part);
10109   } else {
10110     // Initialize packing with insertelements to start from undef.
10111     assert(!VF.isScalable() && "VF is assumed to be non scalable.");
10112     Value *Undef = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
10113     set(Def, Undef, Part);
10114     for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
10115       ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
10116     VectorValue = get(Def, Part);
10117   }
10118   Builder.restoreIP(OldIP);
10119   return VectorValue;
10120 }
10121 
10122 // Process the loop in the VPlan-native vectorization path. This path builds
10123 // VPlan upfront in the vectorization pipeline, which allows to apply
10124 // VPlan-to-VPlan transformations from the very beginning without modifying the
10125 // input LLVM IR.
10126 static bool processLoopInVPlanNativePath(
10127     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
10128     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
10129     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
10130     OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
10131     ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints,
10132     LoopVectorizationRequirements &Requirements) {
10133 
10134   if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
10135     LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
10136     return false;
10137   }
10138   assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
10139   Function *F = L->getHeader()->getParent();
10140   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
10141 
10142   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
10143       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
10144 
10145   LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
10146                                 &Hints, IAI);
10147   // Use the planner for outer loop vectorization.
10148   // TODO: CM is not used at this point inside the planner. Turn CM into an
10149   // optional argument if we don't need it in the future.
10150   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints,
10151                                Requirements, ORE);
10152 
10153   // Get user vectorization factor.
10154   ElementCount UserVF = Hints.getWidth();
10155 
10156   CM.collectElementTypesForWidening();
10157 
10158   // Plan how to best vectorize, return the best VF and its cost.
10159   const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
10160 
10161   // If we are stress testing VPlan builds, do not attempt to generate vector
10162   // code. Masked vector code generation support will follow soon.
10163   // Also, do not attempt to vectorize if no vector code will be produced.
10164   if (VPlanBuildStressTest || EnableVPlanPredication ||
10165       VectorizationFactor::Disabled() == VF)
10166     return false;
10167 
10168   VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10169 
10170   {
10171     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10172                              F->getParent()->getDataLayout());
10173     InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
10174                            &CM, BFI, PSI, Checks);
10175     LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
10176                       << L->getHeader()->getParent()->getName() << "\"\n");
10177     LVP.executePlan(VF.Width, 1, BestPlan, LB, DT);
10178   }
10179 
10180   // Mark the loop as already vectorized to avoid vectorizing again.
10181   Hints.setAlreadyVectorized();
10182   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10183   return true;
10184 }
10185 
10186 // Emit a remark if there are stores to floats that required a floating point
10187 // extension. If the vectorized loop was generated with floating point there
10188 // will be a performance penalty from the conversion overhead and the change in
10189 // the vector width.
10190 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
10191   SmallVector<Instruction *, 4> Worklist;
10192   for (BasicBlock *BB : L->getBlocks()) {
10193     for (Instruction &Inst : *BB) {
10194       if (auto *S = dyn_cast<StoreInst>(&Inst)) {
10195         if (S->getValueOperand()->getType()->isFloatTy())
10196           Worklist.push_back(S);
10197       }
10198     }
10199   }
10200 
10201   // Traverse the floating point stores upwards searching, for floating point
10202   // conversions.
10203   SmallPtrSet<const Instruction *, 4> Visited;
10204   SmallPtrSet<const Instruction *, 4> EmittedRemark;
10205   while (!Worklist.empty()) {
10206     auto *I = Worklist.pop_back_val();
10207     if (!L->contains(I))
10208       continue;
10209     if (!Visited.insert(I).second)
10210       continue;
10211 
10212     // Emit a remark if the floating point store required a floating
10213     // point conversion.
10214     // TODO: More work could be done to identify the root cause such as a
10215     // constant or a function return type and point the user to it.
10216     if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
10217       ORE->emit([&]() {
10218         return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
10219                                           I->getDebugLoc(), L->getHeader())
10220                << "floating point conversion changes vector width. "
10221                << "Mixed floating point precision requires an up/down "
10222                << "cast that will negatively impact performance.";
10223       });
10224 
10225     for (Use &Op : I->operands())
10226       if (auto *OpI = dyn_cast<Instruction>(Op))
10227         Worklist.push_back(OpI);
10228   }
10229 }
10230 
10231 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
10232     : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
10233                                !EnableLoopInterleaving),
10234       VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
10235                               !EnableLoopVectorization) {}
10236 
10237 bool LoopVectorizePass::processLoop(Loop *L) {
10238   assert((EnableVPlanNativePath || L->isInnermost()) &&
10239          "VPlan-native path is not enabled. Only process inner loops.");
10240 
10241 #ifndef NDEBUG
10242   const std::string DebugLocStr = getDebugLocString(L);
10243 #endif /* NDEBUG */
10244 
10245   LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in '"
10246                     << L->getHeader()->getParent()->getName() << "' from "
10247                     << DebugLocStr << "\n");
10248 
10249   LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI);
10250 
10251   LLVM_DEBUG(
10252       dbgs() << "LV: Loop hints:"
10253              << " force="
10254              << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
10255                      ? "disabled"
10256                      : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
10257                             ? "enabled"
10258                             : "?"))
10259              << " width=" << Hints.getWidth()
10260              << " interleave=" << Hints.getInterleave() << "\n");
10261 
10262   // Function containing loop
10263   Function *F = L->getHeader()->getParent();
10264 
10265   // Looking at the diagnostic output is the only way to determine if a loop
10266   // was vectorized (other than looking at the IR or machine code), so it
10267   // is important to generate an optimization remark for each loop. Most of
10268   // these messages are generated as OptimizationRemarkAnalysis. Remarks
10269   // generated as OptimizationRemark and OptimizationRemarkMissed are
10270   // less verbose reporting vectorized loops and unvectorized loops that may
10271   // benefit from vectorization, respectively.
10272 
10273   if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
10274     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
10275     return false;
10276   }
10277 
10278   PredicatedScalarEvolution PSE(*SE, *L);
10279 
10280   // Check if it is legal to vectorize the loop.
10281   LoopVectorizationRequirements Requirements;
10282   LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
10283                                 &Requirements, &Hints, DB, AC, BFI, PSI);
10284   if (!LVL.canVectorize(EnableVPlanNativePath)) {
10285     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
10286     Hints.emitRemarkWithHints();
10287     return false;
10288   }
10289 
10290   // Check the function attributes and profiles to find out if this function
10291   // should be optimized for size.
10292   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
10293       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);
10294 
10295   // Entrance to the VPlan-native vectorization path. Outer loops are processed
10296   // here. They may require CFG and instruction level transformations before
10297   // even evaluating whether vectorization is profitable. Since we cannot modify
10298   // the incoming IR, we need to build VPlan upfront in the vectorization
10299   // pipeline.
10300   if (!L->isInnermost())
10301     return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
10302                                         ORE, BFI, PSI, Hints, Requirements);
10303 
10304   assert(L->isInnermost() && "Inner loop expected.");
10305 
10306   // Check the loop for a trip count threshold: vectorize loops with a tiny trip
10307   // count by optimizing for size, to minimize overheads.
10308   auto ExpectedTC = getSmallBestKnownTC(*SE, L);
10309   if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
10310     LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
10311                       << "This loop is worth vectorizing only if no scalar "
10312                       << "iteration overheads are incurred.");
10313     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
10314       LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
10315     else {
10316       LLVM_DEBUG(dbgs() << "\n");
10317       SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
10318     }
10319   }
10320 
10321   // Check the function attributes to see if implicit floats are allowed.
10322   // FIXME: This check doesn't seem possibly correct -- what if the loop is
10323   // an integer loop and the vector instructions selected are purely integer
10324   // vector instructions?
10325   if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
10326     reportVectorizationFailure(
10327         "Can't vectorize when the NoImplicitFloat attribute is used",
10328         "loop not vectorized due to NoImplicitFloat attribute",
10329         "NoImplicitFloat", ORE, L);
10330     Hints.emitRemarkWithHints();
10331     return false;
10332   }
10333 
10334   // Check if the target supports potentially unsafe FP vectorization.
10335   // FIXME: Add a check for the type of safety issue (denormal, signaling)
10336   // for the target we're vectorizing for, to make sure none of the
10337   // additional fp-math flags can help.
10338   if (Hints.isPotentiallyUnsafe() &&
10339       TTI->isFPVectorizationPotentiallyUnsafe()) {
10340     reportVectorizationFailure(
10341         "Potentially unsafe FP op prevents vectorization",
10342         "loop not vectorized due to unsafe FP support.",
10343         "UnsafeFP", ORE, L);
10344     Hints.emitRemarkWithHints();
10345     return false;
10346   }
10347 
10348   bool AllowOrderedReductions;
10349   // If the flag is set, use that instead and override the TTI behaviour.
10350   if (ForceOrderedReductions.getNumOccurrences() > 0)
10351     AllowOrderedReductions = ForceOrderedReductions;
10352   else
10353     AllowOrderedReductions = TTI->enableOrderedReductions();
10354   if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) {
10355     ORE->emit([&]() {
10356       auto *ExactFPMathInst = Requirements.getExactFPInst();
10357       return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
10358                                                  ExactFPMathInst->getDebugLoc(),
10359                                                  ExactFPMathInst->getParent())
10360              << "loop not vectorized: cannot prove it is safe to reorder "
10361                 "floating-point operations";
10362     });
10363     LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
10364                          "reorder floating-point operations\n");
10365     Hints.emitRemarkWithHints();
10366     return false;
10367   }
10368 
10369   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
10370   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
10371 
10372   // If an override option has been passed in for interleaved accesses, use it.
10373   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
10374     UseInterleaved = EnableInterleavedMemAccesses;
10375 
10376   // Analyze interleaved memory accesses.
10377   if (UseInterleaved) {
10378     IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
10379   }
10380 
10381   // Use the cost model.
10382   LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
10383                                 F, &Hints, IAI);
10384   CM.collectValuesToIgnore();
10385   CM.collectElementTypesForWidening();
10386 
10387   // Use the planner for vectorization.
10388   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints,
10389                                Requirements, ORE);
10390 
10391   // Get user vectorization factor and interleave count.
10392   ElementCount UserVF = Hints.getWidth();
10393   unsigned UserIC = Hints.getInterleave();
10394 
10395   // Plan how to best vectorize, return the best VF and its cost.
10396   Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
10397 
10398   VectorizationFactor VF = VectorizationFactor::Disabled();
10399   unsigned IC = 1;
10400 
10401   if (MaybeVF) {
10402     VF = *MaybeVF;
10403     // Select the interleave count.
10404     IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue());
10405   }
10406 
10407   // Identify the diagnostic messages that should be produced.
10408   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
10409   bool VectorizeLoop = true, InterleaveLoop = true;
10410   if (VF.Width.isScalar()) {
10411     LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
10412     VecDiagMsg = std::make_pair(
10413         "VectorizationNotBeneficial",
10414         "the cost-model indicates that vectorization is not beneficial");
10415     VectorizeLoop = false;
10416   }
10417 
10418   if (!MaybeVF && UserIC > 1) {
10419     // Tell the user interleaving was avoided up-front, despite being explicitly
10420     // requested.
10421     LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
10422                          "interleaving should be avoided up front\n");
10423     IntDiagMsg = std::make_pair(
10424         "InterleavingAvoided",
10425         "Ignoring UserIC, because interleaving was avoided up front");
10426     InterleaveLoop = false;
10427   } else if (IC == 1 && UserIC <= 1) {
10428     // Tell the user interleaving is not beneficial.
10429     LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
10430     IntDiagMsg = std::make_pair(
10431         "InterleavingNotBeneficial",
10432         "the cost-model indicates that interleaving is not beneficial");
10433     InterleaveLoop = false;
10434     if (UserIC == 1) {
10435       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
10436       IntDiagMsg.second +=
10437           " and is explicitly disabled or interleave count is set to 1";
10438     }
10439   } else if (IC > 1 && UserIC == 1) {
10440     // Tell the user interleaving is beneficial, but it explicitly disabled.
10441     LLVM_DEBUG(
10442         dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
10443     IntDiagMsg = std::make_pair(
10444         "InterleavingBeneficialButDisabled",
10445         "the cost-model indicates that interleaving is beneficial "
10446         "but is explicitly disabled or interleave count is set to 1");
10447     InterleaveLoop = false;
10448   }
10449 
10450   // Override IC if user provided an interleave count.
10451   IC = UserIC > 0 ? UserIC : IC;
10452 
10453   // Emit diagnostic messages, if any.
10454   const char *VAPassName = Hints.vectorizeAnalysisPassName();
10455   if (!VectorizeLoop && !InterleaveLoop) {
10456     // Do not vectorize or interleaving the loop.
10457     ORE->emit([&]() {
10458       return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
10459                                       L->getStartLoc(), L->getHeader())
10460              << VecDiagMsg.second;
10461     });
10462     ORE->emit([&]() {
10463       return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
10464                                       L->getStartLoc(), L->getHeader())
10465              << IntDiagMsg.second;
10466     });
10467     return false;
10468   } else if (!VectorizeLoop && InterleaveLoop) {
10469     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10470     ORE->emit([&]() {
10471       return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
10472                                         L->getStartLoc(), L->getHeader())
10473              << VecDiagMsg.second;
10474     });
10475   } else if (VectorizeLoop && !InterleaveLoop) {
10476     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10477                       << ") in " << DebugLocStr << '\n');
10478     ORE->emit([&]() {
10479       return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
10480                                         L->getStartLoc(), L->getHeader())
10481              << IntDiagMsg.second;
10482     });
10483   } else if (VectorizeLoop && InterleaveLoop) {
10484     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10485                       << ") in " << DebugLocStr << '\n');
10486     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10487   }
10488 
10489   bool DisableRuntimeUnroll = false;
10490   MDNode *OrigLoopID = L->getLoopID();
10491   {
10492     // Optimistically generate runtime checks. Drop them if they turn out to not
10493     // be profitable. Limit the scope of Checks, so the cleanup happens
10494     // immediately after vector codegeneration is done.
10495     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10496                              F->getParent()->getDataLayout());
10497     if (!VF.Width.isScalar() || IC > 1)
10498       Checks.Create(L, *LVL.getLAI(), PSE.getPredicate());
10499 
10500     using namespace ore;
10501     if (!VectorizeLoop) {
10502       assert(IC > 1 && "interleave count should not be 1 or 0");
10503       // If we decided that it is not legal to vectorize the loop, then
10504       // interleave it.
10505       InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
10506                                  &CM, BFI, PSI, Checks);
10507 
10508       VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10509       LVP.executePlan(VF.Width, IC, BestPlan, Unroller, DT);
10510 
10511       ORE->emit([&]() {
10512         return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
10513                                   L->getHeader())
10514                << "interleaved loop (interleaved count: "
10515                << NV("InterleaveCount", IC) << ")";
10516       });
10517     } else {
10518       // If we decided that it is *legal* to vectorize the loop, then do it.
10519 
10520       // Consider vectorizing the epilogue too if it's profitable.
10521       VectorizationFactor EpilogueVF =
10522           CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
10523       if (EpilogueVF.Width.isVector()) {
10524 
10525         // The first pass vectorizes the main loop and creates a scalar epilogue
10526         // to be vectorized by executing the plan (potentially with a different
10527         // factor) again shortly afterwards.
10528         EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1);
10529         EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE,
10530                                            EPI, &LVL, &CM, BFI, PSI, Checks);
10531 
10532         VPlan &BestMainPlan = LVP.getBestPlanFor(EPI.MainLoopVF);
10533         LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF, BestMainPlan, MainILV,
10534                         DT);
10535         ++LoopsVectorized;
10536 
10537         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10538         formLCSSARecursively(*L, *DT, LI, SE);
10539 
10540         // Second pass vectorizes the epilogue and adjusts the control flow
10541         // edges from the first pass.
10542         EPI.MainLoopVF = EPI.EpilogueVF;
10543         EPI.MainLoopUF = EPI.EpilogueUF;
10544         EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
10545                                                  ORE, EPI, &LVL, &CM, BFI, PSI,
10546                                                  Checks);
10547 
10548         VPlan &BestEpiPlan = LVP.getBestPlanFor(EPI.EpilogueVF);
10549 
10550         // Ensure that the start values for any VPReductionPHIRecipes are
10551         // updated before vectorising the epilogue loop.
10552         VPBasicBlock *Header =
10553             BestEpiPlan.getVectorLoopRegion()->getEntryBasicBlock();
10554         for (VPRecipeBase &R : Header->phis()) {
10555           if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) {
10556             if (auto *Resume = MainILV.getReductionResumeValue(
10557                     ReductionPhi->getRecurrenceDescriptor())) {
10558               VPValue *StartVal = new VPValue(Resume);
10559               BestEpiPlan.addExternalDef(StartVal);
10560               ReductionPhi->setOperand(0, StartVal);
10561             }
10562           }
10563         }
10564 
10565         LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV,
10566                         DT);
10567         ++LoopsEpilogueVectorized;
10568 
10569         if (!MainILV.areSafetyChecksAdded())
10570           DisableRuntimeUnroll = true;
10571       } else {
10572         InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
10573                                &LVL, &CM, BFI, PSI, Checks);
10574 
10575         VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10576         LVP.executePlan(VF.Width, IC, BestPlan, LB, DT);
10577         ++LoopsVectorized;
10578 
10579         // Add metadata to disable runtime unrolling a scalar loop when there
10580         // are no runtime checks about strides and memory. A scalar loop that is
10581         // rarely used is not worth unrolling.
10582         if (!LB.areSafetyChecksAdded())
10583           DisableRuntimeUnroll = true;
10584       }
10585       // Report the vectorization decision.
10586       ORE->emit([&]() {
10587         return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
10588                                   L->getHeader())
10589                << "vectorized loop (vectorization width: "
10590                << NV("VectorizationFactor", VF.Width)
10591                << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
10592       });
10593     }
10594 
10595     if (ORE->allowExtraAnalysis(LV_NAME))
10596       checkMixedPrecision(L, ORE);
10597   }
10598 
10599   Optional<MDNode *> RemainderLoopID =
10600       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
10601                                       LLVMLoopVectorizeFollowupEpilogue});
10602   if (RemainderLoopID.hasValue()) {
10603     L->setLoopID(RemainderLoopID.getValue());
10604   } else {
10605     if (DisableRuntimeUnroll)
10606       AddRuntimeUnrollDisableMetaData(L);
10607 
10608     // Mark the loop as already vectorized to avoid vectorizing again.
10609     Hints.setAlreadyVectorized();
10610   }
10611 
10612   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10613   return true;
10614 }
10615 
10616 LoopVectorizeResult LoopVectorizePass::runImpl(
10617     Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
10618     DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
10619     DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
10620     std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
10621     OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
10622   SE = &SE_;
10623   LI = &LI_;
10624   TTI = &TTI_;
10625   DT = &DT_;
10626   BFI = &BFI_;
10627   TLI = TLI_;
10628   AA = &AA_;
10629   AC = &AC_;
10630   GetLAA = &GetLAA_;
10631   DB = &DB_;
10632   ORE = &ORE_;
10633   PSI = PSI_;
10634 
10635   // Don't attempt if
10636   // 1. the target claims to have no vector registers, and
10637   // 2. interleaving won't help ILP.
10638   //
10639   // The second condition is necessary because, even if the target has no
10640   // vector registers, loop vectorization may still enable scalar
10641   // interleaving.
10642   if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
10643       TTI->getMaxInterleaveFactor(1) < 2)
10644     return LoopVectorizeResult(false, false);
10645 
10646   bool Changed = false, CFGChanged = false;
10647 
10648   // The vectorizer requires loops to be in simplified form.
10649   // Since simplification may add new inner loops, it has to run before the
10650   // legality and profitability checks. This means running the loop vectorizer
10651   // will simplify all loops, regardless of whether anything end up being
10652   // vectorized.
10653   for (auto &L : *LI)
10654     Changed |= CFGChanged |=
10655         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10656 
10657   // Build up a worklist of inner-loops to vectorize. This is necessary as
10658   // the act of vectorizing or partially unrolling a loop creates new loops
10659   // and can invalidate iterators across the loops.
10660   SmallVector<Loop *, 8> Worklist;
10661 
10662   for (Loop *L : *LI)
10663     collectSupportedLoops(*L, LI, ORE, Worklist);
10664 
10665   LoopsAnalyzed += Worklist.size();
10666 
10667   // Now walk the identified inner loops.
10668   while (!Worklist.empty()) {
10669     Loop *L = Worklist.pop_back_val();
10670 
10671     // For the inner loops we actually process, form LCSSA to simplify the
10672     // transform.
10673     Changed |= formLCSSARecursively(*L, *DT, LI, SE);
10674 
10675     Changed |= CFGChanged |= processLoop(L);
10676   }
10677 
10678   // Process each loop nest in the function.
10679   return LoopVectorizeResult(Changed, CFGChanged);
10680 }
10681 
10682 PreservedAnalyses LoopVectorizePass::run(Function &F,
10683                                          FunctionAnalysisManager &AM) {
10684     auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
10685     auto &LI = AM.getResult<LoopAnalysis>(F);
10686     auto &TTI = AM.getResult<TargetIRAnalysis>(F);
10687     auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
10688     auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
10689     auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
10690     auto &AA = AM.getResult<AAManager>(F);
10691     auto &AC = AM.getResult<AssumptionAnalysis>(F);
10692     auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
10693     auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
10694 
10695     auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
10696     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
10697         [&](Loop &L) -> const LoopAccessInfo & {
10698       LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,      SE,
10699                                         TLI, TTI, nullptr, nullptr, nullptr};
10700       return LAM.getResult<LoopAccessAnalysis>(L, AR);
10701     };
10702     auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
10703     ProfileSummaryInfo *PSI =
10704         MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
10705     LoopVectorizeResult Result =
10706         runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
10707     if (!Result.MadeAnyChange)
10708       return PreservedAnalyses::all();
10709     PreservedAnalyses PA;
10710 
10711     // We currently do not preserve loopinfo/dominator analyses with outer loop
10712     // vectorization. Until this is addressed, mark these analyses as preserved
10713     // only for non-VPlan-native path.
10714     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
10715     if (!EnableVPlanNativePath) {
10716       PA.preserve<LoopAnalysis>();
10717       PA.preserve<DominatorTreeAnalysis>();
10718     }
10719 
10720     if (Result.MadeCFGChange) {
10721       // Making CFG changes likely means a loop got vectorized. Indicate that
10722       // extra simplification passes should be run.
10723       // TODO: MadeCFGChanges is not a prefect proxy. Extra passes should only
10724       // be run if runtime checks have been added.
10725       AM.getResult<ShouldRunExtraVectorPasses>(F);
10726       PA.preserve<ShouldRunExtraVectorPasses>();
10727     } else {
10728       PA.preserveSet<CFGAnalyses>();
10729     }
10730     return PA;
10731 }
10732 
10733 void LoopVectorizePass::printPipeline(
10734     raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
10735   static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
10736       OS, MapClassName2PassName);
10737 
10738   OS << "<";
10739   OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
10740   OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
10741   OS << ">";
10742 }
10743